input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
= from.style;
/** @type {string} */
style.left = labelPos.x + "px";
/** @type {string} */
style.top = labelPos.y + "px";
/** @type {string} */
style.display = this.fitsInCanvas(labelPos, canvas) ? "" : "none";
options.onPlaceLabel(from, lab);
}
});
Hypertree.Plot.NodeTypes = new Class((function() {
  /**
   * Builds a node-type descriptor for shapes sized by a single "dim" datum
   * (circle, square, triangle, star).
   * @param {string} shape key into this.nodeHelper
   * @return {?} descriptor with render/contains methods
   */
  function dimType(shape) {
    return {
      render : function(node, canvas) {
        var pos = node.pos.getc(true);
        var dim = node.getData("dim");
        this.nodeHelper[shape].render("fill", pos, dim, canvas);
      },
      contains : function(node, point) {
        var pos = node.pos.getc(true);
        var dim = node.getData("dim");
        return this.nodeHelper[shape].contains(pos, point, dim);
      }
    };
  }
  /**
   * Builds a node-type descriptor for shapes sized by separate
   * "width"/"height" data (ellipse, rectangle).
   * @param {string} shape key into this.nodeHelper
   * @return {?} descriptor with render/contains methods
   */
  function boxType(shape) {
    return {
      render : function(node, canvas) {
        var pos = node.pos.getc(true);
        var width = node.getData("width");
        var height = node.getData("height");
        this.nodeHelper[shape].render("fill", pos, width, height, canvas);
      },
      contains : function(node, point) {
        var pos = node.pos.getc(true);
        var width = node.getData("width");
        var height = node.getData("height");
        return this.nodeHelper[shape].contains(pos, point, width, height);
      }
    };
  }
  return {
    // "none" draws nothing and never reports a hit.
    none : {
      render : $.empty,
      contains : $.lambda(false)
    },
    circle : dimType("circle"),
    ellipse : boxType("ellipse"),
    square : dimType("square"),
    rectangle : boxType("rectangle"),
    triangle : dimType("triangle"),
    star : dimType("star")
  };
})());
Hypertree.Plot.EdgeTypes = new Class({
  /** No-op edge type: draws nothing. */
  none : $.empty,
  line : {
    /**
     * Draws a straight segment between the edge's endpoints.
     * @param {?} edge
     * @param {?} canvas
     * @return {undefined}
     */
    render : function(edge, canvas) {
      var start = edge.nodeFrom.pos.getc(true);
      var end = edge.nodeTo.pos.getc(true);
      this.edgeHelper.line.render(start, end, canvas);
    },
    /**
     * Hit-tests a point against the segment within this.edge.epsilon.
     * @param {?} edge
     * @param {?} point
     * @return {?}
     */
    contains : function(edge, point) {
      var start = edge.nodeFrom.pos.getc(true);
      var end = edge.nodeTo.pos.getc(true);
      return this.edgeHelper.line.contains(start, end, point, this.edge.epsilon);
    }
  },
  arrow : {
    /**
     * Draws an arrow between the endpoints, sized by the edge's "dim" datum.
     * @param {?} edge
     * @param {?} canvas
     * @return {undefined}
     */
    render : function(edge, canvas) {
      var start = edge.nodeFrom.pos.getc(true);
      var end = edge.nodeTo.pos.getc(true);
      var dim = edge.getData("dim");
      var direction = edge.data.$direction;
      // Flip the arrowhead when an explicit $direction list exists and its
      // first entry is not the source node's id.
      var inverse = direction && (direction.length > 1 && direction[0] != edge.nodeFrom.id);
      this.edgeHelper.arrow.render(start, end, dim, inverse, canvas);
    },
    /**
     * Hit-tests a point against the arrow's segment within this.edge.epsilon.
     * @param {?} edge
     * @param {?} point
     * @return {?}
     */
    contains : function(edge, point) {
      var start = edge.nodeFrom.pos.getc(true);
      var end = edge.nodeTo.pos.getc(true);
      return this.edgeHelper.arrow.contains(start, end, point, this.edge.epsilon);
    }
  }
});
})($jit.ForceDirected);
// TreeMap (TM) visualization namespace. NOTE: `Hypertree` is rebound here to
// $jit.TM, so all subsequent `Hypertree.*` definitions attach to the TM
// namespace rather than the previous one.
$jit.TM = {};
var Hypertree = $jit.TM;
/** @type {boolean} */
$jit.TM.$extend = true;
Hypertree.Base = {
// Tracks the current slice-and-dice split direction for the TreeMap layout.
layout : {
  // Current split orientation: "h" (horizontal) or "v" (vertical).
  orientation : "h",
  /**
   * @return {?} true when the current orientation is vertical ("v").
   */
  vertical : function() {
    return this.orientation == "v";
  },
  /**
   * @return {?} true when the current orientation is horizontal ("h").
   */
  horizontal : function() {
    return this.orientation == "h";
  },
  /**
   * Flips the orientation between "h" and "v".
   * @return {undefined}
   */
  change : function() {
    /** @type {string} */
    this.orientation = this.vertical() ? "h" : "v";
  }
},
/**
 * Builds the TreeMap instance: merges user options over defaults, creates
 * (or reuses) the canvas, and wires up the graph, labels, plot, and helper
 * objects.
 * @param {?} controller user-supplied configuration/controller object
 * @return {undefined}
 */
initialize : function(controller) {
  // TreeMap-specific defaults, merged below under the user's `controller`.
  var config = {
    orientation : "h",
    titleHeight : 13,
    offset : 2,
    levelsToShow : 0,
    constrained : false,
    animate : false,
    Node : {
      type : "rectangle",
      overridable : true,
      width : 3,
      height : 3,
      color : "#444"
    },
    Label : {
      textAlign : "center",
      textBaseline : "top"
    },
    Edge : {
      type : "none"
    },
    duration : 700,
    fps : 45
  };
  // Later arguments win: user `controller` overrides `config`, which
  // overrides the framework-wide Options defaults.
  this.controller = this.config = $.merge(Options("Canvas", "Node", "Edge", "Fx", "Controller", "Tips", "NodeStyles", "Events", "Navigation", "Label"), config, controller);
  this.layout.orientation = this.config.orientation;
  var canvasConfig = this.config;
  if (canvasConfig.useCanvas) {
    // Reuse an externally supplied canvas instead of creating one.
    this.canvas = canvasConfig.useCanvas;
    /** @type {string} */
    this.config.labelContainer = this.canvas.id + "-label";
  } else {
    if (canvasConfig.background) {
      canvasConfig.background = $.merge({
        type : "Circles"
      }, canvasConfig.background);
    }
    this.canvas = new Canvas(this, canvasConfig);
    // injectInto may be an element id string or a DOM element.
    /** @type {string} */
    this.config.labelContainer = (typeof canvasConfig.injectInto == "string" ? canvasConfig.injectInto : canvasConfig.injectInto.id) + "-label";
  }
  this.graphOptions = {
    /** @type {function (number, (number|string)): undefined} */
    klass : Vector,
    Node : {
      selected : false,
      exist : true,
      drawn : true
    }
  };
  this.graph = new Graph(this.graphOptions, this.config.Node, this.config.Edge);
  // Label renderer is chosen dynamically by configured type (Native/HTML/SVG).
  this.labels = new Hypertree.Label[canvasConfig.Label.type](this);
  this.fx = new Hypertree.Plot(this);
  this.op = new Hypertree.Op(this);
  this.group = new Hypertree.Group(this);
  this.geom = new Hypertree.Geom(this);
  /** @type {null} */
  this.clickedNode = null;
  // Guards against re-entrant refresh/enter/out operations.
  /** @type {boolean} */
  this.busy = false;
  this.initializeExtras();
},
/**
 * Recomputes the layout and redraws, either animated (interpolating to the
 * "end" positions) or immediately. No-ops while another operation is busy.
 * @return {undefined}
 */
refresh : function() {
  if (this.busy) {
    return;
  }
  /** @type {boolean} */
  this.busy = true;
  var that = this;
  if (this.config.animate) {
    // Compute target ("end") positions, then animate toward them.
    this.compute("end");
    if (this.config.levelsToShow > 0) {
      this.geom.setRightLevelToShow(this.graph.getNode(this.clickedNode && this.clickedNode.id || this.root));
    }
    this.fx.animate($.merge(this.config, {
      modes : ["linear", "node-property:width:height"],
      /**
       * Clears the busy flag once the animation finishes.
       * @return {undefined}
       */
      onComplete : function() {
        /** @type {boolean} */
        that.busy = false;
      }
    }));
  } else {
    // Non-animated path: hide DOM/SVG labels first (Native labels are
    // drawn on the canvas and need no hiding), then recompute and plot.
    var type = this.config.Label.type;
    if (type != "Native") {
      that = this;
      this.graph.eachNode(function(from) {
        that.labels.hideLabel(from, false);
      });
    }
    /** @type {boolean} */
    this.busy = false;
    this.compute();
    if (this.config.levelsToShow > 0) {
      this.geom.setRightLevelToShow(this.graph.getNode(this.clickedNode && this.clickedNode.id || this.root));
    }
    this.plot();
  }
},
/**
 * Delegates a full redraw to the Plot helper.
 * @return {undefined}
 */
plot : function() {
  this.fx.plot();
},
/**
* @param {?} n
* @return {?}
*/
leaf : function(n) {
return n.getSubnodes([1, 1], "ignore").length == 0;
},
/**
 * Zooms into node `n`: optionally requests missing children from the
 * controller, then either animates a fade-out/relayout/fade-in sequence or
 * refreshes immediately. No-ops while another operation is busy.
 * @param {?} n node to become the new zoom root
 * @return {undefined}
 */
enter : function(n) {
  if (this.busy) {
    return;
  }
  /** @type {boolean} */
  this.busy = true;
  var that = this;
  var config = this.config;
  var graph = this.graph;
  var node = n;
  // The previous zoom root is restored temporarily during the animation so
  // the "end" layout can be computed with the new root while the on-screen
  // state still reflects the old one.
  var previousClickedNode = this.clickedNode;
  var callback = {
    /**
     * Runs once child nodes are available (immediately when no request
     * is configured).
     * @return {undefined}
     */
    onComplete : function() {
      if (config.levelsToShow > 0) {
        that.geom.setRightLevelToShow(n);
      }
      if (config.levelsToShow > 0 || config.request) {
        that.compute();
      }
      if (config.animate) {
        // Fade everything out, then fade the entered subgraph back in.
        graph.nodeList.setData("alpha", 0, "end");
        n.eachSubgraph(function(n) {
          n.setData("alpha", 1, "end");
        }, "ignore");
        that.fx.animate({
          duration : 500,
          modes : ["node-property:alpha"],
          /**
           * After the fade: compute the "end" layout with the new root,
           * then animate geometry toward it.
           * @return {undefined}
           */
          onComplete : function() {
            that.clickedNode = node;
            that.compute("end");
            that.clickedNode = previousClickedNode;
            that.fx.animate({
              modes : ["linear", "node-property:width:height"],
              duration : 1E3,
              /**
               * Commits the new zoom root and clears the busy flag.
               * @return {undefined}
               */
              onComplete : function() {
                /** @type {boolean} */
                that.busy = false;
                that.clickedNode = node;
              }
            });
          }
        });
      } else {
        // Non-animated path: commit the new root and redraw synchronously.
        /** @type {boolean} */
        that.busy = false;
        that.clickedNode = n;
        that.refresh();
      }
    }
  };
  if (config.request) {
    this.requestNodes(node, callback);
  } else {
    callback.onComplete();
  }
},
/**
* @return {undefined}
*/
out : function() {
if (this.busy) {
return;
}
/** @type {boolean} */
this.busy = true;
/** @type {boolean} */
this.events.hoveredNode = false;
var that = this;
var config = this.config;
| |
<reponame>tweak-com-public/tweak-api-client-python<gh_stars>0
# coding: utf-8
"""
tweak-api
Tweak API to integrate with all the Tweak services. You can find out more about Tweak at <a href='https://www.tweak.com'>https://www.tweak.com</a>, #tweak.
OpenAPI spec version: 1.0.8-beta.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class ImageFolderMemberApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    """Bind the API client, defaulting to the shared Configuration's client.

    :param api_client: optional ApiClient; when absent (or falsy) the
        singleton Configuration's client is used, created on first need.
    """
    config = Configuration()
    if not api_client:
        # Lazily create the shared client the first time it is needed.
        if not config.api_client:
            config.api_client = ApiClient()
        api_client = config.api_client
    self.api_client = api_client
def image_folder_members_change_stream_get(self, **kwargs):
    """
    Create a change stream.
    Synchronous by default; pass a `callback` callable to make the request
    asynchronously, e.g.:
    >>> def callback_function(response):
    >>>     pprint(response)
    >>> thread = api.image_folder_members_change_stream_get(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str options:
    :return: file
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Delegate to the *_with_http_info variant; when a callback is supplied
    # that call returns the request thread instead of the response data.
    return self.image_folder_members_change_stream_get_with_http_info(**kwargs)
def image_folder_members_change_stream_get_with_http_info(self, **kwargs):
    """
    Create a change stream.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.image_folder_members_change_stream_get_with_http_info(callback=callback_function)
    :param callback function: The callback function
    for asynchronous request. (optional)
    :param str options:
    :return: file
    If the method is called asynchronously,
    returns the request thread.
    """
    all_params = ['options']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    # `locals()` snapshots self/kwargs/all_params; validated kwargs are then
    # merged into the same dict so the rest of the method reads `params`.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method image_folder_members_change_stream_get" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    resource_path = '/ImageFolderMembers/change-stream'.replace('{format}', 'json')
    path_params = {}
    query_params = {}
    # `options` only appears in `params` when the caller passed it.
    if 'options' in params:
        query_params['options'] = params['options']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
    # Authentication setting
    auth_settings = ['access_token']
    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='file',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def image_folder_members_change_stream_post(self, **kwargs):
    """
    Create a change stream.
    Synchronous by default; pass a `callback` callable to make the request
    asynchronously, e.g.:
    >>> def callback_function(response):
    >>>     pprint(response)
    >>> thread = api.image_folder_members_change_stream_post(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str options:
    :return: file
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Delegate to the *_with_http_info variant; when a callback is supplied
    # that call returns the request thread instead of the response data.
    return self.image_folder_members_change_stream_post_with_http_info(**kwargs)
def image_folder_members_change_stream_post_with_http_info(self, **kwargs):
    """
    Create a change stream.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.image_folder_members_change_stream_post_with_http_info(callback=callback_function)
    :param callback function: The callback function
    for asynchronous request. (optional)
    :param str options:
    :return: file
    If the method is called asynchronously,
    returns the request thread.
    """
    all_params = ['options']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    # `locals()` snapshots self/kwargs/all_params; validated kwargs are then
    # merged into the same dict so the rest of the method reads `params`.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method image_folder_members_change_stream_post" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    resource_path = '/ImageFolderMembers/change-stream'.replace('{format}', 'json')
    path_params = {}
    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    # POST variant sends `options` in the form body rather than the query.
    if 'options' in params:
        form_params.append(('options', params['options']))
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
    # Authentication setting
    auth_settings = ['access_token']
    return self.api_client.call_api(resource_path, 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='file',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def image_folder_members_count_get(self, **kwargs):
    """
    Count instances of the model matched by where from the data source.
    Synchronous by default; pass a `callback` callable to make the request
    asynchronously, e.g.:
    >>> def callback_function(response):
    >>>     pprint(response)
    >>> thread = api.image_folder_members_count_get(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str where: Criteria to match model instances
    :return: InlineResponse2001
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Delegate to the *_with_http_info variant; when a callback is supplied
    # that call returns the request thread instead of the response data.
    return self.image_folder_members_count_get_with_http_info(**kwargs)
def image_folder_members_count_get_with_http_info(self, **kwargs):
    """
    Count instances of the model matched by where from the data source.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.image_folder_members_count_get_with_http_info(callback=callback_function)
    :param callback function: The callback function
    for asynchronous request. (optional)
    :param str where: Criteria to match model instances
    :return: InlineResponse2001
    If the method is called asynchronously,
    returns the request thread.
    """
    all_params = ['where']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    # `locals()` snapshots self/kwargs/all_params; validated kwargs are then
    # merged into the same dict so the rest of the method reads `params`.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method image_folder_members_count_get" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    resource_path = '/ImageFolderMembers/count'.replace('{format}', 'json')
    path_params = {}
    query_params = {}
    # `where` only appears in `params` when the caller passed it.
    if 'where' in params:
        query_params['where'] = params['where']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
    # Authentication setting
    auth_settings = ['access_token']
    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='InlineResponse2001',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def image_folder_members_find_one_get(self, **kwargs):
    """
    Find first instance of the model matched by filter from the data source.
    Synchronous by default; pass a `callback` callable to make the request
    asynchronously, e.g.:
    >>> def callback_function(response):
    >>>     pprint(response)
    >>> thread = api.image_folder_members_find_one_get(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str filter: Filter defining fields, where, include, order, offset, and limit - must be a JSON-encoded string ({\"something\":\"value\"})
    :return: ImageFolderMember
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Delegate to the *_with_http_info variant; when a callback is supplied
    # that call returns the request thread instead of the response data.
    return self.image_folder_members_find_one_get_with_http_info(**kwargs)
def image_folder_members_find_one_get_with_http_info(self, **kwargs):
"""
Find first instance of the model matched by filter from the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.image_folder_members_find_one_get_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str filter: Filter defining fields, where, include, order, offset, and limit - must be a JSON-encoded string ({\"something\":\"value\"})
:return: ImageFolderMember
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['filter']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method image_folder_members_find_one_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = | |
: 4,
ToontownCentral : 4, # TT streets are in 5
MyEstate : 5.5,
DonaldsDock : 6,
MinniesMelodyland: 6,
GoofySpeedway : 6,
TheBrrrgh : 8,
DaisyGardens : 8,
FunnyFarm : 8,
DonaldsDreamland : 8,
OutdoorZone : 8,
BossbotHQ : 12,
SellbotHQ : 9,
CashbotHQ : 10,
LawbotHQ : 11,
GolfZone : 8,
PartyHood : 13,
}
# town streets to download phases
streetPhaseMap = {
    ToontownCentral  : 5,
    DonaldsDock      : 6,
    MinniesMelodyland: 6,
    GoofySpeedway    : 6,
    TheBrrrgh        : 8,
    DaisyGardens     : 8,
    FunnyFarm        : 8,
    DonaldsDreamland : 8,
    OutdoorZone      : 8,
    BossbotHQ        : 12,
    SellbotHQ        : 9,
    CashbotHQ        : 10,
    LawbotHQ         : 11,
    PartyHood        : 13,
}
# Maps hoods to DNA file basenames (note: despite the original comment,
# these are storage names, not download phases).
dnaMap = {
    Tutorial         : "toontown_central",
    ToontownCentral  : "toontown_central",
    DonaldsDock      : "donalds_dock",
    MinniesMelodyland: "minnies_melody_land",
    GoofySpeedway    : "goofy_speedway",
    TheBrrrgh        : "the_burrrgh",
    DaisyGardens     : "daisys_garden",
    FunnyFarm        : "not done yet",
    DonaldsDreamland : "donalds_dreamland",
    OutdoorZone      : "outdoor_zone",
    BossbotHQ        : "cog_hq_bossbot",
    SellbotHQ        : "cog_hq_sellbot",
    CashbotHQ        : "cog_hq_cashbot",
    LawbotHQ         : "cog_hq_lawbot",
    GolfZone         : "golf_zone",
}
# Maps hoods to localized display names
hoodNameMap = {
    DonaldsDock      : TTLocalizer.DonaldsDock,
    ToontownCentral  : TTLocalizer.ToontownCentral,
    TheBrrrgh        : TTLocalizer.TheBrrrgh,
    MinniesMelodyland: TTLocalizer.MinniesMelodyland,
    DaisyGardens     : TTLocalizer.DaisyGardens,
    OutdoorZone      : TTLocalizer.OutdoorZone,
    FunnyFarm        : TTLocalizer.FunnyFarm,
    GoofySpeedway    : TTLocalizer.GoofySpeedway,
    DonaldsDreamland : TTLocalizer.DonaldsDreamland,
    BossbotHQ        : TTLocalizer.BossbotHQ,
    SellbotHQ        : TTLocalizer.SellbotHQ,
    CashbotHQ        : TTLocalizer.CashbotHQ,
    LawbotHQ         : TTLocalizer.LawbotHQ,
    Tutorial         : TTLocalizer.Tutorial,
    MyEstate         : TTLocalizer.MyEstate,
    GolfZone         : TTLocalizer.GolfZone,
    PartyHood        : TTLocalizer.PartyHood
}
# map of number of things to load per zone
safeZoneCountMap = {
    MyEstate         : 8,
    Tutorial         : 6,
    ToontownCentral  : 6,
    DonaldsDock      : 10,
    MinniesMelodyland: 5,
    GoofySpeedway    : 500,
    TheBrrrgh        : 8,
    DaisyGardens     : 9,
    FunnyFarm        : 500,
    DonaldsDreamland : 5,
    OutdoorZone      : 500,
    GolfZone         : 500,
    PartyHood        : 500,
}
# load counts for town (street) zones per hood
townCountMap = {
    # HACK! JNS just guessed at a tutorial count.
    # and SDN guessed at Estate count
    MyEstate         : 8,
    Tutorial         : 40,
    ToontownCentral  : 37,
    DonaldsDock      : 40,
    MinniesMelodyland: 40,
    GoofySpeedway    : 40,
    TheBrrrgh        : 40,
    DaisyGardens     : 40,
    FunnyFarm        : 40,
    DonaldsDreamland : 40,
    OutdoorZone      : 40,
    PartyHood        : 20,
}
# load counts for the hood (playground) zones themselves
hoodCountMap = {
    MyEstate         : 2,
    Tutorial         : 2,
    ToontownCentral  : 2,
    DonaldsDock      : 2,
    MinniesMelodyland: 2,
    GoofySpeedway    : 2,
    TheBrrrgh        : 2,
    DaisyGardens     : 2,
    FunnyFarm        : 2,
    DonaldsDreamland : 2,
    OutdoorZone      : 2,
    BossbotHQ        : 2,
    SellbotHQ        : 43,
    CashbotHQ        : 2,
    LawbotHQ         : 2,
    GolfZone         : 2,
    PartyHood        : 2,
}
# Number of buildings you must have in your name to earn stars.
# Note - these have gone up since the credit is based on the size of the building now
TrophyStarLevels = (
    10,   # A bronze star
    20,   # A spinning bronze star
    30,   # A silver star
    50,   # A spinning silver star
    75,   # A gold star
    100,  # A spinning gold star
)
# Star colors, parallel to TrophyStarLevels.
TrophyStarColors = (
    Vec4(0.9, 0.6, 0.2, 1),  # A bronze star
    Vec4(0.9, 0.6, 0.2, 1),  # A bronze star
    Vec4(0.8, 0.8, 0.8, 1),  # A silver star
    Vec4(0.8, 0.8, 0.8, 1),  # A silver star
    Vec4(1, 1, 0, 1),        # A gold star
    Vec4(1, 1, 0, 1),        # A gold star
)
# OTPGlobals
"""
ToonStandableGround = 0.707 # if ToonStandableGround > angle: toon is on ground.
ToonForwardSpeed = 16.0 # feet per second
ToonJumpForce = 24.0 # feet per second
ToonReverseSpeed = 8.0 # feet per second
ToonRotateSpeed = 80.0
# When you are "dead"
ToonForwardSlowSpeed = 6.0
ToonJumpSlowForce = 4.0 # feet per second
ToonReverseSlowSpeed = 2.5
ToonRotateSlowSpeed = 33.0
"""
# Classic-character movement speeds.
MickeySpeed = 5.0  # feet per second
VampireMickeySpeed = 1.15
# VampireMickeySpeed = 5.15
MinnieSpeed = 3.2  # feet per second
WitchMinnieSpeed = 1.8
# DonaldSpeed = 4.6 # feet per second
DonaldSpeed = 3.68  # feet per second
DaisySpeed = 2.3  # feet per second
GoofySpeed = 5.2  # feet per second
SuperGoofySpeed = 1.6  # fps
PlutoSpeed = 5.5  # feet per second per second
WesternPlutoSpeed = 3.2
ChipSpeed = 3  # feet per second
DaleSpeed = 3.5  # feet per second
DaleOrbitDistance = 3  # feet
SuitWalkSpeed = 4.8
# The various pre-defined pieCode values in the world. These are used
# in the throw-a-pie-wherever-you-want interface, particularly in the
# final boss battle.
PieCodeBossCog = 1
PieCodeNotBossCog = 2
PieCodeToon = 3
PieCodeBossInsides = 4
PieCodeDefensePan = 5  # defense pan for lawbot boss battle
PieCodeProsecutionPan = 6  # prosecution pan for lawbot boss battle
PieCodeLawyer = 7  # prosecution lawyers for lawbot boss battle
# And the splat colors, if any, that correspond to a hit on any of the
# above.  A value of None means the splat keeps its natural color.
PieCodeColors = {
    PieCodeBossCog   : None,  # A successful hit on the boss cog is in color.
    PieCodeNotBossCog: (0.8, 0.8, 0.8, 1),
    PieCodeToon      : None,  # hitting a toon is also in color.
}
# Boss Cog locomotion rates.
BossCogRollSpeed = 7.5
BossCogTurnSpeed = 20
BossCogTreadSpeed = 3.5
# Boss Cog attack codes:
BossCogDizzy = 0
BossCogElectricFence = 1
BossCogSwatLeft = 2
BossCogSwatRight = 3
BossCogAreaAttack = 4
BossCogFrontAttack = 5
BossCogRecoverDizzyAttack = 6
BossCogDirectedAttack = 7
BossCogStrafeAttack = 8
BossCogNoAttack = 9
BossCogGoonZap = 10
BossCogSlowDirectedAttack = 11
BossCogDizzyNow = 12
BossCogGavelStomp = 13
BossCogGavelHandle = 14
BossCogLawyerAttack = 15
BossCogMoveAttack = 16
BossCogGolfAttack = 17
BossCogGolfAreaAttack = 18
BossCogGearDirectedAttack = 19
BossCogOvertimeAttack = 20
# The amount of time it takes to play each attack (seconds); attacks not
# listed here have no fixed play duration.
BossCogAttackTimes = {
    BossCogElectricFence     : 0,
    BossCogSwatLeft          : 5.5,
    BossCogSwatRight         : 5.5,
    BossCogAreaAttack        : 4.21,
    BossCogFrontAttack       : 2.65,
    BossCogRecoverDizzyAttack: 5.1,
    BossCogDirectedAttack    : 4.84,
    BossCogNoAttack          : 6,
    BossCogSlowDirectedAttack: 7.84,
    BossCogMoveAttack        : 3,
    BossCogGolfAttack        : 6,
    BossCogGolfAreaAttack    : 7,
    BossCogGearDirectedAttack: 4.84,
    BossCogOvertimeAttack    : 5,
}
# The damage that each attack applies to a Toon.
BossCogDamageLevels = {
    BossCogElectricFence     : 1,
    BossCogSwatLeft          : 5,
    BossCogSwatRight         : 5,
    BossCogAreaAttack        : 10,
    BossCogFrontAttack       : 3,
    BossCogRecoverDizzyAttack: 3,
    BossCogDirectedAttack    : 3,
    BossCogStrafeAttack      : 2,
    BossCogGoonZap           : 5,
    BossCogSlowDirectedAttack: 10,
    BossCogGavelStomp        : 20,
    BossCogGavelHandle       : 2,
    BossCogLawyerAttack      : 5,
    BossCogMoveAttack        : 20,
    BossCogGolfAttack        : 15,
    BossCogGolfAreaAttack    : 15,
    BossCogGearDirectedAttack: 15,
    BossCogOvertimeAttack    : 10,
}
# Where are the Boss Cog's battles relative to him? (x, y, z, h, p, r)
BossCogBattleAPosHpr = (0, -25, 0, 0, 0, 0)
BossCogBattleBPosHpr = (0, 25, 0, 180, 0, 0)
# How many pie hits does it take to kill the Sellbot VP?
SellbotBossMaxDamage = 100
# Where is the Sellbot Boss sitting in the three stages of the
# VP sequence?
SellbotBossBattleOnePosHpr = (0, -35, 0, -90, 0, 0)
SellbotBossBattleTwoPosHpr = (0, 60, 18, -90, 0, 0)
SellbotBossBattleThreeHpr = (180, 0, 0)
SellbotBossBottomPos = (0, -110, -6.5)
SellbotBossDeathPos = (0, -175, -6.5)
# Where do the VP's doobers walk to?
SellbotBossDooberTurnPosA = (-20, -50, 0)
SellbotBossDooberTurnPosB = (20, -50, 0)
SellbotBossDooberTurnPosDown = (0, -50, 0)
SellbotBossDooberFlyPos = (0, -135, -6.5)
# How does the VP roll up the ramp?
SellbotBossTopRampPosA = (-80, -35, 18)
SellbotBossTopRampTurnPosA = (-80, 10, 18)
SellbotBossP3PosA = (-50, 40, 18)
SellbotBossTopRampPosB = (80, -35, 18)
SellbotBossTopRampTurnPosB = (80, 10, 18)
SellbotBossP3PosB = (50, 60, 18)
# How many points does it take to kill the Cashbot CFO?
CashbotBossMaxDamage = 500
# Where is the Cashbot Boss sitting in the CFO sequence?
CashbotBossOffstagePosHpr = (120, -195, 0, 0, 0, 0)
CashbotBossBattleOnePosHpr = (120, -230, 0, 90, 0, 0)
CashbotRTBattleOneStartPosHpr = (94, -220, 0, 110, 0, 0)
CashbotBossBattleThreePosHpr = (120, -315, 0, 180, 0, 0)
# Where are the starting points for the toons in battle 3?
CashbotToonsBattleThreeStartPosHpr = [
    (105, -285, 0, 208, 0, 0),
    (136, -342, 0, 398, 0, 0),
    (105, -342, 0, 333, 0, 0),
    (135, -292, 0, 146, 0, 0),
    (93, -303, 0, 242, 0, 0),
    (144, -327, 0, 64, 0, 0),
    (145, -302, 0, 117, 0, 0),
    (93, -327, 0, -65, 0, 0),
]
# How many safes in the final battle sequence, and where are they?
CashbotBossSafePosHprs = [
    (120, -315, 30, 0, 0, 0),  # safe 0 is special; it drops on from above.
    (77.2, -329.3, 0, -90, 0, 0),
    (77.1, -302.7, 0, -90, 0, 0),
    (165.7, -326.4, 0, 90, 0, 0),
    (165.5, -302.4, 0, 90, 0, 0),
    (107.8, -359.1, 0, 0, 0, 0),
    (133.9, -359.1, 0, 0, 0, 0),
    (107.0, -274.7, 0, 180, 0, 0),
    (134.2, -274.7, 0, 180, 0, 0),
]
# How many cranes, and where are they?
CashbotBossCranePosHprs = [
    (97.4, -337.6, 0, -45, 0, 0),
    (97.4, -292.4, 0, -135, 0, 0),
    (142.6, -292.4, 0, 135, 0, 0),
    (142.6, -337.6, 0, 45, 0, 0),
]
# How long does it take an object to fly from the ground to the magnet?
CashbotBossToMagnetTime = 0.2
# And how long to straighten out when dropped?
CashbotBossFromMagnetTime = 1
# How much impact does it take to hit the | |
def get_context_data(self, **kwargs):
    """Build the template context for a course's invites page.

    Resolves the selected courselet (or falls back to the course's first
    one) and exposes invites, invite forms, enroll code, and site info.
    """
    courselet_pk = self.kwargs.get('courselet_pk')
    course = self.get_course()
    if courselet_pk:
        courselet = CourseUnit.objects.get(id=courselet_pk)
    else:
        # No explicit courselet in the URL: default to the first one.
        # NOTE(review): may be None when the course has no courselets.
        courselet = CourseUnit.objects.filter(course=course).first()
    # TODO: Cover this
    kwargs['invites'] = Invite.objects.my_invites(request=self.request).filter(
        enroll_unit_code=EnrollUnitCode.get_code(courselet, give_instance=True, isTest=True))  # pragma: no cover
    kwargs['invite_tester_form'] = self.form_class(
        initial={
            'type': 'tester',
            'course': self.get_course(),
        }
    )
    kwargs.update({
        'u_lessons': self.get_units_by_courselet(courselet)
    })
    if waffle.switch_is_active('ctms_invite_students'):
        # We no longer need a form
        # kwargs['invite_student_form'] = self.form_class(initial={'type': 'student', 'course': self.get_course()})
        if courselet:
            kwargs['enroll_code'] = EnrollUnitCode.get_code(courselet)
    kwargs['courselet'] = courselet
    kwargs['course'] = course
    kwargs['domain'] = '{0}://{1}'.format(self.request.scheme, Site.objects.get_current().domain)
    kwargs['courselets_email'] = settings.COURSELETS_EMAIL
    return kwargs
def get_form_kwargs(self):
    """Extend the default form kwargs with course, instructor, and the
    test enroll code for the courselet in the URL."""
    kwargs = super(InvitesListView, self).get_form_kwargs()
    kwargs['course'] = self.get_course()
    kwargs['instructor'] = self.request.user.instructor
    # TODO: Cover this
    # NOTE(review): get_code() receives the raw courselet_pk here, while
    # get_context_data passes a CourseUnit instance — confirm both are valid.
    kwargs['enroll_unit_code'] = EnrollUnitCode.get_code(self.kwargs.get('courselet_pk'),
                                                         give_instance=True, isTest=True) # pragma: no cover
    return kwargs
def get_initial(self):
    """Pre-populate the invite form with the current course."""
    return dict(course=self.get_course())
def form_valid(self, form):
    """Persist the invite, deliver it by e-mail, and flash a success note.

    (A student-type guard used to live here; invitations are tester-only now.)
    """
    response = super(InvitesListView, self).form_valid(form)
    # The invite instance exists now — send it before reporting success.
    self.object.send_mail(self.request, self)
    messages.add_message(self.request, messages.SUCCESS, "Invitation successfully sent")
    return response
def form_invalid(self, form):
    """Run the default invalid-form handling, then flash a warning."""
    result = super(InvitesListView, self).form_invalid(form)
    warning_text = "Invitation could not be sent because of errors listed below"
    messages.add_message(self.request, messages.WARNING, warning_text)
    return result
class JoinCourseView(CourseCoursletUnitMixin, View): # NewLoginRequiredMixin
    """Landing view for invitation links.

    Verifies that the visiting user matches the invite and routes testers
    into the invited course/courselet; anonymous visitors get a login or
    signup form pre-filled from the invite.
    """
    NEED_INSTRUCTOR = False

    def get(self, *args, **kwargs):
        invite = get_object_or_404(Invite, code=self.kwargs['code'])
        # A logged-in user whose e-mail differs from the invite's is logged
        # out so the actually-invited person can authenticate.
        if self.request.user.is_authenticated and invite.email != self.request.user.email:
            logout(self.request)
        if self.request.user.is_authenticated:
            if invite.user and invite.user == self.request.user or invite.email == self.request.user.email:
                # if user is a person for whom this invite
                if invite.type == 'tester':
                    messages.add_message(self.request, messages.SUCCESS,
                                         "You just joined course as tester")
                    invite.status = 'joined'
                    invite.save()
                    if invite.enroll_unit_code:
                        # Courselet-level invite: drop the tester into chat.
                        return redirect(reverse('chat:tester_chat_enroll',
                                                kwargs={'enroll_key': invite.enroll_unit_code.enrollCode}))
                    else:
                        return redirect(reverse('lms:tester_course_view', kwargs={'course_id': invite.course.id}))
                # TODO: It seems to be no longer needed owing to absent invites for students
                # elif invite.type == 'student':
                #     messages.add_message(self.request, messages.SUCCESS,
                #                          "You just joined course as student")
                #     invite.status = 'joined'
                #     invite.save()
                #     if invite.enroll_unit_code:
                #         return redirect(reverse('chat:chat_enroll',
                #                                 kwargs={'enroll_key': invite.enroll_unit_code.enrollCode}))
                #     else:
                #         return redirect(reverse('lms:course_view', kwargs={'course_id': invite.course.id}))
            # if user is not owned this invite
            return HttpResponseRedirect("{}?next={}".format(reverse('new_login'), self.request.path))
        else:
            # Anonymous visitor: bind this session to the invite flow via a
            # one-time hash and show login/signup pre-filled with the e-mail.
            u_hash = uuid4().hex
            self.request.session['u_hash'] = u_hash
            kwargs = dict(available_backends=load_backends(settings.AUTHENTICATION_BACKENDS))
            kwargs['u_hash'] = u_hash
            kwargs['next'] = self.request.path
            invite = get_object_or_404(Invite, code=self.kwargs['code'])
            init_data = {'next': kwargs['next'], 'email': invite.email, 'u_hash': kwargs['u_hash']}
            if invite.user:
                # user already registered
                # show login page
                kwargs['form'] = EmailLoginForm(initial=init_data)
                template_name = 'psa/new_custom_login.html'
            else:
                # user not yet registered
                # show signup page
                # try to find user with email
                user = invite.search_user_by_email(invite.email)
                if user:
                    # An account with the invited e-mail exists — attach it.
                    invite.user = user
                    invite.save()
                    kwargs['form'] = EmailLoginForm(initial=init_data)
                    template_name = 'psa/new_custom_login.html'
                else:
                    kwargs['form'] = SignUpForm(initial=init_data)
                    template_name = 'psa/signup.html'
            return self.render(template_name, kwargs)
class ResendInviteView(NewLoginRequiredMixin, CourseCoursletUnitMixin, View):
    """Re-send an existing invitation e-mail (course owners/instructors only)."""

    def post(self, request, code):
        """Resend the invite identified by ``code``; respond with JSON.

        Raises:
            Http404: when the requesting user neither owns the course nor
                holds an instructor role on it.
        """
        invite = get_object_or_404(Invite, code=code)
        # BUG FIX: the original called .exists() on an unsaved Role INSTANCE
        # (``Role(course=..., role=...)``), which raises AttributeError —
        # a permission check needs a queryset. NOTE(review): filtering by the
        # requesting user added — confirm Role has a ``user`` relation.
        is_instructor = Role.objects.filter(
            course=invite.course, role=Role.INSTRUCTOR, user=self.request.user
        ).exists()
        if invite.course.addedBy != self.request.user and not is_instructor:
            raise Http404()
        response = invite.send_mail(self.request, self)
        messages.add_message(self.request, messages.SUCCESS,
                             "We just resent invitation to {}".format(invite.email))
        return json_response(response)
class DeleteInviteView(NewLoginRequiredMixin, CourseCoursletUnitMixin, DeleteView):
    """Delete an invite belonging to the requesting user (looked up by code)."""
    query_pk_and_slug = True
    slug_url_kwarg = 'code'
    slug_field = 'code'
    pk_url_kwarg = 'pk'
    model = Invite

    def get_queryset(self, queryset=None):
        """Restrict deletion to the requester's own invites."""
        if queryset:
            # BUG FIX: was ``queryset.my_invitest(self.request)`` — a typo
            # that raised AttributeError whenever a queryset was supplied.
            return queryset.my_invites(self.request)
        return Invite.objects.my_invites(self.request)

    def get_success_url(self):
        """Return to the invite list of the courselet the invite belonged to."""
        kwargs = {
            'pk': self.get_object().course.id,
            'courselet_pk': self.get_object().enroll_unit_code.courseUnit.unit.id
        }
        return reverse('ctms:courselet_invite', kwargs=kwargs)

    def delete(self, request, *args, **kwargs):
        """Delete the invite, then flash a confirmation message."""
        response = super(DeleteInviteView, self).delete(request, *args, **kwargs)
        messages.add_message(self.request, messages.SUCCESS, "Invite successfully deleted")
        return response
class EmailSentView(TemplateView): # NewLoginRequiredMixin , CourseCoursletUnitMixin ?
    """Confirmation page shown after a verification e-mail has been sent."""
    template_name = 'ctms/email_sent.html'

    def get_context_data(self, **kwargs):
        """Expose the e-mail address stashed in the session for re-sending."""
        context = super(EmailSentView, self).get_context_data(**kwargs)
        context['resend_user_email'] = self.request.session.get('resend_user_email')
        return context
class ReorderUnits(NewLoginRequiredMixin, CourseCoursletUnitMixin, View):
    """AJAX endpoint that persists a new ordering of a courselet's units."""

    def post(self, request, course_pk, courselet_pk):
        """Apply the ordering given in ``request.POST['data']`` (JSON with an
        ``ordered_ids`` list).

        Returns:
            JsonResponse: ``{'ok': 1, ...}`` on success, ``{'ok': 0, 'err': ...}``
            when the payload is empty, has duplicate ids, or has unknown ids.
        """
        data = json.loads(request.POST.get('data') or '{}')
        # BUG FIX: data.get('ordered_ids') may be None — guard before iterating.
        ids = [int(i) for i in data.get('ordered_ids') or []]
        if not ids:
            return JsonResponse({'ok': 0, 'err': 'empty'})
        order = list(range(len(ids)))
        id_order = dict(zip(ids, order))
        # check that all ids are unique
        if len(set(ids)) != len(ids):
            # BUG FIX: was ``raise JsonResponse(...)`` — JsonResponse is not an
            # exception, so the original raised TypeError instead of answering.
            return JsonResponse({'ok': 0, 'err': 'not uniq'})
        courselet = self.get_courslet()
        units = self.get_units_by_courselet(courselet)
        old_ids = units.values_list('id', flat=True)
        old_order = units.values_list('order', flat=True)
        old_id_order = dict(zip(old_ids, old_order))
        # check that all provided ids are correct
        for _id in ids:
            if _id not in old_ids:
                # BUG FIX: same raise-vs-return defect as above.
                return JsonResponse({'ok': 0, 'err': 'not correct ids'})
        for unit in units:
            _id = unit.id
            order = id_order[_id]
            if old_id_order[_id] == order:
                continue  # already in place — skip the UPDATE
            unit.order = order
            unit.save()
        # The units listing is memoized per courselet — invalidate it.
        cache.delete(memoize.cache_key('get_units_by_courselet', courselet))
        return JsonResponse({'ok': 1, 'msg': 'Order has been changed!'})
class Onboarding(NewLoginRequiredMixin, TemplateView):
    """Onboarding dashboard showing the user's progress through setup steps."""
    template_name = 'ctms/onboarding.html'

    def get_context_data(self, **kwargs):
        """Collect the user's latest artifacts, the introduction course's
        enroll URL, and the per-step onboarding status flags."""
        context = super(Onboarding, self).get_context_data(**kwargs)
        # Latest artifacts created by the user — used to tick off steps.
        users_course = Course.objects.filter(addedBy=self.request.user).last()
        users_courselet = CourseUnit.objects.filter(
            addedBy=self.request.user,
            course=users_course
        ).last()
        users_thread = Lesson.objects.filter(addedBy=self.request.user, kind=Lesson.ANSWER).last()
        introduction_course_id = get_onboarding_setting(onboarding.INTRODUCTION_COURSE_ID)
        introduction_courselet_id = get_onboarding_setting(onboarding.INTRODUCTION_COURSELET_ID)
        course = Course.objects.filter(id=introduction_course_id).first()
        enroll_unit_code = EnrollUnitCode.objects.filter(
            courseUnit=introduction_courselet_id,
            isLive=False, isPreview=False, isTest=False
        ).first()
        if not enroll_unit_code:
            logger.warning('value: ONBOARDING_INTRODUCTION_COURSELET_ID = {} for courselet - not found!'.format(
                introduction_courselet_id))
        # Fall back to '#' when the introduction courselet is misconfigured.
        enroll_url = '/chat/enrollcode/{}'.format(
            enroll_unit_code.enrollCode or EnrollUnitCode.get_code(enroll_unit_code.courseUnit)
        ) if enroll_unit_code else '#'
        context.update(dict(
            introduction_course=course,
            users_course=users_course,
            users_courselet=users_courselet,
            users_thread=users_thread,
            enroll_url=enroll_url
        ))
        status = get_onboarding_status_with_settings(self.request.user.id)
        steps = {
            key: status.get(key) for key in get_onboarding_steps()
        }
        context.update(**steps)
        return context

    def get(self, request, *args, **kwargs):
        """Render the dashboard, or bounce to the course list when the
        onboarding feature switch is off."""
        if not waffle.switch_is_active('ctms_onboarding_enabled'):
            return redirect('ctms:my_courses')
        # BUG FIX: the original called ``super().get(kwargs)``, passing the
        # kwargs dict where the request object is expected (TemplateView.get
        # signature is (request, *args, **kwargs)); forward the real arguments.
        return super(Onboarding, self).get(request, *args, **kwargs)
class OnboardingBP1(TemplateView):
    """Best-practice #1 interactive onboarding page.

    Creates and logs in a temporary anonymous user on first visit so the
    calculator's form data can be persisted without registration.
    """
    template_name = 'ctms/onboarding_bp1.html'
    model = BestPractice1
    # Defaults shown in the calculator before the user saves anything.
    initial_data = {
        'student_count': 200,
        'misconceptions_count': 5,
        'question_count': 24,
        'mean_percent': 72
    }
    form = BestPractice1Form

    def get_context_data(self, **kwargs):
        """Ensure a (possibly temporary) logged-in user, then build the
        calculator and PDF forms from their last saved data or the defaults."""
        context = super(OnboardingBP1, self).get_context_data(**kwargs)
        # Timestamp suffix keeps generated anonymous usernames unique.
        _id = int(time.mktime(datetime.now().timetuple()))
        user = self.request.user
        if isinstance(user, AnonymousUser):
            user = User.objects.get_or_create(username='anonymous' + str(_id),
                                              first_name='Temporary User')[0]
            temporary_group, created = Group.objects.get_or_create(name='Temporary')
            user.groups.add(temporary_group)
            # Manual backend assignment is required before login() without
            # authenticate().
            user.backend = 'django.contrib.auth.backends.ModelBackend'
            login(self.request, user)
            # Set expiry time to year in future
            one_year = 31536000
            self.request.session.set_expiry(one_year)
        initial_data = self.model.objects.filter(user=user).values().last()
        if not initial_data:
            initial_data = self.initial_data
        form = self.form(initial=initial_data)
        context.update({
            'form': form,
            # NOTE(review): initial_data is passed positionally, i.e. as the
            # form's bound *data*, not as ``initial`` — confirm that is intended.
            'pdf_form': BestPractice1PdfForm(initial_data),
            'form_data': initial_data,
            'available_backends': load_backends(settings.AUTHENTICATION_BACKENDS)
        })
        return context

    def post(self, request, *args, **kwargs):
        """Persist the submitted best-practice data (and optional PDF upload);
        re-render with the bound form when validation fails."""
        context = self.get_context_data()
        instance = BestPractice1.objects.filter(user=request.user.id).first()
        form = BestPractice1PdfForm(request.POST, request.FILES, instance=instance)
        if form.is_valid():
            form.save()
            return redirect('ctms:onboarding')
        context.update({
            'pdf_form': form,
        })
        return render(request, self.template_name, context)
class OnboardingBP2(OnboardingBP1):
    """Best-practice #2 page — reuses the OnboardingBP1 flow with its own
    template, model, form, and defaults."""
    template_name = 'ctms/onboarding_bp2.html'
    model = BestPractice2
    # Default shown before the user saves anything.
    initial_data = {
        'percent_engaged': 25
    }
    form = BestPractice2Form
class BestPracticesCourseView(NewLoginRequiredMixin, ListView):
    """List course-scoped best-practice templates for a course the user may edit."""
    context_object_name = 'best_practices_templates'
    template_name = 'ctms/course_best_practices.html'
    model = BestPracticeTemplate
    queryset = BestPracticeTemplate.objects.filter(scope=BestPracticeTemplate.COURSE)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['course'] = self.get_course()
        return context

    def get_course(self, queryset=None):
        """Fetch the URL's course, restricted to its owner or instructors.

        Raises:
            Http404: when the course is missing or inaccessible.
        NOTE(review): implicitly returns None when 'pk' is absent from the
        URL kwargs — confirm callers tolerate a None course.
        """
        if 'pk' in self.kwargs:
            course = Course.objects.filter(
                models.Q(id=self.kwargs.get('pk')) &
                (
                    models.Q(addedBy=self.request.user) |
                    models.Q(role__role=Role.INSTRUCTOR, role__user=self.request.user)
                )
            ).distinct().first()
            if not course:
                raise Http404()
            return course
class BestPracticesCourseletView(NewLoginRequiredMixin, CourseCoursletUnitMixin, ListView):
    """List courselet-scoped best-practice templates for a courselet the
    requesting user may edit."""
    context_object_name = 'best_practices_templates'
    template_name = 'ctms/courselet_best_practices.html'
    model = BestPracticeTemplate
    queryset = BestPracticeTemplate.objects.filter(scope=BestPracticeTemplate.COURSELET)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        courselet = self.get_courselet()
        context['courselet'] = courselet
        context.update({
            'u_lessons': self.get_units_by_courselet(courselet)
        })
        return context

    def get_courselet(self, queryset=None):
        """Fetch the URL's courselet, restricted to its owner, the course
        owner, or a course instructor.

        Raises:
            Http404: when the courselet is missing or inaccessible.
        NOTE(review): implicitly returns None when 'courselet_pk' is absent —
        confirm callers tolerate that.
        """
        if 'courselet_pk' in self.kwargs:
            courselet = CourseUnit.objects.filter(
                models.Q(id=self.kwargs.get('courselet_pk')) &
                (
                    models.Q(addedBy=self.request.user) |
                    models.Q(course__role__role=Role.INSTRUCTOR, course__role__user=self.request.user) |
                    models.Q(course__addedBy=self.request.user)
                )
            ).distinct().first()
            if not courselet:
                raise Http404()
            return courselet
class BestPracticeCalculation(NewLoginRequiredMixin, DetailView):
    """Detail page rendering a best practice's calculation inputs."""
    model = BestPractice
    template_name = 'ctms/best_practice_calculation.html'
    # Scope discriminators matched against BestPracticeTemplate.scope.
    course = 'course'
    courselet = 'courselet'

    def get_context_data(self, **kwargs):
        """
        Get context for BP calculation.
        Notes:
            - reorder fields by order key
        Example:
            {
                "field1": {
                    .....
                    "order": 1,
                    .....
                },
                "field2": {
                    .....
                    "order": 2,
                    .....
                }
            }
        """
        context = super().get_context_data(**kwargs)
        # Rebuild the calculation dict sorted by each field's 'order' key.
        context['input_data'] = {
            field[0]: field[1] for field in
            sorted(self.object.template.calculation.items(), key=lambda x: x[1].get('order'))}
        context['best_practice_template_id'] = self.object.template.id
        context['best_practice_data'] = self.object.data or {}
        context['course'] = self.object.course
        return context
class BestPracticeActivation(DetailView):
    """Activation page for a best practice; POST (defined further below)
    applies the practice's data and optional activation action."""
    model = BestPractice
    template_name = 'ctms/best_practice_activation.html'
    # Scope discriminators matched against BestPracticeTemplate.scope.
    course = 'course'
    courselet = 'courselet'

    def create_courselet(self, *args, **kwargs) -> str:
        """Activation action: create a courselet named after POST['exam_name'],
        link it to the given best practice, and return the URL of that
        courselet's best-practice page.

        Expects ``course`` and ``best_practice`` in kwargs.
        """
        # NOTE(review): return annotation corrected from HttpResponseRedirect —
        # reverse() yields a URL string, not a response object.
        ob = CreateCourseletForm({'title': self.request.POST.get('exam_name')}).save(commit=False)
        ob.addedBy = self.request.user
        ob.save()
        ob.course_unit = CourseUnit.objects.create(
            unit=ob,
            course=kwargs.get('course'),
            addedBy=self.request.user,
            order=0,
        )
        ob.save()
        best_practice = kwargs.get('best_practice')
        best_practice.courselet = ob.course_unit
        best_practice.save()
        return reverse('ctms:courselet_best_practice',
                       kwargs={
                           'course_pk': kwargs.get('course').id,
                           'courselet_pk': ob.course_unit.id
                       })

    def get_context_data(self, **kwargs):
        """Expose the template's activation config (empty dict when unset)."""
        context = super().get_context_data(**kwargs)
        context['activation_data'] = self.object.template.activation if self.object.template.activation else {}
        return context
def post(self, request, *args, **kwargs) -> HttpResponseRedirect:
best_practice = self.get_object()
course = get_object_or_404(Course, id=kwargs.get('course_pk'))
if best_practice.template.scope == self.course:
target = reverse('ctms:course_best_practice',
kwargs={'pk': kwargs.get('course_pk')})
if BestPracticeTemplate.objects.filter(id=best_practice.template.id, activation__action__isnull=False).exists() and \
hasattr(self, best_practice.template.activation.get('action')):
# TODO rewrite this to get action code authomatically
action = getattr(self, best_practice.template.activation.get('action'))
target = action(best_practice=best_practice, course=course)
elif course.courseunit_set.exists():
courselet = course.courseunit_set.first()
target = reverse('ctms:courselet_best_practice',
kwargs={
'course_pk': kwargs.get('course_pk'),
'courselet_pk': courselet.id
})
else:
target = reverse('ctms:courslet_create', kwargs={'course_pk': kwargs.get('course_pk')})
elif best_practice.template.scope == self.courselet:
target = reverse('ctms:courselet_best_practice',
kwargs={'course_pk': kwargs.get('course_pk'), 'courselet_pk': kwargs.get('courselet_pk')})
if BestPracticeTemplate.objects.filter(id=best_practice.template.id, activation__action__isnull=False).exists() and \
hasattr(self, best_practice.template.activation.get('action')):
action = getattr(self, best_practice.template.activation.get('action'))
target = action(best_practice=best_practice, course=course)
data = best_practice.data or {}
data.update(request.POST.dict())
del data['csrfmiddlewaretoken']
best_practice.active, best_practice.data = True, data
best_practice.save()
course.apply_from(data, commit=True)
best_practice.courselet.unit.apply_from(data, commit=True) if best_practice.courselet else None
if 'upload_file' in self.request.FILES:
bp_form = UploadFileBPForm(self.request.POST, | |
if len(self.for_dequeue) == 0:
self._carry_over(self.for_enqueue, self.for_dequeue)
return self.for_dequeue.pop()
def _carry_over(self, src, dest):
while len(src) != 0:
dest.push(src.pop())
return MyQueue
def problem_3_6(stack):
    """ Write a program to sort a stack in ascending order. You should not make
    any assumptions about how the stack is implemented. The following are the
    only functions that should be used to write this program:
    push | pop | peek | isEmpty.

    Implemented here without an auxiliary stack: recursively bubble the
    maximum to the top (bubble sort on a stack), set it aside, sort the rest,
    then push it back — leaving the smallest at the bottom, largest on top.
    """
    def bubble_max_to_top(s):
        """Re-push the top two elements so the larger ends up on top,
        recursing so the overall maximum surfaces."""
        if len(s) <= 1:
            return s
        top = s.pop()
        s = bubble_max_to_top(s)
        if s.peek() > top:
            below = s.pop()
            s.push(top)
            s.push(below)
        else:
            s.push(top)
        return s

    def sort_ascending(s):
        """Pop the max off, sort what remains, put the max back on top."""
        if s.is_empty():
            return s
        s = bubble_max_to_top(s)
        biggest = s.pop()
        s = sort_ascending(s)
        s.push(biggest)
        return s

    return sort_ascending(stack)
# Chapter 4: Trees and Graphs
class TreeNode(object):
    """A node in a tree with an arbitrary number of children."""

    def __init__(self, key):
        self.key = key        # payload stored in this node
        self.parent = None    # wired up by whoever links the tree
        self.children = []    # arbitrary branching factor

    def is_leaf(self):
        """A node with no children is a leaf."""
        return not self.children
def problem_4_1(root):
    """ Implement a function to check if a tree is balanced. For the purposes
    of this question, a balanced tree is defined to be a tree such that no two
    leaf nodes differ in distance from the root by more than one.

    Solution: iterative pre-order traversal collecting each leaf's depth;
    the tree is balanced iff the max and min leaf depths differ by <= 1.

    Args:
        root: node object exposing .is_leaf() and .children.
    Returns:
        bool, True when the tree is balanced.
    """
    # FIX: dropped the unused ``max_leaf_depth`` local from the original
    # (it was assigned None and never read).
    leaf_depths = set()
    node_stack = [(root, 0)]
    while node_stack:
        node, depth = node_stack.pop()
        if node.is_leaf():
            leaf_depths.add(depth)
        else:
            for child in node.children:
                node_stack.append((child, depth + 1))
    return max(leaf_depths) - min(leaf_depths) <= 1
class GraphVertex(object):
    """A vertex of a directed graph: a key plus its out-neighbours."""

    def __init__(self, key):
        self.key = key
        self.adjacent = []  # vertices reachable via one outgoing edge
def problem_4_2(start, end):
    """ Given a directed graph, design an algorithm to find out whether there
    is a route between two nodes.

    Solution: Depth-First Search from ``start``, collecting reachable keys.
    (FIX: the inner helper was named ``bfs`` although the stack-based
    traversal is depth-first — renamed to match what it does.)

    Args:
        start, end: vertex objects exposing .key and .adjacent.
    Returns:
        bool, True when ``end`` is reachable from ``start``.
    """
    def dfs(origin):
        """Return the set of keys reachable from ``origin`` (inclusive)."""
        vertex_stack = [origin]
        explored_vertices = set()
        while vertex_stack:
            vertex = vertex_stack.pop()
            explored_vertices.add(vertex.key)
            for neighbour_vertex in vertex.adjacent:
                if neighbour_vertex.key not in explored_vertices:
                    vertex_stack.append(neighbour_vertex)
        return explored_vertices

    return end.key in dfs(start)
def problem_4_3(sorted_arr):
    """ Given a sorted (increasing order) array, write an algorithm to create
    a binary tree with minimal height.

    Solution: minimal height implies perfectly balanced — root each subtree
    at the middle element of its index range.

    Returns:
        TreeNode root, or None for an empty array. Each node's ``children``
        is a two-element list [left, right] where either entry may be None.
    """
    def build_node(arr, lo, hi):
        # FIX: renamed the index bounds to lo/hi — the original reused
        # ``left``/``right`` for both the bounds and the child nodes,
        # shadowing the parameters mid-function.
        if lo > hi:
            return None
        middle = (lo + hi) // 2  # integer midpoint (was int(float(...) / 2))
        node = TreeNode(arr[middle])
        left_child = build_node(arr, lo, middle - 1)
        right_child = build_node(arr, middle + 1, hi)
        node.children = [left_child, right_child]
        return node

    return build_node(sorted_arr, 0, len(sorted_arr) - 1)
def problem_4_4(tree):
    """ Given a binary search tree, design an algorithm which creates a linked
    list of all the nodes at each depth (i.e., if you have a tree with depth D,
    you'll have D linked lists).

    Solution: breadth-first traversal carrying each node's depth; heads and
    tails of the per-depth linked lists are tracked in parallel arrays so
    appends are O(1).

    Returns:
        list of SingleLinkedListNode heads, one per depth.
    """
    heads = []  # first linked-list node at each depth
    tails = []  # last linked-list node at each depth
    pending = deque([(tree, 0)])
    while pending:
        node, depth = pending.popleft()
        ll_node = SingleLinkedListNode(node)
        if depth == len(heads):
            # First node seen at this depth: open a new list.
            heads.append(ll_node)
            tails.append(ll_node)
        else:
            tails[depth].next = ll_node
            tails[depth] = ll_node
        for child in node.children:
            pending.append((child, depth + 1))
    return heads
class BinaryTreeNode(object):
    """A binary-tree node with a parent back-pointer."""

    def __init__(self, key):
        self.key = key
        # Links are wired up by the tree-building code.
        self.parent = None
        self.left = None
        self.right = None
def problem_4_5(tree):
    """ Write an algorithm to find the 'next' node (i.e., in-order successor)
    of a given node in a binary search tree where each node has a link to its
    parent.

    Solution: There are two cases:
    1. the node has a right subtree, in which case his immediate successor is
    the smallest node in the right subtree.
    2. if the node has an empty right subtree, then go up the ancestors until
    you arrive from a left child. If you reach the root without doing so, the
    node is the maximum and has no successor.

    Returns:
        the successor node, or None when ``tree`` holds the maximum key.
    """
    def get_min(node):
        # Leftmost descendant == minimum of the subtree.
        if node.left == None:
            return node
        return get_min(node.left)

    def successor(node):
        if node.right != None:
            return get_min(node.right)
        # FIX: the original looped ``while node != None`` and dereferenced
        # node.parent.left, crashing with AttributeError once it walked past
        # the root (i.e. for the tree's maximum node). Stop at the root.
        while node.parent != None:
            if node.parent.left == node:
                return node.parent
            node = node.parent
        return None

    return successor(tree)
def problem_4_6(node1, node2):
    """ Design an algorithm and write code to find the first common ancestor of
    two nodes in a binary tree. Avoid storing additional nodes in a data
    structure. NOTE: This is not necessarily a binary search tree.

    Approach: for each ancestor of node1 (including node1 itself), walk the
    ancestor chain of node2 looking for a match; the first hit is the lowest
    common ancestor. O(h1*h2) time, O(1) extra space.
    """
    ancestor = node1
    while ancestor != None:
        candidate = node2
        while candidate != None:
            if candidate == ancestor:
                return ancestor
            candidate = candidate.parent
        ancestor = ancestor.parent
    return None
def problem_4_7(T1, T2):
    """ You have two very large binary trees: T1, with millions of nodes, and
    T2, with hundreds of nodes. Create an algorithm to decide if T2 is a
    subtree of T1.

    Strategy: scan T1 for nodes whose key matches T2's root; on each match,
    compare the two trees node-by-node.
    """
    def trees_equal(a, b):
        # Both empty -> identical; exactly one empty -> different.
        if a is None and b is None:
            return True
        if a is None or b is None:
            return False
        return (a.key == b.key
                and trees_equal(a.left, b.left)
                and trees_equal(a.right, b.right))

    def scan(haystack, needle):
        """True when ``needle`` occurs as a subtree of ``haystack``."""
        if haystack is None and needle is None:
            return True
        if haystack is None or needle is None:
            return False
        if haystack.key == needle.key and trees_equal(haystack, needle):
            return True
        return scan(haystack.left, needle) or scan(haystack.right, needle)

    return scan(T1, T2)
def problem_4_8(tree, value):
    """ You are given a binary tree in which each node contains a value. Design
    an algorithm to print all paths which sum up to that value. Note that it
    can be any path in the tree, it does not have to start at the root.

    Returns:
        list of paths (each a list of keys) summing to ``value``; None when
        ``tree`` is None.
    """
    def paths_from(node, target):
        """All downward paths starting at ``node`` whose keys sum to ``target``.

        Returns:
            list, of lists, of keys.
        """
        if node is None:
            return []
        found = []
        if node.key == target:
            found.append([])
        remainder = target - node.key
        found.extend(paths_from(node.left, remainder))
        found.extend(paths_from(node.right, remainder))
        # Prefix every discovered path with this node's key.
        for path in found:
            path.insert(0, node.key)
        return found

    def collect(node, target):
        """Paths summing to ``target`` starting at any node in this subtree."""
        if node is None:
            return
        found = paths_from(node, target)
        if node.left is not None:
            found.extend(collect(node.left, target))
        if node.right is not None:
            found.extend(collect(node.right, target))
        return found

    return collect(tree, value)
# Chapter 5: Bit Manipulation
def problem_5_1(n, m, i, j):
    """ You are given two 32-bit numbers, N and M, and two bit positions, i and
    j. Write a method to set all bits between i and j in N equal to M (e.g., M
    becomes a substring of N located at i and starting at j).

    Example:
        Input: N = 10000000000, M = 10101, i = 2, j = 6
        Output: N = 10001010100

    FIX: the original derived the clearing mask from len(bin(n)), tying the
    mask's width to n's magnitude; Python ints are unbounded, so build the
    mask as "all ones except bits i..j" via complement, which preserves every
    bit above j for any n.
    """
    # Ones everywhere except positions i..j inclusive.
    keep_mask = ~(((1 << (j - i + 1)) - 1) << i)
    return (n & keep_mask) | (m << i)
def problem_5_2(n):
""" Given a (decimal - e.g. 3.72) number that is passed in as a string,
print the binary representation. If the number can not be represented
accurately in binary, print ERROR.
"""
out = ''
# Find the largest power of 2.
i = 0
while 2**i < n:
i += 1
i | |
and(8), python(7), a(4), programming(3), has(3), and the(3)
encoder = TokenSequenceEncoder(default_length=10, limit_vocabulary=10)
encoder.prepare(corpus, show_progress=False)
# encode test sentence
encoded_test_sentences = encoder.encode([test_sentence, "and"], show_progress=False)
# padding to size 10 and two sentences
assert encoded_test_sentences.shape == (2, 10)
# padding with '<PAD>' (1 chars)
np.testing.assert_array_equal(
encoded_test_sentences[0, :1],
np.array([encoder.padding_token_index]*1))
# first word is '<START>'
assert encoded_test_sentences[0, 1] == encoder.start_token_index
# second word is 'Python' (2nd most common + 4 reserved token)
assert encoded_test_sentences[0, 2] == 5
# thord word is 'is' (not in the limited vocab -> OOV)
assert encoded_test_sentences[0, 3] == encoder.oov_token_index
# fourth word is 'a' (3rd most common + 4 reserved token)
assert encoded_test_sentences[0, 4] == 6
# fifth word is 'multi' (unknown -> OOV)
assert encoded_test_sentences[0, 5] == encoder.oov_token_index
# sixth word is 'paradigm' (unknown -> OOV)
assert encoded_test_sentences[0, 6] == encoder.oov_token_index
# seventh word is 'programming' (4th most common + 4 reserved token)
assert encoded_test_sentences[0, 7] == 7
# eighth word is 'language' (not in the limited vocab -> OOV)
assert encoded_test_sentences[0, 8] == encoder.oov_token_index
# last word is '<END>'
assert encoded_test_sentences[0, 9] == encoder.end_token_index
# padding with '<PAD>' (7 chars)
np.testing.assert_array_equal(
encoded_test_sentences[1, :7],
np.array([encoder.padding_token_index]*7))
# first word after is '<START>'
assert encoded_test_sentences[1, 7] == encoder.start_token_index
# second word is 'and' (most common + 4 reserved token)
assert encoded_test_sentences[1, 8] == 4
# last word is '<END>'
assert encoded_test_sentences[1, 9] == encoder.end_token_index
# decode
sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False)
assert sequence_list == ["<PAD>", "<START>", "python", "<OOV>", "a", "<OOV>", "<OOV>", "programming", "<OOV>",
"<END>"]
sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False)
assert sequence_list == ["<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<START>", "and", "<END>"]
# decode w/o control chars
sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False, show_start_end=False, show_padding=False)
assert sequence_list == ["python", "<OOV>", "a", "<OOV>", "<OOV>", "programming", "<OOV>"]
sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False, show_start_end=False, show_padding=False)
assert sequence_list == ["and"]
def test_padded_sequence_encoder_limit_vocab_and_top_words():
    """Encode/decode with ``limit_vocabulary=22`` AND ``skip_top_words=3``:
    the three most frequent corpus words map to <OOV>, surviving words keep
    frequency-ranked indices offset by the 4 reserved tokens."""
    # build a vocab of size 22 including:
    # - reserved token <PAD>, <OOV>, <START>, and <END>
    # - plus the top 6 words in the corpus:
    #   programming(3), has(3), the(3), is(2), language(2), by(2), van(2), rossum(2), in(2), that(2), it(2),
    #   large(2), community(2), as(2), are(2), cpython(2), of(2), software(2)
    # - ignored words (top 3): and(8), python(7), a(4)
    encoder = TokenSequenceEncoder(default_length=10, skip_top_words=3, limit_vocabulary=22)
    encoder.prepare(corpus, show_progress=False)
    # encode test sentence
    encoded_test_sentences = encoder.encode([test_sentence, "Python"], show_progress=False)
    # padding to size 10 and two sentences
    assert encoded_test_sentences.shape == (2, 10)
    # padding with '<PAD>' — NOTE(review): original comment said "2 chars"
    # but only index 0 is compared below; confirm intended.
    np.testing.assert_array_equal(
        encoded_test_sentences[0, :1],
        np.array([encoder.padding_token_index]))
    # first word is '<START>'
    assert encoded_test_sentences[0, 1] == encoder.start_token_index
    # second word is 'Python' (not in the limited vocab (among top-3) -> OOV)
    assert encoded_test_sentences[0, 2] == encoder.oov_token_index
    # third word is 'is' (7th most common - top-3 words + 4 reserved token)
    assert encoded_test_sentences[0, 3] == 7
    # fourth word is 'a' (not in the limited vocab (among top-3) -> OOV)
    assert encoded_test_sentences[0, 4] == encoder.oov_token_index
    # fifth word is 'multi' (unknown -> OOV)
    assert encoded_test_sentences[0, 5] == encoder.oov_token_index
    # sixth word is 'paradigm' (unknown -> OOV)
    assert encoded_test_sentences[0, 6] == encoder.oov_token_index
    # seventh word is 'programming' (4th most common - top-3 words + 4 reserved token)
    assert encoded_test_sentences[0, 7] == 4
    # eighth word is 'language' (8th most common - top-3 words + 4 reserved token)
    assert encoded_test_sentences[0, 8] == 8
    # last word is '<END>'
    assert encoded_test_sentences[0, 9] == encoder.end_token_index
    # padding with '<PAD>' (7 positions) — NOTE(review): original comment
    # said "6 chars" but 7 positions are compared; confirm intended.
    np.testing.assert_array_equal(
        encoded_test_sentences[1, :7],
        np.array([encoder.padding_token_index]*7))
    # first word after is '<START>'
    assert encoded_test_sentences[1, 7] == encoder.start_token_index
    # second word is 'and' (not in the limited vocab (among top-3) -> OOV)
    assert encoded_test_sentences[1, 8] == encoder.oov_token_index
    # last word is '<END>'
    assert encoded_test_sentences[1, 9] == encoder.end_token_index
    # decode
    sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False)
    assert sequence_list == ["<PAD>", "<START>", "<OOV>", "is", "<OOV>", "<OOV>", "<OOV>", "programming",
                             "language", "<END>"]
    sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False)
    assert sequence_list == ["<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<START>", "<OOV>", "<END>"]
    # decode w/o control chars
    sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False, show_start_end=False, show_padding=False)
    assert sequence_list == ["<OOV>", "is", "<OOV>", "<OOV>", "<OOV>", "programming", "language"]
    sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False, show_start_end=False, show_padding=False)
    assert sequence_list == ["<OOV>"]
def test_padded_sequence_encoder_limit_vocab_and_top_words_no_start_end_token():
    """Same limited-vocab / skip-top-words setup as above but with
    ``add_start_end_indicators=False``: no <START>/<END> tokens are emitted,
    so padding fills the extra positions instead."""
    # build a vocab of size 22 including:
    # - reserved token <PAD>, <OOV>, <START>, and <END>
    # - plus the top 6 words in the corpus:
    #   programming(3), has(3), the(3), is(2), language(2), by(2), van(2), rossum(2), in(2), that(2), it(2),
    #   large(2), community(2), as(2), are(2), cpython(2), of(2), software(2)
    # - ignored words (top 3): and(8), python(7), a(4)
    encoder = TokenSequenceEncoder(default_length=10, skip_top_words=3, limit_vocabulary=22,
                                   add_start_end_indicators=False)
    encoder.prepare(corpus, show_progress=False)
    # encode test sentence
    encoded_test_sentences = encoder.encode([test_sentence, "Python"], show_progress=False)
    # padding to size 10 and two sentences
    assert encoded_test_sentences.shape == (2, 10)
    # padding with '<PAD>' (3 token)
    np.testing.assert_array_equal(
        encoded_test_sentences[0, :3],
        np.array([encoder.padding_token_index]*3))
    # first word is 'Python' (not in the limited vocab (among top-3) -> OOV)
    assert encoded_test_sentences[0, 3] == encoder.oov_token_index
    # second word is 'is' (7th most common - top-3 words + 4 reserved token)
    assert encoded_test_sentences[0, 4] == 7
    # third word is 'a' (not in the limited vocab (among top-3) -> OOV)
    assert encoded_test_sentences[0, 5] == encoder.oov_token_index
    # fourth word is 'multi' (unknown -> OOV)
    assert encoded_test_sentences[0, 6] == encoder.oov_token_index
    # fifth word is 'paradigm' (unknown -> OOV)
    assert encoded_test_sentences[0, 7] == encoder.oov_token_index
    # sixth word is 'programming' (4th most common - top-3 words + 4 reserved token)
    assert encoded_test_sentences[0, 8] == 4
    # seventh word is 'language' (8th most common - top-3 words + 4 reserved token)
    assert encoded_test_sentences[0, 9] == 8
    # padding with '<PAD>' (9 token)
    np.testing.assert_array_equal(
        encoded_test_sentences[1, :9],
        np.array([encoder.padding_token_index]*9))
    # first word is 'and' (not in the limited vocab (among top-3) -> OOV)
    assert encoded_test_sentences[1, 9] == encoder.oov_token_index
    # decode
    sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False)
    assert sequence_list == ["<PAD>", "<PAD>", "<PAD>", "<OOV>", "is", "<OOV>", "<OOV>", "<OOV>", "programming",
                             "language"]
    sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False)
    assert sequence_list == ["<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<OOV>"]
    # decode w/o control chars
    sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False, show_padding=False)
    assert sequence_list == ["<OOV>", "is", "<OOV>", "<OOV>", "<OOV>", "programming", "language"]
    sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False, show_padding=False)
    assert sequence_list == ["<OOV>"]
def test_padded_sequence_encoder_limit_vocab_and_top_words_no_start_end_token_pad_end():
# build a vocab of size 22 including:
# - reserved token <PAD>, <OOV>, <START>, and <END>
# - plus the top 6 words in the corpus:
# programming(3), has(3), the(3), is(2), language(2), by(2), van(2), rossum(2), in(2), that(2), it(2),
# large(2), community(2), as(2), are(2), cpython(2), of(2), software(2)
# - ignored words (top 3): and(8), python(7), a(4)
encoder = TokenSequenceEncoder(default_length=10, skip_top_words=3, limit_vocabulary=22,
add_start_end_indicators=False, pad_beginning=False)
encoder.prepare(corpus, show_progress=False)
# encode test sentence
encoded_test_sentences = encoder.encode([test_sentence, "Python"], show_progress=False)
# padding to size 10 and two sentences
assert encoded_test_sentences.shape == (2, 10)
# first word is 'Python' (not in the limited vocab (among top-3) -> OOV)
assert encoded_test_sentences[0, 0] == encoder.oov_token_index
# second word is 'is' (7th most common - top-3 words + 4 reserved token)
assert encoded_test_sentences[0, 1] == 7
# third word is 'a' (not in the limited vocab (among top-3) -> OOV)
assert encoded_test_sentences[0, 2] == encoder.oov_token_index
# fourth word is 'multi' (unknown -> OOV)
assert encoded_test_sentences[0, 3] == encoder.oov_token_index
# fifth word is 'paradigm' (unknown -> OOV)
assert encoded_test_sentences[0, 4] == encoder.oov_token_index
# sixth word is 'programming' (4th most common - top-3 words + 4 reserved token)
assert encoded_test_sentences[0, 5] == 4
# seventh word is 'language' (8th most common - top-3 words + 4 reserved token)
assert encoded_test_sentences[0, 6] == 8
# padding with '<PAD>' (3 token)
np.testing.assert_array_equal(
encoded_test_sentences[0, -3:],
np.array([encoder.padding_token_index]*3))
# first word is 'and' (not in the limited vocab (among top-3) -> OOV)
assert encoded_test_sentences[1, 0] == encoder.oov_token_index
# padding with '<PAD>' (9 chars)
np.testing.assert_array_equal(
encoded_test_sentences[1, -9:],
np.array([encoder.padding_token_index]*9))
# decode
sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False)
assert sequence_list == ["<OOV>", "is", "<OOV>", "<OOV>", "<OOV>", "programming", "language", "<PAD>", "<PAD>",
"<PAD>"]
sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False)
assert sequence_list == ["<OOV>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>"]
# decode w/o control chars
sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False, show_padding=False)
assert sequence_list == ["<OOV>", "is", "<OOV>", | |
-2740321. 0.0000000E+00 012104
6047375. 2297095. 0.0000000E+00 0.0000000E+00 63642.55 003104
-433045.5 -78139.93 0.0000000E+00 0.0000000E+00 -99830.28 300005
-258204.9 -6963.864 0.0000000E+00 0.0000000E+00 -150071.3 210005
932631.4 252438.4 0.0000000E+00 0.0000000E+00 -41604.31 120005
572208.1 139320.7 0.0000000E+00 0.0000000E+00 9618.409 030005
0.0000000E+00 0.0000000E+00 1069523. 249798.6 0.0000000E+00 201005
0.0000000E+00 0.0000000E+00 1310979. 366316.6 0.0000000E+00 111005
0.0000000E+00 0.0000000E+00 1360677. 323298.8 0.0000000E+00 021005
2130718. 594867.6 0.0000000E+00 0.0000000E+00 284457.2 102005
1030645. 279390.5 0.0000000E+00 0.0000000E+00 162570.4 012005
0.0000000E+00 0.0000000E+00 -144632.8 402.0765 0.0000000E+00 003005
-0.3368248E+12-0.1146664E+12 0.0000000E+00 0.0000000E+00 0.7657224E+10 200600
-0.5081769E+12-0.1765555E+12 0.0000000E+00 0.0000000E+00-0.9635114E+09 110600
-0.2288437E+12-0.8384577E+11 0.0000000E+00 0.0000000E+00-0.1363384E+11 020600
0.0000000E+00 0.0000000E+00-0.9783734E+11-0.7802681E+11 0.0000000E+00 101600
0.0000000E+00 0.0000000E+00-0.4210927E+11-0.1980049E+11 0.0000000E+00 011600
-0.6136732E+11-0.2484061E+11 0.0000000E+00 0.0000000E+00-0.1422966E+11 002600
0.0000000E+00 0.0000000E+00-0.1317150E+11-0.6684816E+10 0.0000000E+00 200501
0.0000000E+00 0.0000000E+00-0.3914004E+11-0.1586611E+11 0.0000000E+00 110501
0.0000000E+00 0.0000000E+00-0.2481132E+11-0.8360313E+10 0.0000000E+00 020501
0.1419356E+12 0.4257996E+11 0.0000000E+00 0.0000000E+00 0.2019861E+10 101501
0.7633211E+11 0.2266065E+11 0.0000000E+00 0.0000000E+00 0.2861882E+10 011501
0.0000000E+00 0.0000000E+00-0.1154974E+11 0.5765642E+10 0.0000000E+00 002501
-0.1809560E+11-0.5602657E+10 0.0000000E+00 0.0000000E+00-0.1516484E+10 200402
-0.2848094E+11-0.8874784E+10 0.0000000E+00 0.0000000E+00-0.2396815E+10 110402
-0.1107491E+11-0.3484768E+10 0.0000000E+00 0.0000000E+00-0.9677177E+09 020402
0.0000000E+00 0.0000000E+00-0.1123067E+11-0.4174161E+10 0.0000000E+00 101402
0.0000000E+00 0.0000000E+00-0.8376580E+10-0.3132043E+10 0.0000000E+00 011402
-0.4081773E+10-0.1145153E+10 0.0000000E+00 0.0000000E+00 0.3235563E+09 002402
0.0000000E+00 0.0000000E+00-0.1526199E+09-0.9380045E+08 0.0000000E+00 200303
0.0000000E+00 0.0000000E+00-0.2489594E+08-0.4426529E+08 0.0000000E+00 110303
0.0000000E+00 0.0000000E+00 0.2732984E+09 0.8049655E+08 0.0000000E+00 020303
0.1052568E+10 0.3196603E+09 0.0000000E+00 0.0000000E+00 0.2508135E+08 101303
0.7050269E+09 0.2192591E+09 0.0000000E+00 0.0000000E+00-0.1481134E+08 011303
0.0000000E+00 0.0000000E+00-0.6371331E+08 6152012. 0.0000000E+00 002303
-0.2020987E+09-0.5905280E+08 0.0000000E+00 0.0000000E+00-0.2402733E+08 200204
-0.2470431E+09-0.7324912E+08 0.0000000E+00 0.0000000E+00-0.2644767E+08 110204
-0.6563519E+08-0.2026683E+08 0.0000000E+00 0.0000000E+00 -5568992. 020204
0.0000000E+00 0.0000000E+00-0.7618228E+08-0.1643882E+08 0.0000000E+00 101204
0.0000000E+00 0.0000000E+00-0.6583044E+08-0.1478463E+08 0.0000000E+00 011204
-0.2309118E+08 -5752673. 0.0000000E+00 0.0000000E+00 -1146100. 002204
0.0000000E+00 0.0000000E+00 1287493. 427635.5 0.0000000E+00 200105
0.0000000E+00 0.0000000E+00 2136266. 728006.1 0.0000000E+00 110105
0.0000000E+00 0.0000000E+00 2454905. 876864.1 0.0000000E+00 020105
6500722. 2066683. 0.0000000E+00 0.0000000E+00 384868.3 101105
3910756. 1214993. 0.0000000E+00 0.0000000E+00 194123.6 011105
0.0000000E+00 0.0000000E+00 -1308001. -291897.2 0.0000000E+00 002105
-68573.56 -15670.06 0.0000000E+00 0.0000000E+00 -5975.401 200006
-70895.10 -16220.30 0.0000000E+00 0.0000000E+00 -6330.535 110006
4325.851 591.1550 0.0000000E+00 0.0000000E+00 -269.1406 020006
0.0000000E+00 0.0000000E+00 -81032.50 3442.553 0.0000000E+00 101006
0.0000000E+00 0.0000000E+00 -27165.24 6544.920 0.0000000E+00 011006
-55130.12 -13564.37 0.0000000E+00 0.0000000E+00 739.1821 002006
0.0000000E+00 0.0000000E+00-0.1255041E+11-0.1553706E+11 0.0000000E+00 100700
0.0000000E+00 0.0000000E+00 0.1720425E+10 0.1860944E+10 0.0000000E+00 010700
-0.1072923E+12-0.3941402E+11 0.0000000E+00 0.0000000E+00-0.1157819E+11 001700
0.6253754E+11 0.1924998E+11 0.0000000E+00 0.0000000E+00 0.2327151E+10 100601
0.3240680E+11 0.1000075E+11 0.0000000E+00 0.0000000E+00 0.2229289E+10 010601
0.0000000E+00 0.0000000E+00-0.4786455E+10 0.4282327E+10 0.0000000E+00 001601
0.0000000E+00 0.0000000E+00-0.3903854E+10-0.1425801E+10 0.0000000E+00 100502
0.0000000E+00 0.0000000E+00-0.2314700E+10-0.1077187E+10 0.0000000E+00 010502
-0.6089196E+10-0.1824231E+10 0.0000000E+00 0.0000000E+00 0.2204282E+09 001502
0.9207816E+09 0.2951955E+09 0.0000000E+00 0.0000000E+00 0.4106190E+08 100403
0.5213970E+09 0.1712754E+09 0.0000000E+00 0.0000000E+00 6631668. 010403
0.0000000E+00 0.0000000E+00 0.1633523E+09 0.5106185E+08 0.0000000E+00 001403
0.0000000E+00 0.0000000E+00-0.7351915E+08-0.1786218E+08 0.0000000E+00 100304
0.0000000E+00 0.0000000E+00-0.5421674E+08-0.1484240E+08 0.0000000E+00 010304
-0.6594104E+08-0.1961274E+08 0.0000000E+00 0.0000000E+00 -876841.2 001304
957272.8 483993.8 0.0000000E+00 0.0000000E+00 -278491.8 100205
842758.4 377279.0 0.0000000E+00 0.0000000E+00 -204014.5 010205
0.0000000E+00 0.0000000E+00 -1470356. -769785.5 0.0000000E+00 001205
0.0000000E+00 0.0000000E+00 -347866.2 -91446.44 0.0000000E+00 100106
0.0000000E+00 0.0000000E+00 -187173.1 -46705.02 0.0000000E+00 010106
-147678.0 -41063.65 0.0000000E+00 0.0000000E+00 5969.722 001106
-4728.298 -992.3116 0.0000000E+00 0.0000000E+00 -350.1544 100007
-3167.542 -686.2533 0.0000000E+00 0.0000000E+00 -194.6483 010007
0.0000000E+00 0.0000000E+00 2186.371 288.8825 0.0000000E+00 001007
-0.4819532E+11-0.1761934E+11 0.0000000E+00 0.0000000E+00-0.4699643E+10 000800
0.0000000E+00 0.0000000E+00-0.1659937E+10 0.6058646E+09 0.0000000E+00 000701
-0.2439093E+10-0.7640902E+09 0.0000000E+00 0.0000000E+00 0.4981126E+08 000602
0.0000000E+00 0.0000000E+00 0.1615679E+09 0.3158149E+08 0.0000000E+00 000503
-0.2706562E+08 -8781685. 0.0000000E+00 0.0000000E+00 672354.8 000404
0.0000000E+00 0.0000000E+00 474475.8 -255561.9 0.0000000E+00 000305
-53236.08 -17873.02 0.0000000E+00 0.0000000E+00 -395.6845 000206
0.0000000E+00 0.0000000E+00 5025.881 1131.072 0.0000000E+00 000107
-209.8264 -45.87516 0.0000000E+00 0.0000000E+00 -8.315602 000008"""
COSY_MAP_廖益诚五阶光学优化:str = """ 0.9204298 2.342948 0.0000000E+00 0.0000000E+00 0.2875828E-01 100000
-0.1511535 0.7016887 0.0000000E+00 0.0000000E+00-0.5728590E-02 010000
0.0000000E+00 0.0000000E+00 0.7342880 0.8951463 0.0000000E+00 001000
0.0000000E+00 0.0000000E+00-0.3362275 0.9519796 0.0000000E+00 000100
0.0000000E+00 0.0000000E+00 0.0000000E+00 0.0000000E+00 1.000000 000010
-0.9258508E-03-0.3360115E-01 0.0000000E+00 0.0000000E+00 2.053380 000001
4.885144 28.34025 0.0000000E+00 0.0000000E+00-0.3071154 200000
15.06786 21.59868 0.0000000E+00 0.0000000E+00 0.8201890 110000
-0.3743387 -8.469834 0.0000000E+00 0.0000000E+00 0.2712384E-01 020000
0.0000000E+00 0.0000000E+00 47.42200 -18.29999 0.0000000E+00 101000
0.0000000E+00 0.0000000E+00 20.27668 -8.717686 0.0000000E+00 011000
15.52291 9.154235 0.0000000E+00 0.0000000E+00 -1.067533 002000
0.0000000E+00 0.0000000E+00 18.80651 -30.17513 0.0000000E+00 100100
0.0000000E+00 0.0000000E+00 0.4577091 -21.73826 0.0000000E+00 010100
20.96289 10.99831 0.0000000E+00 0.0000000E+00 0.7893651E-02 001100
0.1727875 1.406819 0.0000000E+00 0.0000000E+00-0.7117329E-01 100001
0.1091187 -0.8499203E-01 0.0000000E+00 0.0000000E+00-0.2705320E-02 010001
0.0000000E+00 0.0000000E+00 -1.566856 2.395144 0.0000000E+00 001001
-2.576880 -10.77361 0.0000000E+00 0.0000000E+00-0.3607443 000200
0.0000000E+00 0.0000000E+00-0.6480240 0.1446635 0.0000000E+00 000101
-0.8688021E-02 0.1899410E-01 0.0000000E+00 0.0000000E+00 -1.892372 000002
-751.5361 -54.09062 0.0000000E+00 0.0000000E+00 -44.47829 300000
-1100.468 -357.1691 0.0000000E+00 0.0000000E+00 -41.05410 210000
-634.0193 -649.4427 0.0000000E+00 0.0000000E+00 -11.40334 120000
-198.2379 -221.3674 0.0000000E+00 0.0000000E+00 -6.510183 030000
0.0000000E+00 0.0000000E+00 2910.656 655.2888 0.0000000E+00 201000
0.0000000E+00 0.0000000E+00 2016.476 572.9640 0.0000000E+00 111000
0.0000000E+00 0.0000000E+00 102.5180 232.2938 0.0000000E+00 021000
676.5579 272.9760 0.0000000E+00 0.0000000E+00 -67.39234 102000
234.7832 83.04571 0.0000000E+00 0.0000000E+00 -23.62024 012000
0.0000000E+00 0.0000000E+00 102.7956 -42.51581 0.0000000E+00 003000
0.0000000E+00 0.0000000E+00 2556.246 522.6957 0.0000000E+00 200100
0.0000000E+00 0.0000000E+00 1245.858 644.6136 0.0000000E+00 110100
0.0000000E+00 0.0000000E+00 -19.90547 331.3041 0.0000000E+00 020100
1844.377 550.1717 0.0000000E+00 0.0000000E+00 -38.88005 101100
362.0099 49.35226 0.0000000E+00 0.0000000E+00 2.797210 011100
0.0000000E+00 0.0000000E+00 198.8522 -41.32451 0.0000000E+00 002100
3.875346 68.39856 0.0000000E+00 0.0000000E+00 20.29021 200001
15.74213 38.29531 0.0000000E+00 0.0000000E+00 16.47867 110001
2.466275 0.1267577 0.0000000E+00 0.0000000E+00 3.895916 020001
0.0000000E+00 0.0000000E+00 -73.32873 -15.41079 0.0000000E+00 101001
0.0000000E+00 0.0000000E+00 -44.19852 -9.491166 0.0000000E+00 011001
2.447199 11.62207 0.0000000E+00 0.0000000E+00 12.46950 002001
1129.736 -3.391685 0.0000000E+00 0.0000000E+00 59.59497 100200
112.3725 -145.4954 0.0000000E+00 0.0000000E+00 11.75505 010200
0.0000000E+00 0.0000000E+00 89.11027 261.0074 0.0000000E+00 001200
0.0000000E+00 0.0000000E+00 -9.325749 94.51866 0.0000000E+00 100101
0.0000000E+00 0.0000000E+00 -1.776634 18.28775 0.0000000E+00 010101
-57.68150 -29.94966 0.0000000E+00 0.0000000E+00 21.34169 001101
11.02948 5.367229 0.0000000E+00 0.0000000E+00 0.6879711E-01 100002
4.628219 2.674120 0.0000000E+00 0.0000000E+00 0.3292724E-01 010002
0.0000000E+00 0.0000000E+00 12.83309 -2.259718 0.0000000E+00 001002
0.0000000E+00 0.0000000E+00 269.9984 430.1148 0.0000000E+00 000300
-4.959560 -17.83708 0.0000000E+00 0.0000000E+00 16.43582 000201
0.0000000E+00 0.0000000E+00 16.26735 2.422964 0.0000000E+00 000102
-0.4942598E-01-0.2954404E-01 0.0000000E+00 0.0000000E+00 1.764757 000003
-10128.51 -6882.101 0.0000000E+00 0.0000000E+00 -1733.562 400000
-14501.48 -15233.01 0.0000000E+00 0.0000000E+00 -2718.366 310000
4200.860 -11813.82 0.0000000E+00 0.0000000E+00 -1030.591 220000
9573.095 -1054.922 0.0000000E+00 0.0000000E+00 -154.8778 130000
3074.121 2487.932 0.0000000E+00 0.0000000E+00 -72.39238 040000
0.0000000E+00 0.0000000E+00 72332.56 22125.21 0.0000000E+00 301000
0.0000000E+00 0.0000000E+00 61655.33 23277.75 0.0000000E+00 211000
0.0000000E+00 0.0000000E+00 -16659.99 1332.440 0.0000000E+00 121000
0.0000000E+00 0.0000000E+00 -11639.21 -2136.958 0.0000000E+00 031000
-11504.25 -4058.841 0.0000000E+00 0.0000000E+00 -6942.310 202000
-16704.77 -7083.668 0.0000000E+00 0.0000000E+00 -4966.235 112000
-9080.320 -6121.165 0.0000000E+00 0.0000000E+00 -442.5223 022000
0.0000000E+00 0.0000000E+00 14410.32 1972.961 0.0000000E+00 103000
0.0000000E+00 0.0000000E+00 5539.239 1062.762 0.0000000E+00 013000
1945.989 890.8557 0.0000000E+00 0.0000000E+00 -269.0154 004000
0.0000000E+00 0.0000000E+00 76115.14 27812.66 0.0000000E+00 300100
0.0000000E+00 0.0000000E+00 42775.69 28209.00 0.0000000E+00 210100
0.0000000E+00 0.0000000E+00 -27098.85 4678.201 0.0000000E+00 120100
0.0000000E+00 0.0000000E+00 -10296.54 -2216.355 0.0000000E+00 030100
-10798.17 -5317.675 0.0000000E+00 0.0000000E+00 -14463.63 201100
-37638.08 -16309.58 0.0000000E+00 0.0000000E+00 -9209.289 111100
-25191.74 -15673.80 0.0000000E+00 0.0000000E+00 -931.4209 021100
0.0000000E+00 0.0000000E+00 28240.85 833.6086 0.0000000E+00 102100
0.0000000E+00 0.0000000E+00 9822.939 1449.074 0.0000000E+00 012100
9468.738 4785.957 0.0000000E+00 0.0000000E+00 -779.7895 003100
-3816.752 -704.9623 0.0000000E+00 0.0000000E+00 77.33579 300001
-4630.142 -1931.511 0.0000000E+00 0.0000000E+00 -84.07735 210001
-2002.052 -2074.119 0.0000000E+00 0.0000000E+00 -320.9107 120001
-547.8424 -955.4662 0.0000000E+00 0.0000000E+00 -91.43704 030001
0.0000000E+00 0.0000000E+00 -5361.356 -1998.433 0.0000000E+00 201001
0.0000000E+00 0.0000000E+00 -5854.618 -1760.359 0.0000000E+00 111001
0.0000000E+00 0.0000000E+00 -598.9825 -248.5713 0.0000000E+00 021001
993.1874 698.4769 0.0000000E+00 0.0000000E+00 111.0650 102001
344.2125 776.1696 0.0000000E+00 0.0000000E+00 12.93733 012001
0.0000000E+00 0.0000000E+00 -502.8705 40.00509 0.0000000E+00 003001
19368.85 6888.843 0.0000000E+00 0.0000000E+00 -5868.323 200200
-7034.784 -553.2738 0.0000000E+00 0.0000000E+00 -3429.026 110200
-11651.28 -5269.160 0.0000000E+00 0.0000000E+00 -702.7318 020200
0.0000000E+00 0.0000000E+00 -7126.538 -11397.64 0.0000000E+00 101200
0.0000000E+00 0.0000000E+00 -4000.989 -5420.249 0.0000000E+00 011200
15550.94 9172.181 0.0000000E+00 0.0000000E+00 -585.6962 002200
0.0000000E+00 0.0000000E+00 -7508.717 -2265.926 0.0000000E+00 200101
0.0000000E+00 0.0000000E+00 -5169.939 -1245.713 0.0000000E+00 110101
0.0000000E+00 0.0000000E+00 -161.8404 -688.3924 0.0000000E+00 020101
-2441.004 -399.5675 0.0000000E+00 0.0000000E+00 41.26114 101101
-998.4166 1155.030 0.0000000E+00 0.0000000E+00 -379.6569 011101
0.0000000E+00 0.0000000E+00 -1423.746 1195.868 0.0000000E+00 002101
-171.3801 -96.47230 0.0000000E+00 0.0000000E+00 -47.05019 200002
-208.6432 -118.6365 0.0000000E+00 0.0000000E+00 -50.90882 110002
-79.22398 26.07429 0.0000000E+00 0.0000000E+00 -17.71257 020002
0.0000000E+00 0.0000000E+00 415.0045 230.0035 0.0000000E+00 101002
0.0000000E+00 0.0000000E+00 128.4404 54.00768 0.0000000E+00 011002
23.96056 -27.52170 0.0000000E+00 0.0000000E+00 3.838347 002002
0.0000000E+00 0.0000000E+00 -13503.89 -14601.50 0.0000000E+00 100300
0.0000000E+00 0.0000000E+00 -3940.617 -9079.921 0.0000000E+00 010300
11558.56 9075.666 0.0000000E+00 0.0000000E+00 -179.5769 001300
-3787.659 -498.3720 0.0000000E+00 0.0000000E+00 -440.9624 100201
-666.3428 1087.509 0.0000000E+00 0.0000000E+00 -420.8389 010201
0.0000000E+00 0.0000000E+00 227.6613 3041.432 0.0000000E+00 001201
0.0000000E+00 0.0000000E+00 478.4667 46.83745 0.0000000E+00 100102
0.0000000E+00 0.0000000E+00 40.30990 26.67427 0.0000000E+00 010102
424.6671 49.75804 0.0000000E+00 0.0000000E+00 4.342524 001102
-14.14168 -4.439463 0.0000000E+00 0.0000000E+00 1.807706 100003
-12.44761 -11.94868 0.0000000E+00 0.0000000E+00 0.8633134 010003
0.0000000E+00 0.0000000E+00 4.824232 15.24525 0.0000000E+00 001003
2140.961 3661.622 0.0000000E+00 0.0000000E+00 -75.66233 000400
0.0000000E+00 0.0000000E+00 31.18157 987.0883 0.0000000E+00 000301
214.0472 74.76341 0.0000000E+00 0.0000000E+00 -20.84452 000202
0.0000000E+00 0.0000000E+00 -26.54261 12.10561 0.0000000E+00 000103
0.4511485 0.3378208 0.0000000E+00 0.0000000E+00 -1.787320 000004
244501.4 -6419.457 0.0000000E+00 0.0000000E+00 -9020.129 500000
762489.8 102060.8 0.0000000E+00 0.0000000E+00 14372.13 410000
1157718. 385230.8 0.0000000E+00 0.0000000E+00 80141.05 320000
823756.3 459495.8 0.0000000E+00 0.0000000E+00 76498.74 230000
216387.4 244788.9 0.0000000E+00 0.0000000E+00 24924.13 140000
13335.43 45881.62 0.0000000E+00 0.0000000E+00 3015.658 050000
0.0000000E+00 0.0000000E+00 1301482. 411324.5 0.0000000E+00 401000
0.0000000E+00 0.0000000E+00 975072.3 318398.0 0.0000000E+00 311000
0.0000000E+00 0.0000000E+00 -1334454. -406462.5 0.0000000E+00 221000
0.0000000E+00 0.0000000E+00 -1027818. -361623.4 0.0000000E+00 131000
0.0000000E+00 0.0000000E+00 -82782.34 -64167.83 0.0000000E+00 041000
-255233.2 -124129.7 0.0000000E+00 0.0000000E+00 -87611.92 302000
-288470.5 -197880.3 0.0000000E+00 | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['VpnGatewayArgs', 'VpnGateway']
@pulumi.input_type
class VpnGatewayArgs:
    def __init__(__self__, *,
                 region: pulumi.Input[str],
                 description: Optional[pulumi.Input[str]] = None,
                 labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 network: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 request_id: Optional[pulumi.Input[str]] = None,
                 stack_type: Optional[pulumi.Input['VpnGatewayStackType']] = None,
                 vpn_interfaces: Optional[pulumi.Input[Sequence[pulumi.Input['VpnGatewayVpnGatewayInterfaceArgs']]]] = None):
        """
        The set of arguments for constructing a VpnGateway resource.
        :param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.
        :param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
        :param pulumi.Input[str] network: URL of the network to which this VPN gateway is attached. Provided by the client when the VPN gateway is created.
        :param pulumi.Input['VpnGatewayStackType'] stack_type: The stack type for this VPN gateway to identify the IP protocols that are enabled. If not specified, IPV4_ONLY will be used.
        :param pulumi.Input[Sequence[pulumi.Input['VpnGatewayVpnGatewayInterfaceArgs']]] vpn_interfaces: The list of VPN interfaces associated with this VPN gateway.
        """
        pulumi.set(__self__, "region", region)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if labels is not None:
            pulumi.set(__self__, "labels", labels)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if network is not None:
            pulumi.set(__self__, "network", network)
        if project is not None:
            pulumi.set(__self__, "project", project)
        if request_id is not None:
            pulumi.set(__self__, "request_id", request_id)
        if stack_type is not None:
            pulumi.set(__self__, "stack_type", stack_type)
        if vpn_interfaces is not None:
            pulumi.set(__self__, "vpn_interfaces", vpn_interfaces)
    @property
    @pulumi.getter
    def region(self) -> pulumi.Input[str]:
        """
        Region in which the VPN gateway is created. This is the only required
        argument of this input type (it is set unconditionally above).
        """
        return pulumi.get(self, "region")
    @region.setter
    def region(self, value: pulumi.Input[str]):
        pulumi.set(self, "region", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        An optional description of this resource. Provide this property when you create the resource.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.
        """
        return pulumi.get(self, "labels")
    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "labels", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def network(self) -> Optional[pulumi.Input[str]]:
        """
        URL of the network to which this VPN gateway is attached. Provided by the client when the VPN gateway is created.
        """
        return pulumi.get(self, "network")
    @network.setter
    def network(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "network", value)
    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """
        Project for the resource. NOTE(review): not described by the upstream
        API spec; presumably falls back to the provider's project -- confirm.
        """
        return pulumi.get(self, "project")
    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)
    @property
    @pulumi.getter(name="requestId")
    def request_id(self) -> Optional[pulumi.Input[str]]:
        """
        An optional request ID. NOTE(review): not described by the upstream
        API spec; presumably used by the service to deduplicate retried
        insert requests -- confirm against the Compute API documentation.
        """
        return pulumi.get(self, "request_id")
    @request_id.setter
    def request_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "request_id", value)
    @property
    @pulumi.getter(name="stackType")
    def stack_type(self) -> Optional[pulumi.Input['VpnGatewayStackType']]:
        """
        The stack type for this VPN gateway to identify the IP protocols that are enabled. If not specified, IPV4_ONLY will be used.
        """
        return pulumi.get(self, "stack_type")
    @stack_type.setter
    def stack_type(self, value: Optional[pulumi.Input['VpnGatewayStackType']]):
        pulumi.set(self, "stack_type", value)
    @property
    @pulumi.getter(name="vpnInterfaces")
    def vpn_interfaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VpnGatewayVpnGatewayInterfaceArgs']]]]:
        """
        The list of VPN interfaces associated with this VPN gateway.
        """
        return pulumi.get(self, "vpn_interfaces")
    @vpn_interfaces.setter
    def vpn_interfaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VpnGatewayVpnGatewayInterfaceArgs']]]]):
        pulumi.set(self, "vpn_interfaces", value)
class VpnGateway(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 network: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 request_id: Optional[pulumi.Input[str]] = None,
                 stack_type: Optional[pulumi.Input['VpnGatewayStackType']] = None,
                 vpn_interfaces: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnGatewayVpnGatewayInterfaceArgs']]]]] = None,
                 __props__=None):
        """
        Creates a VPN gateway in the specified project and region using the data included in the request.

        (Typing overload taking individual keyword arguments; the actual
        implementation is ``_internal_init``.)
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.
        :param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
        :param pulumi.Input[str] network: URL of the network to which this VPN gateway is attached. Provided by the client when the VPN gateway is created.
        :param pulumi.Input['VpnGatewayStackType'] stack_type: The stack type for this VPN gateway to identify the IP protocols that are enabled. If not specified, IPV4_ONLY will be used.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnGatewayVpnGatewayInterfaceArgs']]]] vpn_interfaces: The list of VPN interfaces associated with this VPN gateway.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: VpnGatewayArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Creates a VPN gateway in the specified project and region using the data included in the request.

        (Typing overload taking a bundled ``VpnGatewayArgs``; the actual
        implementation is ``_internal_init``.)
        :param str resource_name: The name of the resource.
        :param VpnGatewayArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(VpnGatewayArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 network: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 request_id: Optional[pulumi.Input[str]] = None,
                 stack_type: Optional[pulumi.Input['VpnGatewayStackType']] = None,
                 vpn_interfaces: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnGatewayVpnGatewayInterfaceArgs']]]]] = None,
                 __props__=None):
        # Shared implementation behind both ``__init__`` overloads: builds the
        # property bag and registers the resource with the Pulumi engine.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # No id given: we are creating a new resource, so populate the
            # property bag from the keyword arguments.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = VpnGatewayArgs.__new__(VpnGatewayArgs)
            __props__.__dict__["description"] = description
            __props__.__dict__["labels"] = labels
            __props__.__dict__["name"] = name
            __props__.__dict__["network"] = network
            __props__.__dict__["project"] = project
            # 'region' is the only required input; it may be omitted only
            # when adopting an existing resource by URN.
            if region is None and not opts.urn:
                raise TypeError("Missing required property 'region'")
            __props__.__dict__["region"] = region
            __props__.__dict__["request_id"] = request_id
            __props__.__dict__["stack_type"] = stack_type
            __props__.__dict__["vpn_interfaces"] = vpn_interfaces
            # Output-only properties: start as None and are filled in by the
            # provider once the resource is created.
            __props__.__dict__["creation_timestamp"] = None
            __props__.__dict__["kind"] = None
            __props__.__dict__["label_fingerprint"] = None
            __props__.__dict__["self_link"] = None
        super(VpnGateway, __self__).__init__(
            'google-native:compute/beta:VpnGateway',
            resource_name,
            __props__,
            opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'VpnGateway':
"""
Get an existing VpnGateway resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = VpnGatewayArgs.__new__(VpnGatewayArgs)
__props__.__dict__["creation_timestamp"] = None
__props__.__dict__["description"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["label_fingerprint"] = None
__props__.__dict__["labels"] = | |
if not self.tunables:
self.tunables = dict()
assert index not in self.tunables
self.tunables[index] = (value,size)
def add_operation_index(self, index, uid):
if not self.operation_indexes:
self.operation_indexes = dict()
self.operation_indexes[index] = uid
def add_close_index(self, index, uid):
if not self.close_indexes:
self.close_indexes = dict()
self.close_indexes[index] = uid
def get_parent_context(self):
assert self.op.context is not None
return self.op.context
def get_depth(self):
if self.depth is None:
self.depth = self.op.get_depth()
return self.depth
def merge(self, other):
if self.op.task_id == -1:
self.op.task_id = other.op.task_id
elif other.op.task_id != -1:
assert self.op.task_id == other.op.task_id
if self.point.dim == 0:
self.point = other.point
elif other.point.dim != 0:
assert self.point == other.point
if not self.operations:
self.operations = other.operations
else:
assert not other.operations
if not self.processor:
self.processor = other.processor
else:
assert not other.processor
if not self.priority:
self.priority = other.priority
else:
assert not other.priority
if not self.premappings:
self.premappings = other.premappings
else:
assert not other.premappings
if not self.postmappings:
self.postmappings = other.postmappings
else:
assert not other.postmappings
if not self.tunables:
self.tunables = other.tunables
else:
assert not other.tunables
if not self.operation_indexes:
self.operation_indexes = other.operation_indexes
else:
assert not other.operation_indexes
if not self.close_indexes:
self.close_indexes = other.close_indexes
else:
assert not other.close_indexes
if not self.variant:
self.variant = other.variant
else:
assert not other.variant
    def perform_logical_dependence_analysis(self, perform_checks):
        """Run logical dependence analysis over every operation in this
        context.

        Seeds restrictions from any read-write/read-only SIMULTANEOUS region
        requirements of the parent task, analyzes each operation in program
        order, then resets the logical state. Returns True on success and
        False as soon as any operation fails its analysis.
        NOTE(review): 'iteritems' below implies this file targets Python 2.
        """
        # If we don't have any operations we are done
        if not self.operations:
            return True
        # If this is the top-level task's context, we can skip it
        # since we know there is only one task in it
        # NOTE(review): reads self.depth directly; presumably get_depth() has
        # already populated it -- confirm with callers.
        if self.depth == 0:
            assert len(self.operations) == 1
            return True
        print('Performing logical dependence analysis for %s...' % str(self))
        if self.op.state.verbose:
            print(' Analyzing %d operations...' % len(self.operations))
        # See if we have any restrictions that we need to care about
        if self.op.reqs:
            for idx,req in self.op.reqs.iteritems():
                # SIMULTANEOUS coherence on a privileged requirement means the
                # mapped instance is restricted for all of its fields.
                if (req.priv == READ_WRITE or req.priv == READ_ONLY) and \
                    req.coher == SIMULTANEOUS:
                    assert idx in self.op.mappings
                    mapping = self.op.mappings[idx]
                    # Add a restriction for all the fields
                    if not self.restrictions:
                        self.restrictions = list()
                    for field in req.fields:
                        assert field.fid in mapping
                        inst = mapping[field.fid]
                        # If they virtual mapped then there is no way
                        self.restrictions.append(
                            Restriction(req.logical_node, field, inst))
        # Iterate over all the operations in order and
        # have them perform their analysis
        success = True
        for op in self.operations:
            if not op.perform_logical_analysis(perform_checks):
                success = False
                break
        # Reset the logical state when we are done
        self.op.state.reset_logical_state()
        # We can clear this out now since we don't need them anymore
        self.restrictions = None
        self.dumb_acquisitions = None
        print("Pass" if success else "FAIL")
        return success
    def check_restricted_coherence(self, op, req):
        """Check whether requirement 'req' of operation 'op' touches any
        restricted fields recorded in this context.

        Read-only and reduction requirements can never be restricted, so
        they are skipped outright.
        """
        # If we have no restrictions, nothing to worry about
        if not self.restrictions:
            return
        # Requirements that are read-only or reduce can never be restricted
        if req.priv == READ_ONLY or req.priv == REDUCE:
            return
        # Otherwise iterate through the restrictions and
        # find any restrictions we have
        for restrict in self.restrictions:
            for field in req.fields:
                if restrict.find_restrictions(req.logical_node, field, req):
                    # If we found restrictions then we know we are done
                    break
                # NOTE(review): this second call is only reached when the call
                # above returned False; unless find_restrictions has side
                # effects the branch below looks unreachable -- confirm intent.
                if restrict.find_restrictions(req.logical_node, field, req):
                    assert field in req.restricted_fields
                    # Can break out of the inner loop here
                    # and go on to the next field
                    break
def add_acquisition(self, req):
if not self.restrictions:
print("WARNING: Unnecessary acquire in "+str(self)+
" with no restrictions")
if not self.dumb_acquisitions:
self.dumb_acquisitions = list()
for field in req.fields:
self.dumb_acquisitions.append(Acquisition(req.logical_node, field))
for field in req.fields:
# Try to add it to any of the existing restrictions
success = False
for restrict in self.restrictions:
if restrict.add_acquisition(req.logical_node, field):
success = True
break
if not success:
print("WARNING: Unnecessary acquire in "+str(self))
if not self.dumb_acquisitions:
self.dumb_acquisitions = list()
self.dumb_acquisitions.append(Acquisition(req.logical_node, field))
return True
    def remove_acquisition(self, req):
        """Remove a previously recorded acquisition for every field of 'req'.

        Each field is first matched against the real restrictions, then
        against the "dumb" acquisitions recorded by add_acquisition. Returns
        False (after reporting an error) when any field has no matching
        acquisition anywhere; True otherwise.
        """
        for field in req.fields:
            success = False
            if self.restrictions:
                for restrict in self.restrictions:
                    if restrict.remove_acquisition(req.logical_node, field):
                        success = True
                        break
            if not success and self.dumb_acquisitions:
                for acquire in self.dumb_acquisitions:
                    if acquire.matches(req.logical_node, field):
                        success = True
                        # Safe to mutate the list here because we break
                        # out of the loop immediately afterwards.
                        self.dumb_acquisitions.remove(acquire)
                        break
                    # Not a direct match: try removing a nested acquisition.
                    if acquire.remove_acquisition(req.logical_node, field):
                        success = True
                        break
            if not success:
                print("ERROR: Invalid release operation")
                if self.op.state.assert_on_error:
                    assert False
                return False
        return True
def add_restriction(self, req, mapping):
for field in req.fields:
assert field.fid in self.mapping
inst = self.mapping[field.fid]
assert not inst.is_virtual()
if not self.restrictions:
# Try to add it to any existing trees
success = False
for restrict in self.restrictions:
if restrict.add_restrict(req.logical_node, field, inst):
success = True
break
if success:
continue
# If we make it here, add a new restriction
self.restrictions.append(
Restriction(req.logical_node, field, inst))
return True
    def remove_restriction(self, req):
        """Remove the restriction matching each field of 'req'.

        Falls back to searching the "dumb" acquisitions for a nested
        restriction when no top-level restriction matches. Returns False
        (after reporting an error) when a field has no matching restriction;
        True otherwise.
        """
        for field in req.fields:
            success = False
            if self.restrictions:
                for restrict in self.restrictions:
                    if restrict.matches(req.logical_node, field):
                        success = True
                        # Safe to mutate the list here because we break
                        # out of the loop immediately afterwards.
                        self.restrictions.remove(restrict)
                        break
                    # Not a direct match: try removing a nested restriction.
                    if restrict.remove_restrict(req.logical_node, field):
                        success = True
                        break
            if not success and self.dumb_acquisitions:
                for acquire in self.dumb_acquisitions:
                    if acquire.remove_restrict(req.logical_node, field):
                        success = True
                        break
            if not success:
                print("ERROR: Illegal detach with no matching restriction")
                if self.op.state.assert_on_error:
                    assert False
                return False
        return True
    def perform_logical_sanity_analysis(self):
        """Implementation-independent cross-check of the logical analysis.

        For each operation, verify it has mapping dependences (direct or
        transitive) on every earlier operation it interferes with. Fences
        must depend on all earlier operations. Returns True when the check
        passes, False otherwise.
        """
        # Run the old version of the checks that
        # is more of a sanity check on our algorithm that
        # doesn't depend on our implementation but doesn't
        # really tell us what it means if something goes wrong
        if not self.operations or len(self.operations) < 2:
            return True
        print('Performing logical sanity analysis for %s...' % str(self))
        # Iterate over all operations from 1 to N and check all their
        # dependences against all the previous operations in the context
        for idx in range(1, len(self.operations)):
            # Find all the backwards reachable operations
            current_op = self.operations[idx]
            # No need to do anything if there are no region requirements
            if not current_op.reqs and current_op.kind != FENCE_OP_KIND:
                continue
            reachable = set()
            current_op.get_logical_reachable(reachable, False)
            # Do something special for fence operations
            if current_op.kind == FENCE_OP_KIND: # special path for fences
                # A fence must transitively depend on every earlier operation.
                # NOTE(review): 'prev' is an int index while 'reachable' was
                # filled by get_logical_reachable -- confirm the membership
                # test below compares like with like.
                for prev in range(idx):
                    if not prev in reachable:
                        print("ERROR: Failed logical sanity check. No mapping "+
                            "dependence between previous "+str(prev)+" and "+
                            "later "+str(current_op))
                        if self.op.state.assert_on_error:
                            assert False
                        return False
            else: # The normal path
                for prev in range(idx):
                    if not current_op.analyze_logical_interference(
                        self.operations[prev], reachable):
                        print("FAIL")
                        return False
        print("Pass")
        return True
    def record_instance_use(self, inst, fid, node):
        """Record that this context uses physical instance 'inst' for field
        'fid' of region-tree node 'node'.

        For virtually mapped requirements the use is propagated to the parent
        context and the instance's user list is cloned down from the parent's
        depth so nested virtual mappings are handled correctly.
        """
        depth = self.get_depth()
        # If this is the top-level context there is nothing to do
        if depth == 0:
            return
        assert self.used_instances is not None
        # If we've already handled it then we are done
        if (inst,fid) in self.used_instances:
            return
        # If we have virtual mappings we have check to see if this is
        # a new field in which case we have to copy in the users from
        # the next outer depth, but first we have to tell our context
        # that we are doing this too to handle nested virtual mappings
        # correctly. :)
        if self.virtual_indexes:
            field = node.field_space.get_field(fid)
            for idx in self.virtual_indexes:
                assert idx in self.op.reqs
                req = self.op.reqs[idx]
                if req.logical_node is not node:
                    continue
                if field not in req.fields:
                    continue
                # We found a virtual instance for a virtual mapped requirement
                assert self.op.context is not None
                # First tell our parent that we are using the instance
                self.op.context.record_instance_use(inst, fid, req.parent)
                # Clone the user list
                parent_depth = depth - 1
                inst.clone_users(field, parent_depth, depth)
                # Then add our own user at our parent's depth
                inst.add_user(depth=parent_depth, field=field, op=self.op, req=req)
                # We are done
                break
        # Record that we handled this field for this instance
        self.used_instances.add((inst,fid))
def perform_task_physical_analysis(self, perform_checks):
if not self.operations:
return True
depth = self.get_depth()
assert self.used_instances is None
self.used_instances = set()
# Initialize our regions at our depth
assert self.virtual_indexes is None
if self.op.reqs:
for idx,req in self.op.reqs.iteritems():
# Skip any no access requirements
if req.is_no_access():
continue
assert idx in self.op.mappings
mappings = self.op.mappings[idx]
for field in req.fields:
assert field.fid in mappings
inst = mappings[field.fid]
if inst.is_virtual():
# Only need to do this if we are not the root
if depth > 0:
# If you ever hit this assertion it is indicative of a
# runtime bug because the runtime should never allow
# a virtual instance to be made for a region requirement
# that only has reduction privileges
| |
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import six, json
from ..util import text, unicode_obj, unicode_list
__author__ = "nebula"
class Term(object):
    """One rule term: a left expression, an operator and (usually) a right
    expression, plus a free-form remark and an evaluation scope.

    When the left side is one of the action-style function expressions
    (setblacklist, time, getlocation, sleep, spl) the operator and right
    side are discarded.
    """
    def __init__(self, left, op, right, remark="", scope="rt"):
        if not isinstance(left, Exp):
            left = Exp.get_exp(left)
        if not left:
            raise RuntimeError("invalid left expression")
        if left.type == "func" and left.subtype in {"setblacklist", "time", "getlocation", "sleep", "spl"}:
            # Action expressions carry no operator / right-hand side.
            right = None
            op = None
        else:
            if not isinstance(right, Exp):
                right = Exp.get_exp(right)
            if not right:
                raise RuntimeError("invalid right expression")
        self._left = left
        self._right = right
        self._op = text(op or "")
        self._remark = text(remark)
        self._scope = scope
    @property
    def left(self):
        return self._left
    @left.setter
    def left(self, left):
        if not isinstance(left, Exp):
            left = Exp.get_exp(left)
        if not left:
            raise RuntimeError("invalid left expression")
        self._left = left
    @property
    def right(self):
        return self._right
    @right.setter
    def right(self, right):
        if not isinstance(right, Exp):
            right = Exp.get_exp(right)
        if not right:
            raise RuntimeError("invalid right expression")
        self._right = right
    @property
    def op(self):
        return self._op
    @op.setter
    def op(self, op):
        self._op = text(op)
    @property
    def remark(self):
        return self._remark
    @remark.setter
    def remark(self, remark):
        self._remark = text(remark)
    @property
    def scope(self):
        return self._scope
    @scope.setter
    def scope(self, scope):
        self._scope = scope
    def get_dict(self):
        """Serialize this term to a plain dict (right may be None)."""
        op = self.op
        if not op:
            op = ""
        if not self.right:
            right = None
        else:
            right = self.right.get_dict()
        return {
            "left": self.left.get_dict(),
            "op": op,
            "right": right,
            "remark": self.remark,
            "scope": self.scope
        }
    def get_json(self):
        """Serialize this term to a JSON string."""
        return json.dumps(self.get_dict())
    @staticmethod
    def from_dict(d):
        return Term(d.get("left"), d.get("op"), d.get("right"), d.get("remark", ""), d.get("scope", "rt"))
    @staticmethod
    def from_json(json_str):
        return Term.from_dict(json.loads(json_str))
    def copy(self):
        # Deep copy via dict round-trip.
        return Term.from_dict(self.get_dict())
    def __eq__(self, other):
        return self.get_dict() == other.get_dict()
    def __ne__(self, other):
        return not self == other
    def __str__(self):
        return "Term[{}]".format(self.get_dict())
    def __hash__(self):
        # '|' is a weak mixer but is kept for consistency with the other
        # expression classes. The previous version ended with a bare
        # 'return', so __hash__ returned None and hash(term) raised
        # TypeError.
        result = 0
        result |= hash(self.left)
        result |= hash(self.op)
        result |= hash(self.right)
        result |= hash(self.remark)
        result |= hash(self.scope)
        return result
class Exp(object):
    """Base class for rule expressions, with a JSON-dict factory."""
    @staticmethod
    def get_exp(data):
        """Parse 'data' (a JSON string/bytes or an already-decoded dict) into
        the first matching expression subclass, or None when nothing matches.
        """
        if isinstance(data, (six.text_type, six.binary_type)):
            data = json.loads(text(data))
        if not isinstance(data, dict):
            return None
        candidates = (ConstantExp, EventFieldExp, FuncCountExp,
                      FuncGetVariableExp, SetBlacklistExp, TimeExp,
                      GetLocationExp, SleepExp, SplExp)
        for candidate in candidates:
            parsed = candidate.from_dict(data)
            if parsed:
                return parsed
        return None
class ConstantExp(Exp):
    """
    A constant string value.
    {
        "type": "constant",
        "subtype": "",
        "config": {
            "value": "1,2"
        }
    }
    """
    TYPE = "constant"
    SUBTYPE = ""

    def __init__(self, value):
        # Values are always normalized to text.
        self._value = text(value)

    @property
    def type(self):
        return ConstantExp.TYPE

    @property
    def subtype(self):
        return ConstantExp.SUBTYPE

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        self._value = text(value)

    def get_dict(self):
        """Serialize to a plain dict."""
        return {
            "type": self.type,
            "subtype": self.subtype,
            "config": {
                "value": self.value,
            }
        }

    def get_json(self):
        """Serialize to a JSON string."""
        return json.dumps(self.get_dict())

    @staticmethod
    def from_dict(d):
        """Build from a dict; None when it does not describe a constant."""
        if not d:
            return None
        if d.get("type") != ConstantExp.TYPE:
            return None
        config = d.get("config")
        if not config:
            return None
        return ConstantExp(config.get("value"))

    @staticmethod
    def from_json(json_str):
        return ConstantExp.from_dict(json.loads(json_str))

    def copy(self):
        # Deep copy via dict round-trip.
        return ConstantExp.from_dict(self.get_dict())

    def __eq__(self, other):
        return self.get_dict() == other.get_dict()

    def __ne__(self, other):
        return not self == other

    def __str__(self):
        return "ConstantExp[{}]".format(self.get_dict())

    def __hash__(self):
        h = 0
        for part in (self.type, self.subtype, self.value):
            h |= hash(part)
        return h
class EventFieldExp(Exp):
    """
    Get one field of event
    {
        "type": "event",
        "subtype": "",
        "config": {
            "event": ["online", "http_static"],
            "field": "c_ip"
        }
    }
    """
    TYPE = "event"
    SUBTYPE = ""

    def __init__(self, event, field):
        self._event = unicode_list(event or [])
        self._field = text(field)

    @property
    def type(self):
        return EventFieldExp.TYPE

    @property
    def subtype(self):
        return EventFieldExp.SUBTYPE

    @property
    def event(self):
        return self._event

    @event.setter
    def event(self, event):
        self._event = unicode_list(event or [])

    @property
    def field(self):
        return self._field

    @field.setter
    def field(self, field):
        self._field = text(field)

    def get_dict(self):
        """Serialize to a plain dict."""
        return {
            "type": self.type,
            "subtype": self.subtype,
            "config": {
                "event": self.event,
                "field": self.field
            }
        }

    def get_json(self):
        """Serialize to a JSON string."""
        return json.dumps(self.get_dict())

    @staticmethod
    def from_dict(d):
        """Build from a dict; None when it does not describe an event field."""
        if not d:
            return None
        if d.get("type") != EventFieldExp.TYPE:
            return None
        config = d.get("config")
        if not config:
            return None
        return EventFieldExp(config.get("event"), config.get("field"))

    @staticmethod
    def from_json(json_str):
        return EventFieldExp.from_dict(json.loads(json_str))

    def copy(self):
        # Deep copy via dict round-trip.
        return EventFieldExp.from_dict(self.get_dict())

    def __eq__(self, other):
        return self.get_dict() == other.get_dict()

    def __ne__(self, other):
        return not self == other

    def __str__(self):
        return "EventFieldExp[{}]".format(self.get_dict())

    def __hash__(self):
        h = hash(self.type) | hash(self.subtype)
        for name in self.event:
            h |= hash(name)
        return h | hash(self.field)
class FuncGetVariableExp(Exp):
    """
    Function, get one variable value by trigger event and its selected keys.
    {
        "type": "func",
        "subtype": "getvariable",
        "config": {
            "variable": ["online", "http_static_count_ip"],
            "trigger": {
                "event": ["online", "http_static"],
                "keys": ["field1", "field2"]
            }
        }
    }
    """
    TYPE = "func"
    SUBTYPE = "getvariable"

    def __init__(self, variable, trigger_event, trigger_fields):
        self._variable = unicode_list(variable or list())
        self._trigger_event = unicode_list(trigger_event or list())
        self._trigger_fields = unicode_list(trigger_fields or list())

    @property
    def type(self):
        return FuncGetVariableExp.TYPE

    @property
    def subtype(self):
        return FuncGetVariableExp.SUBTYPE

    @property
    def variable(self):
        return self._variable

    @variable.setter
    def variable(self, variable):
        self._variable = unicode_list(variable or list())

    @property
    def trigger_event(self):
        return self._trigger_event

    @trigger_event.setter
    def trigger_event(self, trigger_event):
        self._trigger_event = unicode_list(trigger_event or list())

    @property
    def trigger_fields(self):
        return self._trigger_fields

    @trigger_fields.setter
    def trigger_fields(self, trigger_fields):
        self._trigger_fields = unicode_list(trigger_fields or list())

    def get_dict(self):
        """Serialize to a plain dict."""
        return {
            "type": self.type,
            "subtype": self.subtype,
            "config": {
                "variable": self.variable,
                "trigger": {
                    "event": self._trigger_event,
                    "keys": self._trigger_fields
                }
            }
        }

    def get_json(self):
        """Serialize to a JSON string."""
        return json.dumps(self.get_dict())

    @staticmethod
    def from_dict(d):
        """Build from a dict; None when type/subtype do not match."""
        if not d:
            return None
        if d.get("type") != FuncGetVariableExp.TYPE:
            return None
        if d.get("subtype") != FuncGetVariableExp.SUBTYPE:
            return None
        config = d.get("config")
        if not config:
            return None
        trigger = config.get("trigger", {})
        return FuncGetVariableExp(config.get("variable"), trigger.get("event"),
                                  trigger.get("keys"))

    @staticmethod
    def from_json(json_str):
        return FuncGetVariableExp.from_dict(json.loads(json_str))

    def copy(self):
        # Deep copy via dict round-trip.
        return FuncGetVariableExp.from_dict(self.get_dict())

    def __eq__(self, other):
        return self.get_dict() == other.get_dict()

    def __ne__(self, other):
        return not self == other

    def __str__(self):
        return "FuncGetVariableExp[{}]".format(self.get_dict())

    def __hash__(self):
        h = hash(self.type) | hash(self.subtype)
        for seq in (self.variable, self.trigger_event, self.trigger_fields):
            for item in seq:
                h |= hash(item)
        return h
class FuncCountExp(Exp):
    """
    Build an interval counter.
    {
        "type": "func",
        "subtype": "count",
        "config": {
            "sourceevent": ["online", "http_dynamic"],
            "condition": [
                {
                    "left": "method",
                    "op": "=",
                    "right": "get"
                }
            ],
            "interval": 300,
            "algorithm": "count",
            "operand": [],
            "groupby": ["c_ip", "url"],
            "trigger": {
                "event": ["online", "http_static"],
                "keys": ["c_ip","url"]
            }
        }
    }
    """
    TYPE = "func"
    SUBTYPE = "count"
    def __init__(self, source_event, condition, interval, algorithm, operand, groupby, trigger_event, trigger_fields):
        self._source_event = unicode_list(source_event or list())
        self._condition = unicode_obj(condition)
        self._interval = int(interval)
        self._algorithm = text(algorithm)
        self._operand = unicode_list(operand or [])
        self._groupby = unicode_list(groupby or list())
        self._trigger_event = unicode_list(trigger_event or list())
        self._trigger_fields = unicode_list(trigger_fields or list())
    @property
    def type(self):
        return FuncCountExp.TYPE
    @property
    def subtype(self):
        return FuncCountExp.SUBTYPE
    @property
    def source_event(self):
        return self._source_event
    @source_event.setter
    def source_event(self, source_event):
        self._source_event = unicode_list(source_event or list())
    @property
    def condition(self):
        return self._condition
    @condition.setter
    def condition(self, condition):
        self._condition = unicode_obj(condition)
    @property
    def interval(self):
        return self._interval
    @interval.setter
    def interval(self, interval):
        self._interval = int(interval)
    @property
    def algorithm(self):
        return self._algorithm
    @algorithm.setter
    def algorithm(self, algorithm):
        self._algorithm = text(algorithm)
    @property
    def operand(self):
        return self._operand
    @operand.setter
    def operand(self, operand):
        self._operand = unicode_list(operand or [])
    @property
    def groupby(self):
        return self._groupby
    @groupby.setter
    def groupby(self, groupby):
        self._groupby = unicode_list(groupby or list())
    @property
    def trigger_event(self):
        return self._trigger_event
    @trigger_event.setter
    def trigger_event(self, trigger_event):
        self._trigger_event = unicode_list(trigger_event or list())
    @property
    def trigger_fields(self):
        return self._trigger_fields
    @trigger_fields.setter
    def trigger_fields(self, trigger_fields):
        self._trigger_fields = unicode_list(trigger_fields or list())
    def get_dict(self):
        """Serialize to a plain dict."""
        return {
            "type": self.type,
            "subtype": self.subtype,
            "config": {
                "sourceevent": self.source_event,
                "condition": self.condition,
                "interval": self.interval,
                "algorithm": self.algorithm,
                "groupby": self.groupby,
                "operand": self.operand,
                "trigger": {
                    "event": self._trigger_event,
                    "keys": self._trigger_fields
                }
            }
        }
    def get_json(self):
        """Serialize to a JSON string."""
        return json.dumps(self.get_dict())
    @staticmethod
    def from_dict(d):
        """Build from a dict; None when type/subtype do not match."""
        if not d or d.get("type") != FuncCountExp.TYPE or d.get("subtype") != FuncCountExp.SUBTYPE:
            return None
        config = d.get("config")
        if not config:
            return None
        return FuncCountExp(config.get("sourceevent"), config.get("condition"), config.get("interval"),
            config.get("algorithm"), config.get("operand"), config.get("groupby"), config.get("trigger", {}).get("event"),
            config.get("trigger", {}).get("keys"))
    @staticmethod
    def from_json(json_str):
        return FuncCountExp.from_dict(json.loads(json_str))
    def copy(self):
        # Deep copy via dict round-trip.
        return FuncCountExp.from_dict(self.get_dict())
    def __eq__(self, other):
        return self.get_dict() == other.get_dict()
    def __ne__(self, other):
        return not self == other
    def __str__(self):
        return "FuncCountExp[{}]".format(self.get_dict())
    def __hash__(self):
        result = 0
        result |= hash(self.type)
        result |= hash(self.subtype)
        for i in self.source_event:
            result |= hash(i)
        for i in self.condition:
            # left, op, right
            result |= hash(i.get("left", ""))
            result |= hash(i.get("op", ""))
            result |= hash(i.get("right", ""))
        result |= hash(self.interval)
        result |= hash(self.algorithm)
        for i in self.groupby:
            result |= hash(i)
        # The previous version hashed trigger_event twice; with '|=' the
        # duplicate loop was a no-op and has been removed.
        for i in self.trigger_event:
            result |= hash(i)
        for i in self.trigger_fields:
            result |= hash(i)
        for i in self.operand:
            result |= hash(i)
        return result
class SetBlacklistExp(Exp):
"""
Set a blacklist item action.
{
"type": "func",
"subtype": "setblacklist",
"config": {
"name": self.name,
"checktype": ,
"checkvalue": self.check_value,
"decision": self.decision,
"checkpoints": "login",
"ttl": self.ttl,
"remark": self.remark
}
}
"""
TYPE = "func"
SUBTYPE | |
in attributes.items():
setattr(instance.collection_attributes, name, value)
else:
raise Exception("{0} not supported, only 'memory' savepoint supported".format(format))
instance._private.previous = self._private.previous
instance._private.version = 0
self._private.previous = instance
return instance
def new_working_copy(self):
if self._private.is_working_copy:
previous = self._private.previous
if previous is None:
raise Exception("you have not savepoint for this set, you cannot create a working copy please use copy instead".format(format))
else:
previous = self
result = previous.copy()
result._private.previous = previous
return result
    def get_timestamp(self):
        # Timestamp of this saved state (may be None when never set).
        return self.collection_attributes.timestamp
    def iter_history(self):
        """Yield saved states newest-first, starting from the most recent
        savepoint (skipping 'self' when it is a working copy)."""
        if self._private.is_working_copy:
            current = self._private.previous
        else:
            current = self
        while not current is None:
            yield current
            current = current._private.previous
def get_state_at_timestamp(self, timestamp):
previous_timestamp = None
states_and_distances = []
for state in self.iter_history():
timestamp_of_state = state.get_timestamp()
if timestamp_of_state is None:
continue
distance = abs(timestamp_of_state - timestamp)
states_and_distances.append((state, distance,))
if len(states_and_distances) == 0:
raise exceptions.AmuseException("You asked for a state at timestamp '{0}', but the set does not have any saved states so this state cannot be returned")
accompanying_state, min_distance = states_and_distances[0]
for state, distance in states_and_distances:
if distance < min_distance:
min_distance = distance
accompanying_state = state
return accompanying_state
    def previous_state(self):
        # The savepoint this state was derived from (None for the first one).
        return self._private.previous
    @property
    def history(self):
        # Saved states in chronological (oldest-first) order.
        return reversed(list(self.iter_history()))
    def get_timeline_of_attribute(self, particle_key, attribute):
        """Collect (timestamp, value) pairs of 'attribute' for the particle
        with key 'particle_key' across all saved states (oldest first)."""
        timeline = []
        for x in self.history:
            if x.has_key_in_store(particle_key):
                index = x.get_indices_of_keys([particle_key])[0]
                timeline.append((x.collection_attributes.timestamp, x._get_value_of_attribute(x[index], index, attribute)))
        return timeline
    def get_timeline_of_attribute_as_vector(self, particle_key, attribute):
        """Like get_timeline_of_attribute, but returns two vector
        quantities: (timestamps, values)."""
        timeline = AdaptingVectorQuantity()
        chrono_values = AdaptingVectorQuantity()
        for x in self.history:
            if x.has_key_in_store(particle_key):
                index = x.get_indices_of_keys([particle_key])[0]
                timeline.append(x.collection_attributes.timestamp)
                chrono_values.append(x._get_value_of_attribute(x[index], index, attribute))
        return timeline, chrono_values
def get_timeline_of_attributes(self, particle_key, attributes):
result = [[] for x in range(len(attributes)+1)]
units = [None for x in range(len(attributes)+1)]
for x in self.history:
if x.has_key_in_store(particle_key):
index = x.get_indices_of_keys([particle_key])[0]
if units[0] is None:
units[0] = x.collection_attributes.timestamp.unit
for i, attribute in enumerate(attributes):
quantity = x._get_value_of_attribute(x[index], index, attribute)
if units[i+1] is None:
units[i+1] = quantity.unit
result[i+1].append(quantity.value_in(units[i+1]))
return list(map(lambda value,unit : unit.new_quantity(value), result, units))
    def remove_attribute_from_store(self, name):
        # Delete an attribute column from the underlying storage.
        self._private.attribute_storage.remove_attribute_from_store(name)
    def add_particles_to_store(self, keys, attributes = [], values = []):
        # The mutable defaults are safe here: they are only read, never
        # mutated. The version bump invalidates cached views of the set.
        self._private.attribute_storage.add_particles_to_store(keys, attributes, values)
        self._private.version += 1
    def remove_particles_from_store(self, indices):
        self._private.attribute_storage.remove_particles_from_store(indices)
        self._private.version += 1
def get_values_in_store(self, indices, attributes):
missing_attributes = set(attributes) - set(self.get_attribute_names_defined_in_store()) - set(["index_in_code"])
if len(missing_attributes) == 0:
return self._private.attribute_storage.get_values_in_store(indices, attributes)
defined_attributes = list(set(attributes) - missing_attributes)
defined_values = dict(list(zip(
defined_attributes,
self._private.attribute_storage.get_values_in_store(indices, defined_attributes)
)))
#print missing_attributes, "shape" in missing_attributes
#if "shape" in missing_attributes:
# import traceback
# traceback.print_stack()
# raise Exception("hello is this ok????")
subset = self[indices]
tmp = [defined_values[attribute] if attribute in defined_values else subset._get_derived_attribute_value(attribute) for attribute in attributes]
return tmp
    # --- thin delegation layer over the attribute storage backend ---
    def get_values_in_store_async(self, indices, attributes):
        # Async variant; assumes all requested attributes are stored.
        return self._private.attribute_storage.get_values_in_store_async(indices, attributes)
    def get_indices_of_keys(self, keys):
        return self._private.attribute_storage.get_indices_of(keys)
    def set_values_in_store(self, indices, attributes, values):
        self._private.attribute_storage.set_values_in_store(indices, attributes, values)
    def set_values_in_store_async(self, indices, attributes, values):
        return self._private.attribute_storage.set_values_in_store_async(indices, attributes, values)
    def get_attribute_names_defined_in_store(self):
        return self._private.attribute_storage.get_defined_attribute_names()
    def get_settable_attribute_names_defined_in_store(self):
        return self._private.attribute_storage.get_defined_settable_attribute_names()
    def get_all_keys_in_store(self):
        return self._private.attribute_storage.get_all_keys_in_store()
    def get_all_indices_in_store(self):
        return self._private.attribute_storage.get_all_indices_in_store()
    def has_key_in_store(self, key):
        return self._private.attribute_storage.has_key_in_store(key)
    def get_value_in_store(self, index, attribute):
        return self._private.attribute_storage.get_value_in_store(index, attribute)
    def can_extend_attributes(self):
        return self._private.attribute_storage.can_extend_attributes()
    def _remove_indices_in_attribute_storage(self, indices):
        # Low-level index removal; bumps the version like the public API.
        self._private.attribute_storage._remove_indices(indices)
        self._private.version += 1
    def _add_indices_in_attribute_storage(self, indices):
        self._private.attribute_storage._add_indices(indices)
        self._private.version += 1
    @staticmethod
    def is_quantity():
        # Particle sets are containers, not unit-carrying quantities.
        return False
def new_particle(key = None, **keyword_arguments):
return self.add_particle(Particle(key = key, **keyword_arguments))
class BoundSupersetParticlesFunctionAttribute(object):
    """Callable that fans a function attribute out over every subset of a
    superset and merges the per-subset results."""
    def __init__(self, name, superset):
        self.name = name
        self.superset = superset
        self.subsetfunctions = []
    def add_subsetfunction(self, callable):
        # One bound callable per subset, invoked in registration order.
        self.subsetfunctions.append(callable)
    def __call__(self, *list_arguments, **keyword_arguments):
        subset_results = [f(*list_arguments, **keyword_arguments)
                          for f in self.subsetfunctions]
        first = subset_results[0]
        if first is None:
            return None
        if isinstance(first, AbstractParticleSet):
            # Particle-set results are re-wrapped as a superset view.
            return ParticlesSuperset(subset_results)
        if hasattr(first, 'unit'):
            # Unit-carrying results are concatenated into one vector.
            merged = AdaptingVectorQuantity()
            for partial in subset_results:
                merged.extend(partial)
            return merged
        # Plain sequences: flatten into a single list.
        return [item for partial in subset_results for item in partial]
class DerivedSupersetAttribute(DerivedAttribute):
    """Derived attribute that answers queries on a superset by delegating to
    each subset and stitching the per-subset results together."""
    def __init__(self, name):
        self.name = name
    def get_values_for_entities(self, superset):
        """Gather attribute 'self.name' from every subset of 'superset'.

        Callables are wrapped in a BoundSupersetParticlesFunctionAttribute;
        vector quantities and numpy arrays are concatenated into one result
        sized for the whole superset.
        """
        result = None
        offset = 0
        for subset in superset._private.particle_sets:
            subset_result = getattr(subset, self.name)
            if hasattr(subset_result, '__call__'):
                # Function attribute: collect bound callables from the
                # non-empty subsets.
                if len(subset) > 0:
                    if result is None:
                        result = BoundSupersetParticlesFunctionAttribute(self.name, superset)
                    result.add_subsetfunction(subset_result)
            elif hasattr(subset_result, 'unit'):
                # Unit-carrying vector: allocate once, then copy each
                # subset's slice into place.
                if len(subset_result) == 0:
                    continue
                if result is None:
                    shape = [len(superset),] + list(subset_result.shape[1:])
                    result = VectorQuantity.zeros(shape, subset_result.unit)
                    offset = 0
                try:
                    result[offset:len(subset_result)+offset] = subset_result
                except ValueError:
                    raise AttributeError("Subsets return incompatible quantities for attribute '{0}', attribute cannot be queried from the superset".format(self.name))
                offset += len(subset_result)
            elif hasattr(subset_result, 'dtype'):
                # Plain numpy array: same concatenation strategy.
                if len(subset_result) == 0:
                    continue
                if result is None:
                    shape = [len(superset),] + list(subset_result.shape[1:])
                    result = numpy.zeros(shape, dtype=subset_result.dtype)
                    offset = 0
                try:
                    result[offset:len(subset_result)+offset] = subset_result
                except ValueError:
                    raise AttributeError("Subsets return incompatible quantities for attribute '{0}', attribute cannot be queried from the superset".format(self.name))
                offset += len(subset_result)
            else:
                raise exceptions.AmuseException("cannot handle this type of attribute on supersets yet")
        return result
    def set_values_for_entities(self, superset, value):
        # NOTE(review): message has a '{0}' placeholder but no .format call.
        raise exceptions.AmuseException("cannot set value of attribute '{0}'")
    def get_value_for_entity(self, superset, particle, index):
        raise exceptions.AmuseException("Internal AMUSE error, a single entity (Particle) should always be bound to the subset and not the superset")
    def set_value_for_entity(self, superset, key, value):
        raise exceptions.AmuseException("Internal AMUSE error, a single entity (Particle) should always be bound to the subset and not the superset")
class ParticlesSuperset(AbstractParticleSet):
"""A superset of particles. Attribute values are not
stored by the superset. The superset provides a view
on two or more sets of particles.
Superset objects are not supposed to be created
directly. Instead use the ``union`` methods.
>>> p1 = Particles(3)
>>> p1.mass = [10.0, 20.0, 30.0] | units.kg
>>> p2 = Particles(3)
>>> p2.mass = [40.0, 50.0, 60.0] | units.kg
>>> p = ParticlesSuperset([p1, p2])
>>> print len(p)
6
>>> print p.mass
[10.0, 20.0, 30.0, 40.0, 50.0, 60.0] kg
>>> p[4].mass = 70 | units.kg
>>> print p.mass
[10.0, 20.0, 30.0, 40.0, 70.0, 60.0] kg
>>> p2[1].mass
quantity<70.0 kg>
>>> cp = p.copy()
>>> print len(cp)
6
>>> print cp.mass
[10.0, 20.0, 30.0, 40.0, 70.0, 60.0] kg
"""
    def __init__(self, particle_sets, index_to_default_set=None, names = None):
        """Build a superset view over 'particle_sets'.

        When 'names' is given, each set is also reachable by name through
        get_subset. Derived attributes are exposed on the superset only when
        every subset defines them. Raises when the combined sets contain
        duplicate particles.
        """
        AbstractParticleSet.__init__(self)
        if not names is None:
            self._private.mapping_from_name_to_set = {}
            for name, particle_set in zip(names, particle_sets):
                self._private.mapping_from_name_to_set[name] = particle_set
        self._private.particle_sets = list(particle_sets)
        self._private.index_to_default_set = index_to_default_set
        # Expose only the derived attributes common to every subset.
        names_of_derived_attributes_in_all_subsets = None
        for subset in particle_sets:
            derived_attribute_names = set(subset._derived_attributes.keys())
            if names_of_derived_attributes_in_all_subsets is None:
                names_of_derived_attributes_in_all_subsets = set(derived_attribute_names)
            else:
                names_of_derived_attributes_in_all_subsets &= derived_attribute_names
        names_of_derived_attributes_in_all_subsets -= set(self.GLOBAL_DERIVED_ATTRIBUTES.keys())
        for name in names_of_derived_attributes_in_all_subsets:
            self._derived_attributes[name] = DerivedSupersetAttribute(name)
        # Force the cached length/keys/index tables to be built now.
        self._private.version = -1
        self._ensure_updated_set_properties()
        if self.has_duplicates():
            raise exceptions.AmuseException("Unable to add a particle, because it was already part of this set.")
def _ensure_updated_set_properties(self):
if self._private.version == self._get_subsets_version():
return
self._private.version = self._get_subsets_version()
self._private.length = numpy.sum([len(x) for x in self._private.particle_sets])
self._private.indices = numpy.arange(self._private.length)
self._private.keys = self._get_concatenated_keys_in_store()
self._private.key_to_index = {}
d = self._private.key_to_index
index = 0
for x in self._private.keys:
d[x] = index
index += 1
def can_extend_attributes(self):
for x in self._private.particle_sets:
if not x.can_extend_attributes():
return False
return True
def __len__(self):
self._ensure_updated_set_properties()
return self._private.length
def __iter__(self):
for set in self._private.particle_sets:
for particle in set:
yield particle
def _get_subsets_version(self):
versions = [[x._get_version()] for x in self._private.particle_sets]
return numpy.sum(versions)
    def _get_version(self):
        # The superset version is the (freshly cached) sum of the subset
        # versions, so a change in any subset bumps it.
        self._ensure_updated_set_properties()
        return self._private.version
    def __getitem__(self, index):
        """Index the superset.

        * a string selects a named subset,
        * a slice / iterable of keys selects particles across subsets
          (returned as a new subset view),
        * a plain integer returns the single particle, delegated to the
          subset owning that position.
        """
        self._ensure_updated_set_properties()
        offset = 0
        if isinstance(index, str):
            return self.get_subset(index)
        else:
            keys = self.get_all_keys_in_store()[index]
            if hasattr(keys, '__iter__'):
                return self._subset(keys)
            else:
                # Single position: translate the superset-global index into
                # the owning subset's local index by walking the subsets.
                index = self.get_all_indices_in_store()[index]
                for set in self._private.particle_sets:
                    length = len(set)
                    if index < (offset+length):
                        return set[index - offset]
                    offset += length
                raise Exception('index not found on superset')
def _get_particle(self, key):
if self.has_key_in_store(key):
return self._get_subset_for_key(key)._get_particle(key)
else:
return None
def _get_particle_unsave(self, key, index = -1):
if index >= 0:
offset, subset = self._get_offset_and_subset_for_index(index)
index -= offset
return subset._get_particle_unsave(key, subset.get_all_indices_in_store()[index])
else:
return self._get_subset_for_key(key)._get_particle_unsave(key)
    def _split_keys_over_sets(self, keys):
        """Partition ``keys`` by owning subset.

        Returns ``(split_sets, split_indices)``: per subset, the keys it owns
        and the positions of those keys in the input. ``keys is None`` means
        "all keys of every subset", in concatenation order.
        """
        split_sets = [ [] for x in self._private.particle_sets ]
        split_indices = [ [] for x in self._private.particle_sets ]
        if keys is None:
            offset = 0
            for setindex, x in enumerate(self._private.particle_sets):
                split_sets[setindex].extend(x.get_all_keys_in_store())
                split_indices[setindex].extend(range(offset, offset + len(x)))
                offset = offset + len(x)
        else:
            # Normalise to an array so boolean masks can be applied.
            if isinstance(keys, set):
                keys_array = numpy.array(list(keys))
            else:
                keys_array = numpy.array(keys)
            indices_array = numpy.arange(len(keys_array))
            for setindex, x in enumerate(self._private.particle_sets):
                # Mask of input keys owned by this subset (input order kept).
                mask = self._in1d(keys_array, x.get_all_keys_in_store(), True)
                split_sets[setindex] = keys_array[mask]
                split_indices[setindex] = indices_array[mask]
        return split_sets, split_indices
def _split_indices_over_sets(self, indices):
self._ensure_updated_set_properties()
split_sets = [ [] for x in self._private.particle_sets ]
split_indices = [ [] for x in self._private.particle_sets ]
offset = 0
if isinstance(indices, set):
indices = numpy.array(list(indices))
if indices is None or isinstance(indices,EllipsisType):
offset = 0
for setindex, x in enumerate(self._private.particle_sets):
split_sets[setindex] = x.get_all_indices_in_store()
split_indices[setindex] = numpy.arange(offset, offset + len(x))
offset = offset + len(x)
elif len(indices) == 0:
for setindex, x in enumerate(self._private.particle_sets):
split_sets[setindex] = []
split_indices[setindex] = []
else:
result_indices_array = numpy.arange(len(indices))
for setindex, x in enumerate(self._private.particle_sets):
mask = numpy.logical_and( | |
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main classes and methods for simulating continual federated learning.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from datetime import datetime
import json
import math
import os
import random
import sys
import concurrent.futures
import numpy as np
import tensorflow as tf
import linear_model
import utils
import word_lstm_model
# Fix the graph-level seed so runs are reproducible.
tf.set_random_seed(99)
tf.app.flags.DEFINE_integer(
    'algorithm', 0, 'Indicates which algorithm to run.'
    'Options:'
    ' -1: baseline, runs FedAvg in each cycle.'
    ' 0: retraining without model splitting.'
    ' 1: algorithm 1 with model splitting.'
    ' 2: algorithm 2 with model splitting.')
tf.app.flags.DEFINE_string('model_type', 'linear', 'Options: linear, lstm.')
tf.app.flags.DEFINE_boolean('verbose', False,
                            'Verbose mode will output some intermediate tensors.')
tf.app.flags.DEFINE_integer('num_clients', 2, 'Total Number of clients.')
tf.app.flags.DEFINE_integer('num_cycles', 2,
                            'Total numnber of cycles. At most 18 for LSTM.')
tf.app.flags.DEFINE_float('fraction_clients', 1,
                          'Fraction of clients randomly selected in FedAvg')
tf.app.flags.DEFINE_integer(
    'base_times', 1, 'Number of times to run one pass of all clients'
    'for FedAvg.')
tf.app.flags.DEFINE_boolean(
    'baseline_second_cycle_update', False,
    'Whether to update server from the beginning of the second cycle.')
tf.app.flags.DEFINE_integer('altmin_rounds', 2,
                            'Number of rounds of Alt Min in algorithm 2.')
FLAGS = tf.app.flags.FLAGS
# Force flag parsing now so the module-level constants below can read FLAGS.
FLAGS(sys.argv)
# Algorithm names, which are consistent with indices from FLAGS.algorithm.
# (Python indexing makes position -1 map to 'baseline'.)
ALGORITHM_NAMES = ('algorithm 0', 'algorithm 1', 'algorithm 2', 'baseline')
CLIENT_BASE_SCOPE = 'Client'
SERVER_SCOPE = 'Server'
# Parameters to control the federated training.
NUM_CLIENTS_PER_ROUND = int(FLAGS.fraction_clients * FLAGS.num_clients)
# Params for the continual setting.
PRINT_FREQUENCY = 4
# Per-phase round/epoch counts are scaled by base_times so one knob controls
# the overall amount of training.
num_rounds_fedavg_base = int(
    math.ceil(1.0 / FLAGS.fraction_clients) * FLAGS.base_times)
num_retrain_epochs_base = FLAGS.base_times
num_alternative_min_base = FLAGS.altmin_rounds * FLAGS.base_times
# For distinguishing step summaries from cycle summaries.
STEP_SUMM = 'step_summ'
CYCLE_SUMM = 'cycle_summ'
class AlgorithmConfig(object):
  """Configurations for the personalization algorithms."""

  class ConfigBaseline(object):
    # Baseline (algorithm id -1): plain FedAvg in each cycle.
    num_epochs_per_round_fedavg = 1
    num_rounds_fedavg = num_rounds_fedavg_base
    # not directly used in the algorithm.
    num_retrain_epochs = num_retrain_epochs_base

  class ConfigAlgorithm0(object):
    # Algorithm 0: retraining without model splitting.
    num_epochs_per_round_fedavg = 1
    num_rounds_fedavg = num_rounds_fedavg_base
    num_retrain_epochs = num_retrain_epochs_base

  class ConfigAlgorithm1(object):
    # Algorithm 1: model splitting.
    num_epochs_per_round_fedavg = 1
    num_rounds_fedavg = num_rounds_fedavg_base
    num_retrain_epochs = num_retrain_epochs_base

  class ConfigAlgorithm2(object):
    # Algorithm 2: model splitting with alternating minimization.
    num_epochs_per_round_fedavg = 1
    # used in the first cycle
    num_rounds_fedavg = num_rounds_fedavg_base
    num_retrain_epochs = num_retrain_epochs_base
    # used from the second cycle
    num_alternative_min = num_alternative_min_base
    num_rounds_fedavg_alter_min = num_rounds_fedavg_base
    num_retrain_epochs_alter_min = num_retrain_epochs_base

  # Maps FLAGS.algorithm ids to their config classes.
  config_collections = {
      -1: ConfigBaseline,
      0: ConfigAlgorithm0,
      1: ConfigAlgorithm1,
      2: ConfigAlgorithm2,
  }

  def __init__(self, algorithm_id):
    # Instantiate the config class matching the requested algorithm id.
    self.algorithm_config = self.config_collections[algorithm_id]()
class Agent(object):
  """Class for clients (id >= 0) and server (id == -1).

  Attributes:
    name: A unique string representing the name of the client, e.g., 'client_0'.
    id: A non-negative integer, consistent with the name, e.g., it is 0 for a
      client named 'client_0'; -1 denotes the server.
    model_train: Model instance built on the training batch.
    model_validation: Model instance built on the validation batch.
    model_test: Model instance built on the test batch.
    update_ops_all: Update ops for all vars.
    update_ops_shared: Update ops for shared vars.
    dict_update_placeholders: A dict of var base name to its update-placeholder.
    read_ops_all_vars: Read ops for all vars.
    read_ops_shared_vars: Read ops for shared vars.

  Raises:
    ValueError: Unknown agent id.
  """

  def __init__(self,
               name,
               data_generator,
               model_class,
               configs=None,
               id_=-1,
               initializer=None):
    """Builds train/validation/test models plus var read/update ops.

    Args:
      name: Unique agent name; also used in the TF name scopes.
      data_generator: Callable returning a per-agent data object (called with
        configs and agent_id).
      model_class: Model constructor (e.g. linear or word-LSTM model).
      configs: Object holding train_config and eval_config.
      id_: Client id (>= 0), or -1 for the server.
      initializer: Optional variable initializer passed to the models.
    """
    self.name = name
    self.id = id_
    self.data = data_generator(configs=configs, agent_id=id_)
    with tf.name_scope(utils.get_train_name_scope(name)):
      train_data = self.data.train_data_batch
      model_train = model_class(
          name,
          is_training=True,
          data=train_data,
          config=configs.train_config,
          initializer=initializer)
    # Validation/test models reuse the training model's variables.
    with tf.name_scope(utils.get_validation_name_scope(name)):
      valid_data = self.data.validation_data_batch
      model_validation = model_class(
          name,
          is_training=False,
          data=valid_data,
          reuse=True,
          config=configs.train_config,
          initializer=initializer)
    with tf.name_scope(utils.get_test_name_scope(name)):
      test_data = self.data.test_data_batch
      model_test = model_class(
          name,
          is_training=False,
          data=test_data,
          reuse=True,
          config=configs.eval_config,
          initializer=initializer)
    self.model_train = model_train
    self.model_validation = model_validation
    self.model_test = model_test
    with tf.name_scope(utils.get_update_name_scope(self.name)):
      # One could use any of the three models in this update name scope, since
      # the vars are shared among them.
      update_ops_shared, placeholders_shared = utils.generate_update_ops(
          self.model_train.shared_vars)
      update_ops_personal, placeholders_personal = utils.generate_update_ops(
          self.model_train.personal_vars)
      update_ops_all = update_ops_shared + update_ops_personal
      # Merges two dicts of placeholders. placeholders_shared and
      # placeholders_personal should have no overlap keys.
      assert not set(placeholders_shared.keys()).intersection(
          placeholders_personal.keys())
      dict_update_placeholders = {}
      dict_update_placeholders.update(placeholders_shared)
      dict_update_placeholders.update(placeholders_personal)
    self.update_ops_all = update_ops_all
    self.update_ops_shared = update_ops_shared
    self.dict_update_placeholders = dict_update_placeholders
    # Ops reading the current variable values (used e.g. when averaging).
    self.read_ops_all_vars = {
        k: v.value() for k, v in self.model_train.var_dict.items()
    }
    self.read_ops_shared_vars = {
        utils.get_base_name(v): v.value() for v in self.model_train.shared_vars
    }

  def train(self,
            sess,
            num_epochs,
            update_vars_type=utils.VARS_TYPE_ALL,
            verbose=False):
    """Trains client for num_epochs.

    Args:
      sess: The TF Session.
      num_epochs: Number of epochs for training.
      update_vars_type: String. Options:
        utils.VARS_TYPE_ALL means all vars.
        utils.VARS_TYPE_SHARED means shared vars.
      verbose: Whether to output intermediate states of the training process.

    Raises:
      ValueError: Unknown update_vars_type.
      RuntimeError: When a server instance is being trained.
    """
    print('Training on client {} for {} epoch(s) ...'.format(
        self.id, num_epochs))
    for _ in range(num_epochs):
      self.train_one_epoch(sess, update_vars_type, verbose)

  def train_one_epoch(self,
                      sess,
                      update_vars_type=utils.VARS_TYPE_ALL,
                      verbose=False):
    """Trains client for one epoch.

    Args:
      sess: The TF Session.
      update_vars_type: String. Options:
        utils.VARS_TYPE_ALL means all vars.
        utils.VARS_TYPE_SHARED means shared vars.
      verbose: Whether prints training status or not.

    Raises:
      ValueError: Unknown update_vars_type.
      RuntimeError: When a server instance is being trained.
    """
    if self.id >= 0:
      self.model_train.run_one_epoch(sess, verbose, update_vars_type)
    else:
      raise RuntimeError('A server cannot be trained!')

  def get_validation_loss(self, sess):
    # Runs one pass over the validation data and returns its loss.
    valid_loss = self.model_validation.run_one_epoch(sess, verbose=False)
    print('validation loss: {}'.format(valid_loss))
    return valid_loss

  def get_test_loss(self, sess):
    # Runs one pass over the test data and returns its loss.
    test_loss = self.model_test.run_one_epoch(sess, verbose=False)
    print('test loss: {}'.format(test_loss))
    return test_loss
def get_data_generator_and_model_class_and_configs():
  """Returns the data-generator class, model class, and configs for the
  model type selected by FLAGS.model_type.

  Raises:
    ValueError: for an unrecognized FLAGS.model_type.
  """
  if FLAGS.model_type == 'linear':
    return (linear_model.RegressionData, linear_model.LinearRegression,
            linear_model.LinearRegressionConfig())
  if FLAGS.model_type == 'lstm':
    # NOTE(review): relies on FLAGS.config_type being defined elsewhere.
    return (word_lstm_model.TextData, word_lstm_model.WordLSTM,
            word_lstm_model.LSTMConfig(FLAGS.config_type))
  raise ValueError('Unknown model type: %s' % FLAGS.model_type)
class Simulator(object):
"""Wraps clients, server and basic components for simulation."""
  def __init__(self, num_clients, data_generator, model_class, configs):
    """Builds all clients, the server, and the summary/logging graph nodes.

    Args:
      num_clients: Total number of clients to create.
      data_generator: Callable producing a per-agent data object.
      model_class: Model constructor shared by all agents.
      configs: Object holding train_config and eval_config.
    """
    self.num_clients = num_clients
    self.initializer = tf.random_uniform_initializer(
        -configs.train_config.init_scale, configs.train_config.init_scale)
    clients = {}
    for client_id in range(num_clients):
      name = CLIENT_BASE_SCOPE + '_%d' % client_id
      client = Agent(
          name,
          data_generator,
          model_class,
          configs=configs,
          id_=client_id,
          initializer=self.initializer)
      clients[client.name] = client
    self.clients = clients
    server_name = SERVER_SCOPE
    server = Agent(
        server_name, data_generator, model_class, configs=configs, id_=-1)
    self.server = server
    # Adds global step for writing summaries.
    self.global_step = tf.Variable(0, name='global_step')
    self.global_step_0 = tf.Variable(0, name='global_step_0')
    self.global_step_increment = self.global_step_0.assign_add(1)
    self.global_step_reset = tf.assign(self.global_step_0, 0)
    # Regex scopes used to collect the per-split loss summaries.
    train_summary_scope = (
        CLIENT_BASE_SCOPE + '.*/' + utils.TRAIN_NAME + '.*/' +
        utils.LOSS_SUMMARY_NAME)
    valid_summary_scope = (
        CLIENT_BASE_SCOPE + '.*/' + utils.VALIDATION_NAME + '.*/' +
        utils.LOSS_SUMMARY_NAME)
    test_loss_scope = (
        CLIENT_BASE_SCOPE + '.*/' + utils.TEST_NAME + '.*/' +
        utils.LOSS_SUMMARY_NAME)
    self.train_summaries = tf.summary.merge_all(scope=train_summary_scope)
    self.valid_summaries = tf.summary.merge_all(scope=valid_summary_scope)
    self.test_summaries = tf.summary.merge_all(scope=test_loss_scope)
    # summary histograms
    self.train_perplexities_placeholder = tf.placeholder(tf.float32, [None])
    self.validation_perplexities_placeholder = tf.placeholder(
        tf.float32, [None])
    self.test_perplexities_placeholder = tf.placeholder(tf.float32, [None])
    self.train_perplexities_histogram = tf.summary.histogram(
        'perplexities_histogram/train', self.train_perplexities_placeholder)
    self.validation_perplexities_histogram = tf.summary.histogram(
        'perplexities_histogram/validation',
        self.validation_perplexities_placeholder)
    self.test_perplexities_histogram = tf.summary.histogram(
        'perplexities_histogram/test', self.test_perplexities_placeholder)
    # key will be the id of client.
    # One record will be denoted as (step, cycle_id, perplexity)
    self.train_log = collections.defaultdict(list)
    self.validation_log = collections.defaultdict(list)
    self.test_log = collections.defaultdict(list)
    self.cycle_id = -1
    # Used to have a different logdir for each run
    self.logdir = None
  def initialize(self, sess):
    """Reset global step and determine the log directories."""
    # Resets global step to be 0.
    sess.run(self.global_step_reset)
    # Timestamped subdirectory gives each run a distinct logdir.
    now = datetime.now()
    time_string = now.strftime('%Y%m%d-%H%M%S')
    if FLAGS.algorithm == 2:
      # Creates subfolders for algorithm 2 since it has the parameter
      # FLAGS.altmin_rounds
      self.step_logdir = os.path.join(
          FLAGS.output_path, STEP_SUMM, ALGORITHM_NAMES[FLAGS.algorithm],
          '{}_altmin_rounds'.format(FLAGS.altmin_rounds), time_string)
      self.cycle_logdir = os.path.join(
          FLAGS.output_path, CYCLE_SUMM, ALGORITHM_NAMES[FLAGS.algorithm],
          '{}_altmin_rounds'.format(FLAGS.altmin_rounds), time_string)
    elif FLAGS.algorithm == -1:
      # Creates subfolders for baseline since it has the parameter
      # FLAGS.baseline_second_cycle_update
      # (ALGORITHM_NAMES[-1] resolves to 'baseline'.)
      self.step_logdir = os.path.join(
          FLAGS.output_path, STEP_SUMM, ALGORITHM_NAMES[FLAGS.algorithm],
          '2nd_cycle_update_{}'.format(FLAGS.baseline_second_cycle_update),
          time_string)
      self.cycle_logdir = os.path.join(
          FLAGS.output_path, CYCLE_SUMM, ALGORITHM_NAMES[FLAGS.algorithm],
          '2nd_cycle_update_{}'.format(FLAGS.baseline_second_cycle_update),
          time_string)
    else:
      # NOTE(review): FLAGS.output_path is not declared in this file's flag
      # block -- presumably defined elsewhere; confirm before running.
      self.step_logdir = os.path.join(FLAGS.output_path, STEP_SUMM,
                                      ALGORITHM_NAMES[FLAGS.algorithm],
                                      time_string)
      self.cycle_logdir = os.path.join(FLAGS.output_path, CYCLE_SUMM,
                                       ALGORITHM_NAMES[FLAGS.algorithm],
                                       time_string)
  def update_clients_from_server(self,
                                 sess,
                                 clients,
                                 update_vars_type=utils.VARS_TYPE_ALL):
    """Updates clients vars from server vars.

    Args:
      sess: TF Session.
      clients: A list of clients that will be updated from server.
      update_vars_type: String. Options: utils.VARS_TYPE_ALL means all vars,
        utils.VARS_TYPE_SHARED means shared vars.

    Raises:
      ValueError: Unknown update_vars_type.
    """
    if update_vars_type == utils.VARS_TYPE_ALL:
      # Read the server values once, then feed them into every client's
      # update placeholders (each value wrapped in a length-1 batch dim).
      server_vars = sess.run(self.server.read_ops_all_vars)
      client_update_ops = [c.update_ops_all for c in clients]
      client_update_ops_feed_dict = {}
      for c in clients:
        for var_base_name, placeholder in c.dict_update_placeholders.items():
          client_update_ops_feed_dict[placeholder] = np.array(
              [server_vars[var_base_name]])
    elif update_vars_type == utils.VARS_TYPE_SHARED:
      # Same idea, restricted to the shared (non-personal) variables.
      server_shared_vars = sess.run(self.server.read_ops_shared_vars)
      client_update_ops = [c.update_ops_shared for c in clients]
      client_update_ops_feed_dict = {}
      for c in clients:
        for shared_var in c.model_train.shared_vars:
          var_base_name = utils.get_base_name(shared_var)
          placeholder = c.dict_update_placeholders[var_base_name]
          client_update_ops_feed_dict[placeholder] = np.array(
              [server_shared_vars[var_base_name]])
    else:
      raise ValueError('Unknown vars update type: %s' % update_vars_type)
    # All client updates run in a single session call.
    sess.run(client_update_ops, feed_dict=client_update_ops_feed_dict)
def update_server_from_clients(self,
sess,
clients,
update_vars_type=utils.VARS_TYPE_ALL):
"""Updates server vars to be the weighted average of client vars.
Args:
sess: TF Session.
clients: A list of | |
# Repository: dlasecki/z-quantum-core
"""General-purpose utilities."""
import warnings
import numpy as np
from scipy.linalg import expm
import random
import math
import operator
import sys
import json
import openfermion
import sympy
from openfermion import hermitian_conjugated
from openfermion import InteractionRDM
from openfermion.ops import SymbolicOperator
from networkx.readwrite import json_graph
import lea
import collections
import scipy
from typing import List, Tuple, Optional, Iterable
import importlib
SCHEMA_VERSION = "zapata-v1"
RNDSEED = 12345
def convert_dict_to_array(dictionary: dict) -> np.ndarray:
    """Convert a dictionary to a numpy array.

    Args:
        dictionary (dict): the dict containing "real" data, and optionally
            "imag" data for complex arrays

    Returns:
        array (numpy.array): a numpy array (complex if "imag" was present)
    """
    result = np.array(dictionary["real"])
    imag_part = dictionary.get("imag")
    if imag_part:
        result = result + 1j * np.array(imag_part)
    return result
def convert_array_to_dict(array: np.ndarray) -> dict:
    """Convert a numpy array to a dictionary.

    Args:
        array (numpy.array): a numpy array

    Returns:
        dictionary (dict): dict with a "real" entry and, for complex input,
            an "imag" entry
    """
    if np.iscomplexobj(array):
        return {"real": array.real.tolist(), "imag": array.imag.tolist()}
    return {"real": array.tolist()}
def dec2bin(number: int, length: int) -> List[int]:
    """Converts a decimal number into a binary representation
    of fixed number of bits.

    Args:
        number: (int) the input decimal number (non-negative)
        length: (int) number of bits in the output string

    Returns:
        A list of `length` binary digits, most significant bit first
    """
    # `length` bits can hold values 0 .. 2**length - 1, so any number
    # >= 2**length is unrepresentable.  The previous check used `<`, which
    # let number == 2**length through and silently produced length+1 bits.
    if pow(2, length) <= number:
        sys.exit(
            "Insufficient number of bits for representing the number {}".format(number)
        )
    bit_str = bin(number)[2:]  # chop off the "0b" prefix
    # Left-pad with zeros to the requested fixed width.
    return [int(x) for x in bit_str.zfill(length)]
def bin2dec(x: List[int]) -> int:
    """Converts a binary vector to an integer, with the 0-th
    element being the most significant digit.

    Args:
        x: (list) a binary vector

    Returns:
        An integer
    """
    result = 0
    for bit in x:
        result = result * 2 + bit
    return result
"""
The functions PAULI_X, PAULI_Y, PAULI_Z and IDENTITY below are used for
generating the generators of the Pauli group, which include Pauli X, Y, Z
operators as well as identity operator
"""
pauli_x = np.array([[0.0, 1.0], [1.0, 0.0]])
pauli_y = np.array([[0.0, -1.0j], [1.0j, 0.0]])
pauli_z = np.array([[1.0, 0.0], [0.0, -1.0]])
identity = np.array([[1.0, 0.0], [0.0, 1.0]])
def is_identity(u, tol=1e-15):
    """Test if a matrix is identity.

    Args:
        u: array-like
            Matrix to be checked.
        tol: float
            Threshold below which two matrix elements are considered equal.

    Returns:
        bool: True when u is the identity matrix to within tol.

    Raises:
        Exception: if the input matrix is not square.
    """
    # Convert once up front: the previous code converted only for the square
    # check and then crashed on `u.shape` when a plain list was passed in.
    u = np.asarray(u)
    dims = u.shape
    if dims[0] != dims[1]:
        raise Exception("Input matrix is not square.")
    return np.allclose(u, np.eye(dims[0]), atol=tol)
def is_unitary(u, tol=1e-15):
    """Test if a matrix is unitary.

    Args:
        u: array-like
            Matrix to be checked.
        tol: float
            Threshold below which two matrix elements are considered equal.

    Returns:
        bool: True when u^dagger u equals the identity to within tol.

    Raises:
        Exception: if the input matrix is not square.
    """
    u = np.asarray(u)
    dims = u.shape
    if dims[0] != dims[1]:
        raise Exception("Input matrix is not square.")
    # Use numpy's conjugate transpose directly: openfermion's
    # hermitian_conjugated is meant for operator objects, not plain ndarrays.
    test_matrix = np.dot(u.conj().T, u)
    # u is unitary iff u^dagger u is the identity.
    return np.allclose(test_matrix, np.eye(dims[0]), atol=tol)
def compare_unitary(u1: np.ndarray, u2: np.ndarray, tol: float = 1e-15) -> bool:
    """Compares two unitary operators to see if they are equal to within a phase.

    Args:
        u1 (numpy.ndarray): First unitary operator.
        u2 (numpy.ndarray): Second unitary operator.
        tol (float): Threshold below which two matrix elements are considered equal.

    Returns:
        bool: True if the unitaries are equal to within the tolerance, ignoring
            differences in global phase.

    Raises:
        Exception: if either input fails the unitarity check.
    """
    # `not is_unitary(...)` rather than `== False`: PEP 8 forbids equality
    # comparisons against True/False.
    if not is_unitary(u1, tol):
        raise Exception("The first input matrix is not unitary.")
    if not is_unitary(u2, tol):
        raise Exception("The second input matrix is not unitary.")
    # When u1 and u2 differ only by a global phase, u1^dagger u2 is that
    # phase times the identity; divide the phase out and test for identity.
    test_matrix = np.dot(u1.conj().T, u2)
    # NOTE(review): assumes the (0, 0) entry is nonzero; a zero entry would
    # raise ZeroDivisionError -- confirm inputs before relying on this.
    phase = test_matrix.item((0, 0)) ** -1
    return is_identity(phase * test_matrix, tol)
def sample_from_probability_distribution(
    probability_distribution: dict, n_samples: int
) -> collections.Counter:
    """
    Samples events from a discrete probability distribution

    Args:
        probability_distribution: The discrete probability distribution to be
            used for sampling. This should be a dictionary mapping outcomes to
            their probabilities
        n_samples (int): The number of samples desired

    Returns:
        A collections.Counter of the outcomes sampled: keys are the sampled
        outcomes and values are how many times each appeared in the sampling
    """
    # Guard clause: reject anything that is not a plain dict up front.
    if not isinstance(probability_distribution, dict):
        raise RuntimeError(
            "Probability distribution should be a dictionary with key value \
            being the thing being sampled and the value being probability of getting \
            sampled "
        )
    distribution = lea.pmf(probability_distribution)
    return collections.Counter(distribution.random(n_samples))
def convert_bitstrings_to_tuples(bitstrings):
    """Given the measured bitstrings, convert each bitstring to tuple format

    Args:
        bitstrings (list of strings): the measured bitstrings

    Returns:
        A list of tuples of ints, one tuple per input bitstring
    """
    return [tuple(int(char) for char in bitstring) for bitstring in bitstrings]
def convert_tuples_to_bitstrings(tuples):
    """Given a set of measurement tuples, convert each to a little endian
    string.

    Args:
        tuples (list of tuples): the measurement tuples

    Returns:
        A list of bitstrings, one string per input tuple
    """
    return ["".join(str(bit) for bit in tuple_item) for tuple_item in tuples]
class ValueEstimate(float):
    """A class representing a numerical value and its precision corresponding
    to an observable or an objective function

    Args:
        value (np.float): the numerical value or a value that can be converted to float
        precision (np.float): its precision

    Attributes:
        precision (np.float): its precision (None when unknown)
    """

    def __init__(self, value, precision: Optional[float] = None):
        super().__init__()
        self.precision = precision

    def __new__(cls, value, precision=None):
        # float is immutable, so the numeric value must be set in __new__.
        return super().__new__(cls, value)

    @property
    def value(self):
        warnings.warn(
            "The value attribute is deprecated. Use ValueEstimate object directly instead.",
            DeprecationWarning,
        )
        return float(self)

    def __eq__(self, other):
        # Equal only when both the float value and the precision match.
        super_eq = super().__eq__(other)
        if super_eq is NotImplemented:
            return super_eq
        return super_eq and self.precision == getattr(other, "precision", None)

    def __ne__(self, other):
        return not self == other

    def __str__(self):
        value_str = super().__str__()
        if self.precision is not None:
            return f"{value_str} ± {self.precision}"
        else:
            return f"{value_str}"

    def to_dict(self):
        """Convert to a dictionary"""
        data = {"schema": SCHEMA_VERSION + "-value_estimate"}
        # Fix: reading self.value here emitted the DeprecationWarning raised
        # by the property itself (twice); float(self) is the non-deprecated
        # spelling.  It is always a builtin float, so the old numpy-module
        # check on it was dead code.
        data["value"] = float(self)
        if type(self.precision).__module__ == np.__name__:
            data["precision"] = self.precision.item()
        else:
            data["precision"] = self.precision
        return data

    @classmethod
    def from_dict(cls, dictionary):
        """Create an ExpectationValues object from a dictionary."""
        value = dictionary["value"]
        if "precision" in dictionary:
            precision = dictionary["precision"]
            return cls(value, precision)
        else:
            return cls(value)
def load_value_estimate(file):
    """Loads a value estimate from a file.

    Args:
        file (str or file-like object): the name of the file, or a file-like object.

    Returns:
        ValueEstimate: the loaded value estimate
    """
    if not isinstance(file, str):
        data = json.load(file)
    else:
        with open(file, "r") as f:
            data = json.load(f)
    return ValueEstimate.from_dict(data)
def save_value_estimate(value_estimate, filename):
    """Saves value estimate to a file.

    Args:
        value_estimate (core.utils.ValueEstimate): the value estimate
        filename (str): the name of the output file
    """
    payload = value_estimate.to_dict()
    # Overwrite the schema tag so the output stays uniform even if
    # to_dict's tag changes.
    payload["schema"] = SCHEMA_VERSION + "-value_estimate"
    with open(filename, "w") as f:
        f.write(json.dumps(payload, indent=2))
def load_list(file):
    """Load a list from a JSON file.

    Args:
        file (str or file-like object): the name of the file, or a file-like object.

    Returns:
        list: the loaded list
    """
    if not isinstance(file, str):
        payload = json.load(file)
    else:
        with open(file, "r") as handle:
            payload = json.load(handle)
    return payload["list"]
def save_list(array, filename, artifact_name=""):
    """Save a list to a JSON file.

    Args:
        array (list): the list to be saved
        filename (str): the name of the output file
        artifact_name (str): optional argument to specify the schema name
    """
    payload = {
        "schema": SCHEMA_VERSION + "-" + artifact_name + "-list",
        "list": array,
    }
    with open(filename, "w") as f:
        f.write(json.dumps(payload, indent=2))
def save_generic_dict(dictionary, filename):
    """Save dictionary as json

    Args:
        dictionary (dict): the dict containing the data
        filename (str): the name of the output file
    """
    # The schema key comes first; caller entries are merged after it.
    payload = {"schema": SCHEMA_VERSION + "-dict"}
    payload.update(dictionary)
    with open(filename, "w") as f:
        f.write(json.dumps(payload, indent=2))
def get_func_from_specs(specs):
    """
    Return function based on given specs.

    Note: this pops "module_name" and "function_name" out of `specs`,
    mutating the caller's dictionary (callers rely on the leftover keys).

    Args:
        specs (dict): dictionary containing the following keys:
            module_name: specifies from which module an function comes.
            function_name: specifies the name of the function.

    Returns:
        callable: function defined by specs
    """
    target_module = importlib.import_module(specs.pop("module_name"))
    return getattr(target_module, specs.pop("function_name"))
def create_object(specs, **kwargs):
"""
Creates an object based on given specs.
Specs include information about module and function necessary to create the object,
as well as any additional input parameters for it.
Args:
specs (dict): dictionary containing the following keys:
module_name: specifies from which module an object comes.
function_name: specifies the name of the function used to create object.
Returns:
| |
already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
    def buildAttributes(self, node, attrs, already_processed):
        # This element type defines no XML attributes, so there is nothing to
        # parse here; generateDS emits this stub for interface uniformity.
        pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'Contact':
obj_ = Contact.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.Contact = obj_
obj_.original_tagname_ = 'Contact'
elif nodeName_ == 'Address':
obj_ = Address.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.Address = obj_
obj_.original_tagname_ = 'Address'
elif nodeName_ == 'AddressAncillaryDetail':
obj_ = AddressAncillaryDetail.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.AddressAncillaryDetail = obj_
obj_.original_tagname_ = 'AddressAncillaryDetail'
# end class LocationContactAndAddress
class LocationDetail(GeneratedsSuper):
"""Describes an individual location providing a set of customer service
features."""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
    def __init__(self, LocationId=None, StoreNumber=None, LocationContactAndAddress=None, SpecialInstructions=None, TimeZoneOffset=None, LocationType=None, LocationTypeForDisplay=None, Attributes=None, LocationCapabilities=None, PackageMaximumLimits=None, ClearanceLocationDetail=None, ServicingLocationDetails=None, AcceptedCurrency=None, LocationHolidays=None, MapUrl=None, EntityId=None, NormalHours=None, ExceptionalHours=None, HoursForEffectiveDate=None, CarrierDetails=None, gds_collector_=None, **kwargs_):
        """Initializes every element field of this generated binding.

        All elements default to None; list-valued elements (maxOccurs > 1 in
        the schema) get fresh empty lists.  The *_nsprefix_ attributes track
        per-element namespace prefixes used during XML export.
        """
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        self.LocationId = LocationId
        self.LocationId_nsprefix_ = None
        self.StoreNumber = StoreNumber
        self.StoreNumber_nsprefix_ = None
        self.LocationContactAndAddress = LocationContactAndAddress
        self.LocationContactAndAddress_nsprefix_ = None
        self.SpecialInstructions = SpecialInstructions
        self.SpecialInstructions_nsprefix_ = None
        self.TimeZoneOffset = TimeZoneOffset
        self.TimeZoneOffset_nsprefix_ = None
        self.LocationType = LocationType
        # Validate against the FedExLocationType enumeration immediately.
        self.validate_FedExLocationType(self.LocationType)
        self.LocationType_nsprefix_ = None
        self.LocationTypeForDisplay = LocationTypeForDisplay
        self.LocationTypeForDisplay_nsprefix_ = None
        # Fresh list per instance avoids the shared-mutable-default pitfall.
        if Attributes is None:
            self.Attributes = []
        else:
            self.Attributes = Attributes
        self.Attributes_nsprefix_ = None
        if LocationCapabilities is None:
            self.LocationCapabilities = []
        else:
            self.LocationCapabilities = LocationCapabilities
        self.LocationCapabilities_nsprefix_ = None
        self.PackageMaximumLimits = PackageMaximumLimits
        self.PackageMaximumLimits_nsprefix_ = None
        self.ClearanceLocationDetail = ClearanceLocationDetail
        self.ClearanceLocationDetail_nsprefix_ = None
        if ServicingLocationDetails is None:
            self.ServicingLocationDetails = []
        else:
            self.ServicingLocationDetails = ServicingLocationDetails
        self.ServicingLocationDetails_nsprefix_ = None
        self.AcceptedCurrency = AcceptedCurrency
        self.AcceptedCurrency_nsprefix_ = None
        if LocationHolidays is None:
            self.LocationHolidays = []
        else:
            self.LocationHolidays = LocationHolidays
        self.LocationHolidays_nsprefix_ = None
        self.MapUrl = MapUrl
        self.MapUrl_nsprefix_ = None
        self.EntityId = EntityId
        self.EntityId_nsprefix_ = None
        if NormalHours is None:
            self.NormalHours = []
        else:
            self.NormalHours = NormalHours
        self.NormalHours_nsprefix_ = None
        if ExceptionalHours is None:
            self.ExceptionalHours = []
        else:
            self.ExceptionalHours = ExceptionalHours
        self.ExceptionalHours_nsprefix_ = None
        if HoursForEffectiveDate is None:
            self.HoursForEffectiveDate = []
        else:
            self.HoursForEffectiveDate = HoursForEffectiveDate
        self.HoursForEffectiveDate_nsprefix_ = None
        if CarrierDetails is None:
            self.CarrierDetails = []
        else:
            self.CarrierDetails = CarrierDetails
        self.CarrierDetails_nsprefix_ = None
    def factory(*args_, **kwargs_):
        # Standard generateDS factory: prefer a subclass registered in the
        # external subclass module, then the class-level `subclass` hook,
        # and finally LocationDetail itself.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, LocationDetail)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if LocationDetail.subclass:
            return LocationDetail.subclass(*args_, **kwargs_)
        else:
            return LocationDetail(*args_, **kwargs_)
    factory = staticmethod(factory)
    # ------------------------------------------------------------------
    # Generated accessors.  Scalar children get get_/set_ pairs; the
    # list-valued children (Attributes, LocationCapabilities,
    # ServicingLocationDetails, LocationHolidays, NormalHours,
    # ExceptionalHours, HoursForEffectiveDate, CarrierDetails) also get
    # add_/insert_/replace_ helpers that mutate the underlying list.
    # ------------------------------------------------------------------
    def get_ns_prefix_(self):
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        self.ns_prefix_ = ns_prefix
    def get_LocationId(self):
        return self.LocationId
    def set_LocationId(self, LocationId):
        self.LocationId = LocationId
    def get_StoreNumber(self):
        return self.StoreNumber
    def set_StoreNumber(self, StoreNumber):
        self.StoreNumber = StoreNumber
    def get_LocationContactAndAddress(self):
        return self.LocationContactAndAddress
    def set_LocationContactAndAddress(self, LocationContactAndAddress):
        self.LocationContactAndAddress = LocationContactAndAddress
    def get_SpecialInstructions(self):
        return self.SpecialInstructions
    def set_SpecialInstructions(self, SpecialInstructions):
        self.SpecialInstructions = SpecialInstructions
    def get_TimeZoneOffset(self):
        return self.TimeZoneOffset
    def set_TimeZoneOffset(self, TimeZoneOffset):
        self.TimeZoneOffset = TimeZoneOffset
    def get_LocationType(self):
        return self.LocationType
    def set_LocationType(self, LocationType):
        self.LocationType = LocationType
    def get_LocationTypeForDisplay(self):
        return self.LocationTypeForDisplay
    def set_LocationTypeForDisplay(self, LocationTypeForDisplay):
        self.LocationTypeForDisplay = LocationTypeForDisplay
    # Attributes: repeating element (list-valued)
    def get_Attributes(self):
        return self.Attributes
    def set_Attributes(self, Attributes):
        self.Attributes = Attributes
    def add_Attributes(self, value):
        self.Attributes.append(value)
    def insert_Attributes_at(self, index, value):
        self.Attributes.insert(index, value)
    def replace_Attributes_at(self, index, value):
        self.Attributes[index] = value
    # LocationCapabilities: repeating element (list-valued)
    def get_LocationCapabilities(self):
        return self.LocationCapabilities
    def set_LocationCapabilities(self, LocationCapabilities):
        self.LocationCapabilities = LocationCapabilities
    def add_LocationCapabilities(self, value):
        self.LocationCapabilities.append(value)
    def insert_LocationCapabilities_at(self, index, value):
        self.LocationCapabilities.insert(index, value)
    def replace_LocationCapabilities_at(self, index, value):
        self.LocationCapabilities[index] = value
    def get_PackageMaximumLimits(self):
        return self.PackageMaximumLimits
    def set_PackageMaximumLimits(self, PackageMaximumLimits):
        self.PackageMaximumLimits = PackageMaximumLimits
    def get_ClearanceLocationDetail(self):
        return self.ClearanceLocationDetail
    def set_ClearanceLocationDetail(self, ClearanceLocationDetail):
        self.ClearanceLocationDetail = ClearanceLocationDetail
    # ServicingLocationDetails: repeating element (list-valued)
    def get_ServicingLocationDetails(self):
        return self.ServicingLocationDetails
    def set_ServicingLocationDetails(self, ServicingLocationDetails):
        self.ServicingLocationDetails = ServicingLocationDetails
    def add_ServicingLocationDetails(self, value):
        self.ServicingLocationDetails.append(value)
    def insert_ServicingLocationDetails_at(self, index, value):
        self.ServicingLocationDetails.insert(index, value)
    def replace_ServicingLocationDetails_at(self, index, value):
        self.ServicingLocationDetails[index] = value
    def get_AcceptedCurrency(self):
        return self.AcceptedCurrency
    def set_AcceptedCurrency(self, AcceptedCurrency):
        self.AcceptedCurrency = AcceptedCurrency
    # LocationHolidays: repeating element (list-valued)
    def get_LocationHolidays(self):
        return self.LocationHolidays
    def set_LocationHolidays(self, LocationHolidays):
        self.LocationHolidays = LocationHolidays
    def add_LocationHolidays(self, value):
        self.LocationHolidays.append(value)
    def insert_LocationHolidays_at(self, index, value):
        self.LocationHolidays.insert(index, value)
    def replace_LocationHolidays_at(self, index, value):
        self.LocationHolidays[index] = value
    def get_MapUrl(self):
        return self.MapUrl
    def set_MapUrl(self, MapUrl):
        self.MapUrl = MapUrl
    def get_EntityId(self):
        return self.EntityId
    def set_EntityId(self, EntityId):
        self.EntityId = EntityId
    # NormalHours: repeating element (list-valued)
    def get_NormalHours(self):
        return self.NormalHours
    def set_NormalHours(self, NormalHours):
        self.NormalHours = NormalHours
    def add_NormalHours(self, value):
        self.NormalHours.append(value)
    def insert_NormalHours_at(self, index, value):
        self.NormalHours.insert(index, value)
    def replace_NormalHours_at(self, index, value):
        self.NormalHours[index] = value
    # ExceptionalHours: repeating element (list-valued)
    def get_ExceptionalHours(self):
        return self.ExceptionalHours
    def set_ExceptionalHours(self, ExceptionalHours):
        self.ExceptionalHours = ExceptionalHours
    def add_ExceptionalHours(self, value):
        self.ExceptionalHours.append(value)
    def insert_ExceptionalHours_at(self, index, value):
        self.ExceptionalHours.insert(index, value)
    def replace_ExceptionalHours_at(self, index, value):
        self.ExceptionalHours[index] = value
    # HoursForEffectiveDate: repeating element (list-valued)
    def get_HoursForEffectiveDate(self):
        return self.HoursForEffectiveDate
    def set_HoursForEffectiveDate(self, HoursForEffectiveDate):
        self.HoursForEffectiveDate = HoursForEffectiveDate
    def add_HoursForEffectiveDate(self, value):
        self.HoursForEffectiveDate.append(value)
    def insert_HoursForEffectiveDate_at(self, index, value):
        self.HoursForEffectiveDate.insert(index, value)
    def replace_HoursForEffectiveDate_at(self, index, value):
        self.HoursForEffectiveDate[index] = value
    # CarrierDetails: repeating element (list-valued)
    def get_CarrierDetails(self):
        return self.CarrierDetails
    def set_CarrierDetails(self, CarrierDetails):
        self.CarrierDetails = CarrierDetails
    def add_CarrierDetails(self, value):
        self.CarrierDetails.append(value)
    def insert_CarrierDetails_at(self, index, value):
        self.CarrierDetails.insert(index, value)
    def replace_CarrierDetails_at(self, index, value):
        self.CarrierDetails[index] = value
def validate_FedExLocationType(self, value):
result = True
# Validate type FedExLocationType, a restriction on xs:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
value = value
enumerations = ['FEDEX_AUTHORIZED_SHIP_CENTER', 'FEDEX_EXPRESS_STATION', 'FEDEX_FACILITY', 'FEDEX_FREIGHT_SERVICE_CENTER', 'FEDEX_GROUND_TERMINAL', 'FEDEX_HOME_DELIVERY_STATION', 'FEDEX_OFFICE', 'FEDEX_ONSITE', 'FEDEX_SELF_SERVICE_LOCATION', 'FEDEX_SHIPSITE', 'FEDEX_SHIP_AND_GET', 'FEDEX_SMART_POST_HUB']
if value not in enumerations:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on FedExLocationType' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def validate_LocationAttributesType(self, value):
result = True
# Validate type LocationAttributesType, a restriction on xs:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
value = value
enumerations = ['ACCEPTS_CASH', 'ALREADY_OPEN', 'CLEARANCE_SERVICES', 'COPY_AND_PRINT_SERVICES', 'DANGEROUS_GOODS_SERVICES', 'DIRECT_MAIL_SERVICES', 'DOMESTIC_SHIPPING_SERVICES', 'DROP_BOX', 'INTERNATIONAL_SHIPPING_SERVICES', 'LOCATION_IS_IN_AIRPORT', 'NOTARY_SERVICES', 'OBSERVES_DAY_LIGHT_SAVING_TIMES', 'OPEN_TWENTY_FOUR_HOURS', 'PACKAGING_SUPPLIES', 'PACK_AND_SHIP', 'PASSPORT_PHOTO_SERVICES', 'RETURNS_SERVICES', 'SIGNS_AND_BANNERS_SERVICE', 'SONY_PICTURE_STATION']
if value not in enumerations:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on LocationAttributesType' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def hasContent_(self):
if (
self.LocationId is not None or
self.StoreNumber is not None or
self.LocationContactAndAddress is not None or
self.SpecialInstructions is not None or
self.TimeZoneOffset is not None or
self.LocationType is not None or
self.LocationTypeForDisplay is not None or
self.Attributes or
self.LocationCapabilities or
self.PackageMaximumLimits is not None or
self.ClearanceLocationDetail is not None or
self.ServicingLocationDetails or
self.AcceptedCurrency is not None or
self.LocationHolidays or
self.MapUrl is not None or
self.EntityId is not None or
self.NormalHours or
self.ExceptionalHours or
self.HoursForEffectiveDate or
self.CarrierDetails
):
return True
else:
return False
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='LocationDetail', pretty_print=True):
        # Serialize this element and its children as XML to outfile.
        # Output is written piecewise and is order-sensitive; logic is left
        # byte-identical to the generated code.
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('LocationDetail')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # if this object was parsed from a differently-named tag, re-emit that tag
        if self.original_tagname_ is not None and name_ == 'LocationDetail':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='LocationDetail')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='LocationDetail', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # no children: emit a self-closing tag instead
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='LocationDetail'):
        # LocationDetail declares no XML attributes, so there is nothing to emit.
        pass
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='LocationDetail', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.LocationId is not None:
namespaceprefix_ = self.LocationId_nsprefix_ + ':' if (UseCapturedNS_ and self.LocationId_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sLocationId>%s</%sLocationId>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.LocationId), input_name='LocationId')), namespaceprefix_ , eol_))
if self.StoreNumber is not None:
namespaceprefix_ = self.StoreNumber_nsprefix_ + ':' if (UseCapturedNS_ and self.StoreNumber_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sStoreNumber>%s</%sStoreNumber>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.StoreNumber), input_name='StoreNumber')), namespaceprefix_ , eol_))
if self.LocationContactAndAddress is not None:
namespaceprefix_ = self.LocationContactAndAddress_nsprefix_ + ':' if (UseCapturedNS_ and self.LocationContactAndAddress_nsprefix_) else ''
self.LocationContactAndAddress.export(outfile, level, namespaceprefix_, namespacedef_='', name_='LocationContactAndAddress', pretty_print=pretty_print)
if self.SpecialInstructions is not None:
namespaceprefix_ = self.SpecialInstructions_nsprefix_ + ':' if (UseCapturedNS_ and self.SpecialInstructions_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sSpecialInstructions>%s</%sSpecialInstructions>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.SpecialInstructions), input_name='SpecialInstructions')), namespaceprefix_ , eol_))
if self.TimeZoneOffset is not None:
namespaceprefix_ = self.TimeZoneOffset_nsprefix_ + ':' if (UseCapturedNS_ and self.TimeZoneOffset_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sTimeZoneOffset>%s</%sTimeZoneOffset>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.TimeZoneOffset), input_name='TimeZoneOffset')), namespaceprefix_ , eol_))
if self.LocationType is not None:
namespaceprefix_ = self.LocationType_nsprefix_ + ':' if (UseCapturedNS_ and self.LocationType_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sLocationType>%s</%sLocationType>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.LocationType), input_name='LocationType')), namespaceprefix_ , eol_))
if self.LocationTypeForDisplay is | |
exact-matching
:return: tuple(newENname, translate_type)
"""
# first, if en name is already good (not blank and not JP and not a known exception), just keep it
if PREFER_EXISTING_ENGLISH_NAME and en and not en.isspace() and en.lower() not in FORBIDDEN_ENGLISH_NAMES \
and not translation_tools.needs_translate(en):
return en, 0
# do pretranslate here: better for exact matching against morphs that have sad/sad_L/sad_R etc
# TODO: save the pretranslate results so I don't need to do it twice more? meh, it runs just fine
indent, body, suffix = translation_tools.pre_translate(jp)
# second, jp name is already good english, copy jp name -> en name
if body and not body.isspace() and not translation_tools.needs_translate(body):
return (indent + body + suffix), 1
# third, see if this name is an exact match in the specific dict for this specific type
if specific_dict is not None and body in specific_dict:
return (indent + specific_dict[body] + suffix), 2
# if none of these pass, return nothing & type -1 to signfiy it is still in progress
return "", -1
# constrained generic: callers that pass a str get a str back, list-in -> list-out
STR_OR_STRLIST = TypeVar("STR_OR_STRLIST", str, List[str])
def google_translate(in_list: STR_OR_STRLIST, strategy=1) -> STR_OR_STRLIST:
	"""
	Take a list of strings & get them all translated by asking Google. Can use per-line strategy or new 'chunkwise' strategy.
	:param in_list: list of JP or partially JP strings (a bare str is also accepted and returned as str)
	:param strategy: 0=old per-line strategy, 1=new chunkwise strategy, 2=auto choose whichever needs less Google traffic
	:return: list of strings probably pure EN, but sometimes odd unicode symbols show up
	"""
	input_is_str = isinstance(in_list, str)
	if input_is_str: in_list = [in_list]  # force it to be a list anyway so I don't have to change my structure
	use_chunk_strat = (strategy == 1)
	# in_list -> pretrans -> jp_chunks_set -> jp_chunks -> jp_chunks_packets -> results_packets -> results
	# jp_chunks + results -> google_dict
	# pretrans + google_dict -> outlist
	# 1. pre-translate to take care of common tasks
	indents, bodies, suffixes = translation_tools.pre_translate(in_list)
	# 2. identify chunks: contiguous runs of JP characters inside each body
	jp_chunks_set = set()
	# idea: walk & look for transition from en to jp?
	for s in bodies:  # for every string to translate,
		rstart = 0
		prev_islatin = True
		is_latin = True
		for i in range(len(s)):  # walk along its length one char at a time,
			# use "is_jp" here and not "is_latin" so chunks are defined to be only actual JP stuff and not unicode whatevers
			is_latin = not translation_tools.is_jp(s[i])
			# if char WAS latin but now is NOT latin, then this is the start of a range.
			if prev_islatin and not is_latin:
				rstart = i
			# if it was jp and is now latin, then this is the end of a range (not including here). save it!
			elif is_latin and not prev_islatin:
				jp_chunks_set.add(s[rstart:i])
			prev_islatin = is_latin
		# now outside the loop... if it ended with a non-latin char, grab the final range & add that too
		if not is_latin:
			jp_chunks_set.add(s[rstart:len(s)])
	# 3. remove chunks I can already solve
	# maybe localtrans can solve one chunk but not the whole string?
	# chunks are guaranteed to not be PART OF compound words. but they are probably compound words themselves.
	# run local trans on each chunk individually, and if it succeeds, then DON'T send it to google.
	localtrans_dict = dict()
	jp_chunks = []
	for chunk in list(jp_chunks_set):
		trans = translation_tools.piecewise_translate(chunk, translation_tools.words_dict)
		if translation_tools.is_jp(trans):
			# if the localtrans failed, then the chunk needs to be sent to google later
			jp_chunks.append(chunk)
		else:
			# if it passed, no need to ask google what they mean cuz I already have a good translation for this chunk
			# this will be added to the dict way later
			localtrans_dict[chunk] = trans
	# 4. packetize them into fewer requests (and if auto, choose whether to use chunks or not)
	jp_chunks_packets = packetize_translate_requests(jp_chunks)
	jp_bodies_packets = packetize_translate_requests(bodies)
	if strategy == 2: use_chunk_strat = (len(jp_chunks_packets) < len(jp_bodies_packets))
	# 5. check the translate budget to see if I can afford this
	if use_chunk_strat: num_calls = len(jp_chunks_packets)
	else: num_calls = len(jp_bodies_packets)
	global _DISABLE_INTERNET_TRANSLATE
	if check_translate_budget(num_calls) and not _DISABLE_INTERNET_TRANSLATE:
		core.MY_PRINT_FUNC("... making %d requests to Google Translate web API..." % num_calls)
	else:
		# no need to print failing statement, the function already does
		# don't quit early, run thru the same full structure & eventually return a copy of the JP names
		core.MY_PRINT_FUNC("Just copying JP -> EN while Google Translate is disabled")
		_DISABLE_INTERNET_TRANSLATE = True
	# 6. send chunks to Google
	results_packets = []
	if use_chunk_strat:
		for d, packet in enumerate(jp_chunks_packets):
			core.print_progress_oneline(d / len(jp_chunks_packets))
			r = _single_google_translate(packet)
			results_packets.append(r)
		# 7. assemble Google responses & re-associate with the chunks
		# order of inputs "jp_chunks" matches order of outputs "results"
		results = unpacketize_translate_requests(results_packets)  # unpack
		google_dict = dict(zip(jp_chunks, results))  # build dict
		# (debug prints of the raw stats/google_dict removed: they bypassed
		# core.MY_PRINT_FUNC and dumped the whole dict to stdout)
		google_dict.update(localtrans_dict)  # add dict entries from things that succeeded localtrans
		google_dict.update(translation_tools.words_dict)  # add the full-blown words dict to the chunk-translate results
		# dict->list->sort->dict: sort the longest chunks first, VERY CRITICAL so things don't get undershadowed!!!
		google_dict = dict(sorted(list(google_dict.items()), reverse=True, key=lambda x: len(x[0])))
		# 8. piecewise translate using newly created dict
		outlist = translation_tools.piecewise_translate(bodies, google_dict)
	else:
		# old style: just translate the strings directly and return their results
		for d, packet in enumerate(jp_bodies_packets):
			core.print_progress_oneline(d / len(jp_bodies_packets))
			r = _single_google_translate(packet)
			results_packets.append(r)
		outlist = unpacketize_translate_requests(results_packets)
	# last, reattach the indents and suffixes
	outlist_final = [i + b + s for i, b, s in zip(indents, outlist, suffixes)]
	if not _DISABLE_INTERNET_TRANSLATE:
		# if i did use internet translate, print this line when done
		core.MY_PRINT_FUNC("... done!")
	# return
	if input_is_str: return outlist_final[0]  # if original input was a single string, then de-listify
	else: return outlist_final  # otherwise return as a list
################################################################################################################
# user-facing help text printed by showhelp()
helptext = '''====================
translate_to_english:
This tool fills out empty EN names in a PMX model with translated versions of the JP names.
It tries to do some intelligent piecewise translation using a local dictionary but goes to Google Translate if that fails.
Machine translation is never 100% reliable, so this is only a stopgap measure to eliminate all the 'Null_##'s and wrongly-encoded garbage and make it easier to use in MMD. A bad translation is better than none at all!
Also, Google Translate only permits ~100 requests per hour, if you exceed this rate you will be locked out for 24 hours (TODO: CONFIRM LOCKOUT TIME)
But my script has a built in limiter that will prevent you from translating if you would exceed the 100-per-hr limit.
'''
# input/output summary printed by showprompt()
iotext = '''Inputs: PMX file "[model].pmx"\nOutputs: PMX file "[model]_translate.pmx"
'''
def showhelp():
    # print info to explain the purpose of this file
    core.MY_PRINT_FUNC(helptext)
def showprompt():
    # print info to explain what inputs/outputs it needs/creates
    core.MY_PRINT_FUNC(iotext)
    # prompt PMX name
    core.MY_PRINT_FUNC("Please enter name of PMX model file:")
    input_filename_pmx = core.prompt_user_filename(".pmx")
    # parse the chosen PMX into memory and hand both pieces back to the caller
    pmx = pmxlib.read_pmx(input_filename_pmx, moreinfo=True)
    return pmx, input_filename_pmx
# class with named fields is a bit better than just a list of lists with prescribed order
class translate_entry:
    """One name-translation record: the original JP/EN names, where the item
    lives in the model (category id + index), the proposed new EN name, and a
    code describing how that translation was produced."""
    def __init__(self, jp_old, en_old, cat_id, idx, en_new, trans_type):
        self.jp_old = jp_old          # original JP name
        self.en_old = en_old          # original EN name
        self.cat_id = cat_id          # aka category aka type
        self.idx = idx                # index within the category (aka which bone)
        self.en_new = en_new          # proposed EN replacement name
        self.trans_type = trans_type  # code for how the translation was produced
    def __str__(self):
        fmt = "jp_old:%s en_old:%s cat_id:%d idx:%d en_new:%s trans_type:%d"
        return fmt % (self.jp_old, self.en_old, self.cat_id,
                      self.idx, self.en_new, self.trans_type)
def translate_to_english(pmx: pmxstruct.Pmx, moreinfo=False):
# for each category,
# for each name,
# check for type 0/1/2 (already good, copy JP, exact match in special dict)
# create translate_entry regardless what happens
# do same thing for model name
# then for all that didn't get successfully translated,
# do bulk local piecewise translate: list(str) -> list(str)
# then for all that didn't get successfully translated,
# do bulk google piecewise translate: list(str) -> list(str)
# then sort the results
# then format & print the results
# step zero: set up the translator thingy
init_googletrans()
# if JP model name is empty, give it something. same for comment
if pmx.header.name_jp == "":
pmx.header.name_jp = "model"
if pmx.header.comment_jp == "":
pmx.header.comment_jp = "comment"
translate_maps = []
# repeat the following for each category of visible names:
# materials=4, bones=5, morphs=6, dispframe=7
cat_id_list = list(range(4,8))
category_list = [pmx.materials, pmx.bones, pmx.morphs, pmx.frames]
for cat_id, category in zip(cat_id_list, category_list):
# for each entry:
for d, item in enumerate(category):
# skip "special" display frames
if isinstance(item, pmxstruct.PmxFrame) and item.is_special: continue
# strip away newline and return just in case, i saw a few examples where they showed up
item.name_jp = item.name_jp.replace('\r','').replace('\n','')
item.name_en = item.name_en.replace('\r','').replace('\n','')
# try to apply "easy" translate methods
newname, source = easy_translate(item.name_jp, item.name_en, specificdict_dict[cat_id])
# build the "trans_entry" item from this result, regardless of pass/fail
newentry = translate_entry(item.name_jp, item.name_en, cat_id, d, newname, source)
# store it
translate_maps.append(newentry)
# model name is special cuz there's only one & its indexing is different
# but i'm doing the same stuff
pmx.header.name_jp = pmx.header.name_jp.replace('\r', '').replace('\n', '')
pmx.header.name_en = pmx.header.name_en.replace('\r', '').replace('\n', '')
# try to apply "easy" translate methods
newname, source = easy_translate(pmx.header.name_jp, pmx.header.name_en, None)
# build the "trans_entry" item from this result, regardless of pass/fail
newentry = translate_entry(pmx.header.name_jp, pmx.header.name_en, 0, 2, newname, source)
# store it
translate_maps.append(newentry)
if | |
from client import exception, embed_creator, discord_manager, ini_manager, json_manager, permissions, origin, server_timer
from client.external import admin, dc_bank, organizer, rsn_register, vote, xp_tracker, kc_tracker
from client.config import config as c, language as l
from discord.ext import commands
import discord, locale
class server(commands.Cog):
    # ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬
    # module name used when dispatching setup/active/deactive commands
    name = 'server'
    # ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬
    def __init__(self, client):
        # keep a reference to the running bot/client instance
        self.client = client
@commands.command()
async def deactive(self, ctx):
try:
guild_l = await origin.get_language(ctx.guild.id)
if str(ctx.message.channel.type) == 'text':
path = c.GUILD_PATH['special_member.json'].format(ctx.guild.id)
target_keys = ['user_id', 'user_status']
target_values = [ctx.author.id, c.USER_PERMISSIONS['dc_bank']]
if ctx.author.id == c.CLIENT_ADMINISTRATION_ID or ctx.author.id == ctx.guild.owner.id or await permissions.get_user_permission(path, target_keys, target_values):
STRING = str(ctx.message.content).split(' ')
if len(STRING) == 2:
if STRING[1] == admin.admin.name:
path = c.GUILD_PATH['{}.ini'.format(admin.admin.name)].format(ctx.guild.id)
await ini_manager.update_data('STATUS', 'STATUS', '0', path)
await ctx.send(l.server[guild_l]['msg_success_4'])
elif STRING[1] == dc_bank.dc_bank.name:
path = c.GUILD_PATH['{}.ini'.format(dc_bank.dc_bank.name)].format(ctx.guild.id)
await ini_manager.update_data('STATUS', 'STATUS', '0', path)
await ctx.send(l.server[guild_l]['msg_success_4'])
elif STRING[1] == organizer.organizer.name:
path = c.GUILD_PATH['{}.ini'.format(organizer.organizer.name)].format(ctx.guild.id)
await ini_manager.update_data('STATUS', 'STATUS', '0', path)
await ctx.send(l.server[guild_l]['msg_success_4'])
elif STRING[1] == rsn_register.rsn_register.name:
path = c.GUILD_PATH['{}.ini'.format(rsn_register.rsn_register.name)].format(ctx.guild.id)
await ini_manager.update_data('STATUS', 'STATUS', '0', path)
await ctx.send(l.server[guild_l]['msg_success_4'])
elif STRING[1] == xp_tracker.xp_tracker.name:
path = c.GUILD_PATH['{}.ini'.format(xp_tracker.xp_tracker.name)].format(ctx.guild.id)
await ini_manager.update_data('STATUS', 'STATUS', '0', path)
await ctx.send(l.server[guild_l]['msg_success_4'])
elif STRING[1] == kc_tracker.kc_tracker.name:
path = c.GUILD_PATH['{}.ini'.format(kc_tracker.kc_tracker.name)].format(ctx.guild.id)
await ini_manager.update_data('STATUS', 'STATUS', '0', path)
await ctx.send(l.server[guild_l]['msg_success_4'])
elif STRING[1] == vote.vote.name:
path = c.GUILD_PATH['{}.ini'.format(vote.vote.name)].format(ctx.guild.id)
await ini_manager.update_data('STATUS', 'STATUS', '0', path)
await ctx.send(l.server[guild_l]['msg_success_4'])
else:
await ctx.send(l.server[guild_l]['msg_1'])
else:
await ctx.send(l.user_permissions[guild_l]['msg_restricted_1'])
await ctx.message.delete()
else:
await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
except Exception as error:
await exception.error(error)
    @commands.command()
    async def active(self, ctx):
        # Activate a pluggable guild module: "<prefix>active <module>".
        # Each module's required channels/roles/values are verified before its
        # per-guild ini STATUS flag is flipped to '2'; otherwise msg_error_5.
        # Allowed only in text channels, for the bot administrator, the guild
        # owner, or members holding the 'dc_bank' special permission.
        try:
            guild_l = await origin.get_language(ctx.guild.id)
            if str(ctx.message.channel.type) == 'text':
                path = c.GUILD_PATH['special_member.json'].format(ctx.guild.id)
                target_keys = ['user_id', 'user_status']
                target_values = [ctx.author.id, c.USER_PERMISSIONS['dc_bank']]
                if ctx.author.id == c.CLIENT_ADMINISTRATION_ID or ctx.author.id == ctx.guild.owner.id or await permissions.get_user_permission(path, target_keys, target_values):
                    STRING = str(ctx.message.content).split(' ')
                    if len(STRING) == 2:
                        # per-guild server config holds the channel ids each module needs
                        path1 = c.CLIENT_PATH['guild'] + str(ctx.guild.id) + c.CLIENT_JSON['server']
                        server_config = await json_manager.get_json(path1)
                        if STRING[1] == admin.admin.name:
                            # admin module needs its channel plus the BAN role
                            path = c.GUILD_PATH['{}.ini'.format(admin.admin.name)].format(ctx.guild.id)
                            ini = await ini_manager.get_ini(path)
                            admin_channel = await discord_manager.get_channel(self.client, ctx.guild.id, server_config['admin'])
                            role = await discord_manager.get_role(self.client, ctx.guild.id, int(ini['SECTION1']['BAN']) if ini['SECTION1']['BAN'] != '' else None)
                            if role and admin_channel:
                                await ini_manager.update_data('STATUS', 'STATUS', '2', path)
                                await ctx.send(l.server[guild_l]['msg_success_3'])
                            else:
                                await ctx.send(l.server[guild_l]['msg_error_5'])
                        elif STRING[1] == dc_bank.dc_bank.name:
                            # dc_bank needs three channels, an admin member, and
                            # five top-donator roles with their count thresholds
                            path = c.GUILD_PATH['{}.ini'.format(dc_bank.dc_bank.name)].format(ctx.guild.id)
                            ini = await ini_manager.get_ini(path)
                            accounting_channel = await discord_manager.get_channel(self.client, ctx.guild.id, server_config['dc_bank'])
                            sponsor_channel = await discord_manager.get_channel(self.client, ctx.guild.id, server_config['sponsor'])
                            thanks_channel = await discord_manager.get_channel(self.client, ctx.guild.id, server_config['thanks'])
                            admins = await discord_manager.get_member(self.client, ctx.guild.id, int(ini['SECTION1']['ADMIN']) if ini['SECTION1']['ADMIN'] != '' else None)
                            discord_top_1_role = await discord_manager.get_role(self.client, ctx.guild.id, int(ini['SECTION2']['DISCORD_TOP_1']) if ini['SECTION2']['DISCORD_TOP_1'] != '' else None)
                            discord_top_2_role = await discord_manager.get_role(self.client, ctx.guild.id, int(ini['SECTION2']['DISCORD_TOP_2']) if ini['SECTION2']['DISCORD_TOP_2'] != '' else None)
                            discord_top_3_role = await discord_manager.get_role(self.client, ctx.guild.id, int(ini['SECTION2']['DISCORD_TOP_3']) if ini['SECTION2']['DISCORD_TOP_3'] != '' else None)
                            discord_top_4_role = await discord_manager.get_role(self.client, ctx.guild.id, int(ini['SECTION2']['DISCORD_TOP_4']) if ini['SECTION2']['DISCORD_TOP_4'] != '' else None)
                            discord_top_5_role = await discord_manager.get_role(self.client, ctx.guild.id, int(ini['SECTION2']['DISCORD_TOP_5']) if ini['SECTION2']['DISCORD_TOP_5'] != '' else None)
                            discord_top_1_count = ini['SECTION3']['DISCORD_TOP_1_COUNT'] if ini['SECTION3']['DISCORD_TOP_1_COUNT'] != '' else None
                            discord_top_2_count = ini['SECTION3']['DISCORD_TOP_2_COUNT'] if ini['SECTION3']['DISCORD_TOP_2_COUNT'] != '' else None
                            discord_top_3_count = ini['SECTION3']['DISCORD_TOP_3_COUNT'] if ini['SECTION3']['DISCORD_TOP_3_COUNT'] != '' else None
                            discord_top_4_count = ini['SECTION3']['DISCORD_TOP_4_COUNT'] if ini['SECTION3']['DISCORD_TOP_4_COUNT'] != '' else None
                            discord_top_5_count = ini['SECTION3']['DISCORD_TOP_5_COUNT'] if ini['SECTION3']['DISCORD_TOP_5_COUNT'] != '' else None
                            if admins and accounting_channel and sponsor_channel and thanks_channel and discord_top_1_role and discord_top_2_role and discord_top_3_role and discord_top_4_role and discord_top_5_role and discord_top_1_count and discord_top_2_count and discord_top_3_count and discord_top_4_count and discord_top_5_count:
                                await ini_manager.update_data('STATUS', 'STATUS', '2', path)
                                await ctx.send(l.server[guild_l]['msg_success_3'])
                            else:
                                await ctx.send(l.server[guild_l]['msg_error_5'])
                        elif STRING[1] == organizer.organizer.name:
                            # organizer needs the events channel plus EVENT_ROLE
                            path = c.GUILD_PATH['{}.ini'.format(organizer.organizer.name)].format(ctx.guild.id)
                            ini = await ini_manager.get_ini(path)
                            event_channel = await discord_manager.get_channel(self.client, ctx.guild.id, server_config['events'])
                            role = await discord_manager.get_role(self.client, ctx.guild.id, int(ini['SECTION1']['EVENT_ROLE']) if ini['SECTION1']['EVENT_ROLE'] != '' else None)
                            if role and event_channel:
                                await ini_manager.update_data('STATUS', 'STATUS', '2', path)
                                await ctx.send(l.server[guild_l]['msg_success_3'])
                            else:
                                await ctx.send(l.server[guild_l]['msg_error_5'])
                        elif STRING[1] == rsn_register.rsn_register.name:
                            # rsn_register needs the registration channel plus ROLE
                            path = c.GUILD_PATH['{}.ini'.format(rsn_register.rsn_register.name)].format(ctx.guild.id)
                            ini = await ini_manager.get_ini(path)
                            rsn_registration_channel = await discord_manager.get_channel(self.client, ctx.guild.id, server_config['rsn'])
                            role = await discord_manager.get_role(self.client, ctx.guild.id, int(ini['SECTION1']['ROLE']) if ini['SECTION1']['ROLE'] != '' else None)
                            if role and rsn_registration_channel:
                                await ini_manager.update_data('STATUS', 'STATUS', '2', path)
                                await ctx.send(l.server[guild_l]['msg_success_3'])
                            else:
                                await ctx.send(l.server[guild_l]['msg_error_5'])
                        elif STRING[1] == xp_tracker.xp_tracker.name:
                            # xp_tracker needs both event channels plus winner/participant roles
                            path = c.GUILD_PATH['{}.ini'.format(xp_tracker.xp_tracker.name)].format(ctx.guild.id)
                            ini = await ini_manager.get_ini(path)
                            event_channel = await discord_manager.get_channel(self.client, ctx.guild.id, server_config['events'])
                            xp_event_channel = await discord_manager.get_channel(self.client, ctx.guild.id, server_config['xp_event'])
                            event_winner = await discord_manager.get_role(self.client, ctx.guild.id, int(ini['SECTION1']['EVENT_WINNER']) if ini['SECTION1']['EVENT_WINNER'] != '' else None)
                            event_role = await discord_manager.get_role(self.client, ctx.guild.id, int(ini['SECTION1']['EVENT_ROLE']) if ini['SECTION1']['EVENT_ROLE'] != '' else None)
                            if event_winner and event_role and event_channel and xp_event_channel:
                                await ini_manager.update_data('STATUS', 'STATUS', '2', path)
                                await ctx.send(l.server[guild_l]['msg_success_3'])
                            else:
                                await ctx.send(l.server[guild_l]['msg_error_5'])
                        elif STRING[1] == kc_tracker.kc_tracker.name:
                            # kc_tracker mirrors xp_tracker but with the kc_event channel
                            path = c.GUILD_PATH['{}.ini'.format(kc_tracker.kc_tracker.name)].format(ctx.guild.id)
                            ini = await ini_manager.get_ini(path)
                            event_channel = await discord_manager.get_channel(self.client, ctx.guild.id, server_config['events'])
                            kc_event_channel = await discord_manager.get_channel(self.client, ctx.guild.id, server_config['kc_event'])
                            event_winner = await discord_manager.get_role(self.client, ctx.guild.id, int(ini['SECTION1']['EVENT_WINNER']) if ini['SECTION1']['EVENT_WINNER'] != '' else None)
                            event_role = await discord_manager.get_role(self.client, ctx.guild.id, int(ini['SECTION1']['EVENT_ROLE']) if ini['SECTION1']['EVENT_ROLE'] != '' else None)
                            if event_winner and event_role and event_channel and kc_event_channel:
                                await ini_manager.update_data('STATUS', 'STATUS', '2', path)
                                await ctx.send(l.server[guild_l]['msg_success_3'])
                            else:
                                await ctx.send(l.server[guild_l]['msg_error_5'])
                        elif STRING[1] == vote.vote.name:
                            # vote needs the RESTRICTION role and a vote COUNT value
                            path = c.GUILD_PATH['{}.ini'.format(vote.vote.name)].format(ctx.guild.id)
                            ini = await ini_manager.get_ini(path)
                            restriction = await discord_manager.get_role(self.client, ctx.guild.id, int(ini['SECTION1']['RESTRICTION']) if ini['SECTION1']['RESTRICTION'] != '' else None)
                            count = ini['SECTION2']['COUNT'] if ini['SECTION2']['COUNT'] != '' else None
                            if restriction and count:
                                await ini_manager.update_data('STATUS', 'STATUS', '2', path)
                                await ctx.send(l.server[guild_l]['msg_success_3'])
                            else:
                                await ctx.send(l.server[guild_l]['msg_error_5'])
                        else:
                            # unknown module name
                            await ctx.send(l.server[guild_l]['msg_1'])
                else:
                    await ctx.send(l.user_permissions[guild_l]['msg_restricted_1'])
                await ctx.message.delete()
            else:
                # command is not available via private message
                await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
        except Exception as error:
            await exception.error(error)
@commands.command()
async def setup(self, ctx):
    """Update a single INI value for one of the bot's modules.

    Usage: ``setup <module> <section> <key> <value>``.
    Only the bot administrator, the guild owner, or users holding the
    'dc_bank' permission may run it, and only from a guild text channel.
    """
    try:
        guild_l = await origin.get_language(ctx.guild.id)
        if str(ctx.message.channel.type) == 'text':
            path = c.GUILD_PATH['special_member.json'].format(ctx.guild.id)
            target_keys = ['user_id', 'user_status']
            target_values = [ctx.author.id, c.USER_PERMISSIONS['dc_bank']]
            if ctx.author.id == c.CLIENT_ADMINISTRATION_ID or ctx.author.id == ctx.guild.owner.id or await permissions.get_user_permission(path, target_keys, target_values):
                STRING = str(ctx.message.content).split(' ')
                if len(STRING) >= 5:
                    # Every module shares the identical "<section> <key> <value>"
                    # update flow, so resolve the addressed module by name
                    # instead of duplicating one elif branch per module.
                    modules = (admin.admin, dc_bank.dc_bank, organizer.organizer,
                               rsn_register.rsn_register, xp_tracker.xp_tracker,
                               kc_tracker.kc_tracker, vote.vote)
                    module = next((m for m in modules if STRING[1] == m.name), None)
                    if module:
                        path = c.GUILD_PATH['{}.ini'.format(module.name)].format(ctx.guild.id)
                        value = await origin.find_and_replace(STRING[4])
                        status = await ini_manager.update_data(STRING[2], STRING[3], value, path)
                        if status:
                            await ctx.send(l.server[guild_l]['msg_success_2'])
                        else:
                            await ctx.send(l.server[guild_l]['msg_error_4'])
                    else:
                        # Unknown module name.
                        await ctx.send(l.server[guild_l]['msg_error_2'])
                else:
                    # Too few arguments.
                    await ctx.send(l.server[guild_l]['msg_1'])
            else:
                await ctx.send(l.user_permissions[guild_l]['msg_restricted_1'])
            # Always remove the invoking message in guild channels.
            await ctx.message.delete()
        else:
            # Command is not available in DMs.
            await ctx.send(l.module_permissions['EN']['msg_restricted_pm'])
    except Exception as error:
        await exception.error(error)
@commands.command()
async def config(self, ctx):
try:
guild_l = await origin.get_language(ctx.guild.id)
if str(ctx.message.channel.type) == 'text':
path = c.GUILD_PATH['special_member.json'].format(ctx.guild.id)
target_keys = ['user_id', 'user_status']
target_values = [ctx.author.id, c.USER_PERMISSIONS['dc_bank']]
if ctx.author.id == c.CLIENT_ADMINISTRATION_ID or ctx.author.id == ctx.guild.owner.id or await permissions.get_user_permission(path, target_keys, target_values):
path1 = c.CLIENT_PATH['guild'] + str(ctx.guild.id) + c.CLIENT_JSON['server']
server_config = await json_manager.get_json(path1)
STATUS = False
MOD_STATUS = 0
TEXT = ''
TEXT2 = ''
TEXT3 = ''
STATUS_EMOTE = ['🟢', '🔴']
STRING = str(ctx.message.content).split(' ')
if len(STRING) >= 2:
if STRING[1] == 'channel':
if len(STRING) == 4:
for channel in c.DISCORD_CHANNEL[guild_l]:
if STRING[2] == channel:
STATUS = True
if STATUS:
CHID = await origin.find_and_replace(STRING[3])
if str(CHID).isdigit():
CH = await discord_manager.get_channel(self.client, ctx.guild.id, CHID)
if CH:
path = c.CLIENT_PATH['guild'] + str(ctx.guild.id) + c.CLIENT_JSON['server']
await json_manager.update_server_config(path, STRING[2], CH.id)
await ctx.send(l.server[guild_l]['msg_success_1'].format(STRING[3]))
else:
await ctx.send(l.server[guild_l]['msg_error_1'].format(STRING[3]))
else:
await ctx.send(l.server[guild_l]['msg_error_1'].format(STRING[3]))
else:
await ctx.send(l.server[guild_l]['msg_error_2'].format(STRING[3]))
else:
await ctx.send(l.server[guild_l]['msg_badformat_1'])
if STRING[1] == 'install':
if len(STRING) == 4:
if len(STRING[2]) == 2 and await server_timer.check_status_timezone(STRING[3]):
path = c.CLIENT_JSON['guild']
target_keys = ['language', 'time_line']
target_values = [STRING[2], STRING[3]]
await json_manager.update(path, 'guild_id', ctx.guild.id, target_keys, target_values)
await ctx.send(l.server[guild_l]['msg_success_5'].format(STRING[3]))
else:
await ctx.send(l.server[guild_l]['msg_badformat_2'])
else:
await ctx.send(l.server[guild_l]['msg_badformat_2'])
if STRING[1] == admin.admin.name:
path = c.GUILD_PATH['{}.ini'.format(admin.admin.name)].format(ctx.guild.id)
ini = await ini_manager.get_ini(path)
TEXT = l.server[guild_l]['server.config.admin_1'].format('<#{}>'.format(server_config['admin']) if server_config['admin'] else '', '<@&{}>'.format(ini['SECTION1']['BAN']) if ini['SECTION1']['BAN'] else '')
TEXT3 = l.server[guild_l]['server.config.active_deactive'].format(admin.admin.name, admin.admin.name)
MOD_STATUS = int(ini['STATUS']['STATUS'])
elif STRING[1] == dc_bank.dc_bank.name:
path = c.GUILD_PATH['{}.ini'.format(dc_bank.dc_bank.name)].format(ctx.guild.id)
ini = await ini_manager.get_ini(path)
TEXT = l.server[guild_l]['server.config.dc_bank_1'].format(
'<#{}>'.format(server_config['dc_bank']) if server_config['dc_bank'] else '',
'<#{}>'.format(server_config['sponsor']) if server_config['sponsor'] else '',
'<#{}>'.format(server_config['thanks']) if server_config['thanks'] else '',
'<@{}>'.format(ini['SECTION1']['ADMIN']) if ini['SECTION1']['ADMIN'] else l.server[guild_l]['configuration']['extra_1'],
'<@&{}>'.format(ini['SECTION2']['DISCORD_TOP_1']) if ini['SECTION2']['DISCORD_TOP_1'] else '',
'<@&{}>'.format(ini['SECTION2']['DISCORD_TOP_2']) if ini['SECTION2']['DISCORD_TOP_2'] else '',
'<@&{}>'.format(ini['SECTION2']['DISCORD_TOP_3']) if ini['SECTION2']['DISCORD_TOP_3'] else '',
'<@&{}>'.format(ini['SECTION2']['DISCORD_TOP_4']) if ini['SECTION2']['DISCORD_TOP_4'] else '',
'<@&{}>'.format(ini['SECTION2']['DISCORD_TOP_5']) if ini['SECTION2']['DISCORD_TOP_5'] else '')
TEXT2 = l.server[guild_l]['server.config.dc_bank_2'].format(
| |
# Repository: Ibrahim2595/virtual_learning_softrobotics
import cv2
import mediapipe as mp
import numpy as np
import json
import time
import socket
import sys
# color in BGR
text_color = (255, 255, 255)   # white: all on-screen labels
arrow_color = (0, 0, 255)      # red (BGR): direction arrows
arrow_thickness = 6 #px
arrow_length = 200             # default arrow length in pixels
cmd_text_position = [50,50]    # anchor for the (commented-out) command legend
last_cmd_expire_time = {}      # command key -> time.time() when it was issued
ports_text_position = [100,100]  # anchor for the port-status overlay text
all_cmd = set()                # keyboard command keys collected from actions.json
# Mutable global state shared between the capture loop and its helpers.
current_states = {
    'action': None,            # name of the action currently being executed
    'event_type': 'tracking',  # 'tracking' | 'motion' | 'control'
    'displaying_action': None, # action whose illustration is being drawn
}
def read_configs(file_name):
    """Load the action definitions from a JSON file.

    Parameters
    ----------
    file_name : str
        Path to the JSON config (e.g. 'actions.json').

    Returns
    -------
    dict
        Parsed JSON content mapping action name -> definition.
    """
    # Context manager guarantees the handle is closed even if json.load
    # raises (the original leaked the handle in that case).
    with open(file_name) as f:
        return json.load(f)
def calculate_angle(a, b, c):
    """Return the joint angle at vertex ``b`` of triangle a-b-c, in degrees.

    Each point is a 2-element (x, y) sequence; the result is folded into
    the range [0, 180].
    """
    first = np.array(a)
    mid = np.array(b)
    end = np.array(c)
    rad = np.arctan2(end[1] - mid[1], end[0] - mid[0]) - np.arctan2(first[1] - mid[1], first[0] - mid[0])
    deg = np.abs(rad * 180.0 / np.pi)
    # Reflex angles are reported as their explement so the result is <= 180.
    return 360 - deg if deg > 180.0 else deg
def calculate_arrow_endpoints(direction, angle, anchors, length = arrow_length):
    """Compute pixel-space start/end points for a guide arrow.

    Parameters
    ----------
    direction : str
        Arrow direction; only "right" is currently implemented.
    angle : unused (kept for interface compatibility).
    anchors : list of (x, y)
        Normalized landmark coordinates; the arrow starts at their average.
    length : int
        Arrow length in pixels.

    Returns
    -------
    (start_point, end_point) tuples in pixel coordinates.
    """
    # TODO: calculate realtime end point after arm orientation is changed
    if not anchors:
        # BUG FIX: the original fell through with start_point undefined
        # (NameError); fail with a clear message instead.
        raise ValueError('calculate_arrow_endpoints requires at least one anchor')
    if len(anchors) == 1:
        start_point = anchors[0]
    else:
        start_point = np.average(np.array(anchors), 0)
    # Scale normalized coordinates to the 1280x720 frame.
    start_point = tuple(np.multiply(start_point, [1280, 720]).astype(int))
    if direction == "right":
        end_point = (start_point[0] - length, start_point[1])
    else:
        # BUG FIX: any other direction left end_point undefined (NameError).
        # Fall back to a zero-length arrow until more directions exist.
        end_point = start_point
    return start_point, end_point
def calculate_circle(image,circle_direction, angle, anchors, circle_center, length = arrow_length):
    """Draw a short white elliptical arc around ``circle_center`` indicating
    the rotation direction of the limb segment spanned by the first two
    anchors. ``angle`` and ``length`` are currently unused.
    """
    center = tuple(np.multiply(circle_center, [1280, 720]).astype(int))
    # Orientation of the segment from anchors[1] to anchors[0], in degrees.
    arm_angle = np.arctan2(anchors[0][1] - anchors[1][1],
                           anchors[0][0] - anchors[1][0]) * 180.0 / np.pi
    radius = 150
    sweep = 60
    if circle_direction == "down" and arm_angle < 0:
        # Flip the sweep so the arc bends downward.
        sweep = -sweep
    white = (255, 255, 255)
    cv2.ellipse(image, center, (radius, radius), arm_angle, 0, sweep, white, 16)
def get_anchors(action_def, all_coorddinates):
    """Resolve the landmark names listed in an action's illustration into
    their (x, y) coordinates.

    Parameters
    ----------
    action_def : dict with action_def["illustration"]["anchors"] = list of names.
    all_coorddinates : dict mapping landmark name -> [x, y].
    """
    points = []
    for name in action_def["illustration"]["anchors"]:
        points.append(all_coorddinates[name])
    return points
def visualize_angles(landmarks, mp_pose, image):
    """Extract 2D joint coordinates from a MediaPipe pose result, compute the
    main joint angles and draw each one on the frame at its joint.

    Parameters
    ----------
    landmarks : sequence of MediaPipe landmarks (has .x / .y attributes).
    mp_pose : the mediapipe pose solutions module (provides PoseLandmark).
    image : BGR frame drawn on in place.

    Returns
    -------
    dict mapping landmark name -> [x, y] normalized coordinates.
    """
    # Names mirror mp_pose.PoseLandmark members (upper-cased); the original
    # spelled out one extraction per joint — collapsed into a loop.
    joint_names = (
        "left_hip", "left_shoulder", "left_elbow", "left_wrist",
        "left_knee", "left_ankle",
        "right_hip", "right_shoulder", "right_elbow", "right_wrist",
        "right_knee", "right_ankle",
    )
    all_coorddinates = {}
    for name in joint_names:
        lm = landmarks[getattr(mp_pose.PoseLandmark, name.upper()).value]
        all_coorddinates[name] = [lm.x, lm.y]

    # (label, (first, mid, end), text anchor) — the angle is measured at the
    # middle joint; draw order matches the original.
    angle_specs = (
        ('Left_shoulder', ('left_elbow', 'left_shoulder', 'left_hip'), 'left_shoulder'),
        ('Right_shoulder', ('right_elbow', 'right_shoulder', 'right_hip'), 'right_shoulder'),
        ('Left_arm', ('left_shoulder', 'left_elbow', 'left_wrist'), 'left_elbow'),
        ('Right_arm', ('right_shoulder', 'right_elbow', 'right_wrist'), 'right_elbow'),
        ('Left_knee', ('left_hip', 'left_knee', 'left_ankle'), 'left_knee'),
        ('Right_knee', ('right_hip', 'right_knee', 'right_ankle'), 'right_knee'),
        ('Left_leg', ('left_shoulder', 'left_hip', 'left_knee'), 'left_hip'),
        ('Right_leg', ('right_shoulder', 'right_hip', 'right_knee'), 'right_hip'),
    )
    for label, (a, b, c_), anchor in angle_specs:
        angle = calculate_angle(all_coorddinates[a], all_coorddinates[b], all_coorddinates[c_])
        cv2.putText(image, '{}: {:.2f}'.format(label, angle),
                    tuple(np.multiply(all_coorddinates[anchor], [1280, 720]).astype(int)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, text_color, 2, cv2.LINE_AA
                    )
    return all_coorddinates
def generate_events(action_def):
    """Build the websocket event for the current action: inflate the action's
    ports immediately, then release all five ports after inflate_duration.
    Reads the current action/event type from the module-level current_states.
    """
    inflate_step = {
        "t": 0,
        "a": "+",
        "pwm": "255",
        "ports": action_def['ports'],
    }
    release_step = {
        "t": action_def['inflate_duration'],
        "a": "!",
        "pwm": "255",
        "ports": [1, 1, 1, 1, 1],
    }
    return {
        "type": current_states['event_type'],
        "action": current_states['action'],
        "schedule": [inflate_step, release_step],
    }
def generate_port_events(port_key):
    """Translate a pressed digit key into a direct port-control event.

    Parameters
    ----------
    port_key : int
        Raw ord() value of the pressed digit ('1'..'6').

    Keys 1-3 inflate a single dedicated port; any other digit inflates all
    five ports.
    """
    digit = port_key - ord('0')
    single_port = {
        1: [1, 0, 0, 0, 0],
        2: [0, 1, 0, 0, 0],
        3: [0, 0, 1, 0, 0],
    }
    # NOTE(review): digits 4 and 5 currently fall through to "all ports",
    # same as 6 — confirm whether dedicated patterns were intended.
    ports = single_port.get(digit, [1, 1, 1, 1, 1])
    return {
        "type": 'control',
        'action': 'port',
        "schedule": [
            {
                "t": 0,
                "a": "+",
                "pwm": "255",
                "ports": ports,
            }
        ],
    }
async def inflate_ports(image, action_def, websocket):
    """Drive one inflation cycle for the active action: overlay status text
    and send the action's schedule over the websocket exactly once.

    Parameters
    ----------
    image : frame to draw the status text on.
    action_def : dict with 'command', 'ports', 'inflate_duration' (ms).
    websocket : connection whose .send() receives the JSON event.
    """
    cmd = action_def['command']
    current_ports = action_def['ports']
    duration = 1.0 * action_def['inflate_duration'] / 1000  # ms -> seconds
    if time.time() > duration + last_cmd_expire_time[cmd]:
        # Inflation window elapsed: show the deflating banner.
        # BUG FIX: the original called the undefined name `deflate_ports`,
        # raising NameError once the window expired; `displaying_deflate`
        # is the function that exists.
        displaying_deflate(current_ports, image)
    else:
        cv2.putText(image, f'Inflating Ports : {current_ports} , {duration} s',
                    ports_text_position,
                    cv2.FONT_HERSHEY_SIMPLEX, 1, text_color, 2, cv2.LINE_AA
                    )
    if current_states['action'] is not None:
        # Send the schedule once per action, then return to tracking mode.
        await websocket.send(json.dumps(generate_events(action_def)))
        current_states['event_type'] = 'tracking'
        current_states['action'] = None
def displaying_deflate(ports_array, image):
    """Overlay a 'deflating' status line for the given port pattern."""
    label = f'Deflating Ports : {ports_array}'
    cv2.putText(
        image,
        label,
        ports_text_position,
        cv2.FONT_HERSHEY_SIMPLEX,
        1,
        text_color,
        2,
        cv2.LINE_AA,
    )
async def execute_action(actions, current_action, image, websocket):
    """Look up the active action's definition and run its inflation step."""
    # draw arrow to show motion direction
    definition = actions[current_action]
    await inflate_ports(image, definition, websocket)
def displaying(actions, image, displaying_action, all_coorddinates):
    # Draw the on-screen illustration for the currently displayed action:
    # a guide arc around the joint plus inflate/deflate status text.
    # Names not present in `actions` (e.g. 'stop', 'port control 1') are
    # simply printed verbatim.
    if displaying_action not in actions:
        cv2.putText(image, f'{displaying_action}',
                    ports_text_position,
                    cv2.FONT_HERSHEY_SIMPLEX, 1, text_color, 2, cv2.LINE_AA
                    )
        return
    action_def = actions[displaying_action]
    anchors = get_anchors(action_def, all_coorddinates)
    circle_center = all_coorddinates[action_def["illustration"]["circle_center"]]
    current_ports = action_def["ports"]
    angle = action_def["illustration"]["arrow_angle"]
    direction = action_def["illustration"]["arrow_direction"]
    circle_direction = action_def["illustration"]["circle_direction"]
    # end_points currently unused (arrow drawing is commented out below).
    end_points = calculate_arrow_endpoints(direction, angle, anchors)
    calculate_circle(image, circle_direction, angle, anchors, circle_center)
    # cv2.arrowedLine(image, end_points[0], end_points[1],
    #                 arrow_color, arrow_thickness)
    cmd = action_def['command']
    current_ports = action_def['ports']
    duration = 1.0 * action_def['inflate_duration'] / 1000  # ms -> seconds
    # Timeline since the key press: [0, duration) inflating,
    # [duration, 2*duration) deflating, afterwards clear the display.
    if time.time() > duration * 2 + last_cmd_expire_time[cmd]:
        current_states["displaying_action"] = None
    elif time.time() > duration + last_cmd_expire_time[cmd]:
        displaying_deflate(current_ports, image)
    else:
        cv2.putText(image, f'Inflating Ports : {current_ports} , {duration} s',
                    ports_text_position,
                    cv2.FONT_HERSHEY_SIMPLEX, 1, text_color, 2, cv2.LINE_AA
                    )
async def remote_cv(websocket):
    """Main capture loop: track pose with MediaPipe, overlay joint angles and
    action illustrations, and translate key presses into robot-control
    events sent over ``websocket``.

    Keys: q quit, space stop all ports, 1-6 direct port control,
    r arm_down_raise, s arm_bent_straighten.
    """
    print(f"start remote_cv")
    mp_drawing = mp.solutions.drawing_utils
    mp_pose = mp.solutions.pose
    cap = cv2.VideoCapture(0)
    actions = read_configs('actions.json')
    for action, values in actions.items():
        all_cmd.add(values['command'])
    with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:
        while cap.isOpened():
            ret, frame = cap.read()
            # ROBUSTNESS FIX: stop cleanly when the camera yields no frame
            # (matches my_local_cv); cvtColor(None) would otherwise raise.
            if not ret:
                break
            # Recolor image to RGB
            image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image.flags.writeable = False
            # Make detection
            results = pose.process(image)
            # Recolor back to BGR
            image.flags.writeable = True
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            # Extract landmarks
            try:
                landmarks = results.pose_landmarks.landmark
                all_coorddinates = visualize_angles(landmarks, mp_pose, image)
                if current_states['action'] is not None and current_states['event_type'] == 'motion':
                    await execute_action(actions, current_states['action'], image, websocket)
                if current_states["displaying_action"] is not None:
                    displaying(actions, image, current_states["displaying_action"], all_coorddinates)
            except AttributeError:
                # No pose detected in this frame (pose_landmarks is None).
                pass
            except BaseException as err:
                print(f"Unexpected {err=}, {type(err)=}")
                raise
            # Render detections
            mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,
                                      mp_drawing.DrawingSpec(color=(245, 117, 66), thickness=2, circle_radius=2),
                                      mp_drawing.DrawingSpec(color=(245, 66, 230), thickness=2, circle_radius=2)
                                      )
            cv2.imshow('Iris Demo', image)
            key_pressed = cv2.waitKey(10) & 0xFF
            if key_pressed == ord('q'):
                current_states['action'] = 'quit'
                current_states['event_type'] = 'control'
                event = {
                    "type": 'q',
                }
                await websocket.send(json.dumps(event))
                break
            elif key_pressed == ord(' '):
                # Emergency stop: release all ports immediately.
                current_states['action'] = 'stop'
                current_states['event_type'] = 'control'
                current_states['displaying_action'] = 'stop'
                event = {
                    "type": 'control',
                    'action': 'stop',
                    "schedule": [
                        {
                            "t": 0,
                            "a": "!",
                            "pwm": "255",
                            "ports": [1, 1, 1, 1, 1]
                        }
                    ]
                }
                await websocket.send(json.dumps(event))
            elif ord('1') <= key_pressed <= ord('6'):
                current_states['displaying_action'] = 'port control ' + str(key_pressed - ord('0'))
                current_states['event_type'] = 'control'
                event = generate_port_events(key_pressed)
                await websocket.send(json.dumps(event))
            elif key_pressed == ord('r'):
                current_states['action'] = 'arm_down_raise'
                current_states['displaying_action'] = 'arm_down_raise'
                current_states['event_type'] = 'motion'
                last_cmd_expire_time['r'] = time.time()
            elif key_pressed == ord('s'):
                current_states['event_type'] = 'motion'
                current_states['action'] = 'arm_bent_straighten'
                current_states['displaying_action'] = 'arm_bent_straighten'
                last_cmd_expire_time['s'] = time.time()
    cap.release()
    cv2.destroyAllWindows()
def my_local_cv(file_path, use_recording=True):
    # Offline variant of remote_cv: runs the same pose-tracking overlay on a
    # recorded video (or the webcam when use_recording is False) without a
    # websocket connection; all robot-control sends are commented out.
    mp_drawing = mp.solutions.drawing_utils
    mp_pose = mp.solutions.pose
    if use_recording:
        cap = cv2.VideoCapture(file_path)
    else:
        cap = cv2.VideoCapture(0)
    actions = read_configs('actions.json')
    for action, values in actions.items():
        all_cmd.add(values['command'])
    with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:
        while cap.isOpened():
            ret, frame = cap.read()
            # End of the recording (or camera failure): leave the loop.
            if ret == False:
                break
            # Recolor image to RGB
            image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            # Read-only while MediaPipe processes the frame (perf hint).
            image.flags.writeable = False
            # Make detection
            results = pose.process(image)
            # Recolor back to BGR
            image.flags.writeable = True
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            # Extract landmarks
            try:
                landmarks = results.pose_landmarks.landmark
                all_coorddinates = visualize_angles(landmarks, mp_pose, image)
                # if current_states['action'] is not None and current_states['event_type'] == 'motion':
                #     await execute_action(actions, current_states['action'], image, websocket)
                # if current_states["displaying_action"] is not None:
                #     displaying(actions,image, current_states["displaying_action"], all_coorddinates)
            except AttributeError as err:
                # No pose detected in this frame (pose_landmarks is None).
                # print(f"Unexpected {err=}, {type(err)=}")
                pass
            except BaseException as err:
                print(f"Unexpected {err=}, {type(err)=}")
                raise
            # Render detections
            mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,
                                      mp_drawing.DrawingSpec(color=(245, 117, 66), thickness=2, circle_radius=2),
                                      mp_drawing.DrawingSpec(color=(245, 66, 230), thickness=2, circle_radius=2)
                                      )
            cv2.imshow('Mediapipe Feed', image)
            key_pressed = cv2.waitKey(10) & 0xFF
            # Key handling mirrors remote_cv but only updates local state.
            if key_pressed == ord('q'):
                current_states['action'] = 'quit'
                current_states['event_type'] = 'control'
                current_states['displaying_action'] = 'quit'
                event = {
                    "type": 'q',
                }
                # await websocket.send(json.dumps(event))
                break
            elif key_pressed == ord(' '):
                current_states['action'] = 'stop'
                current_states['event_type'] = 'control'
                event = {
                    "type": 'control',
                    'action': 'stop',
                    "schedule": [
                        {
                            "t":0,
                            "a": "!",
                            "pwm": "255",
                            "ports": [
                                1,
                                1,
                                1,
                                1,
                                1
                            ]
                        }
                    ]
                }
                # await websocket.send(json.dumps(event))
            elif key_pressed == ord('r'):
                current_states['action'] = 'arm_down_raise'
                current_states['displaying_action'] = 'arm_down_raise'
                current_states['event_type'] = 'motion'
                last_cmd_expire_time['r'] = time.time()
            elif key_pressed == ord('s'):
                current_states['event_type'] = 'motion'
                current_states['action'] = 'arm_bent_straighten'
                current_states['displaying_action'] = 'arm_bent_straighten'
                last_cmd_expire_time['s'] = time.time()
    cap.release()
    cv2.destroyAllWindows()
# def main(argv):
if __name__ == "__main__":
file_path = sys.argv[1]
| |
from __future__ import print_function
import inspect
from math import pi, sqrt, factorial
import qutip
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import patches as mpatches
from matplotlib.gridspec import GridSpec
from os import path
from scipy.special import genlaguerre
from scipy.linalg import svd, expm
from scipy.optimize import brute
from scipy import signal
from mpl_toolkits.axes_grid1 import ImageGrid
import shutil
__all__ = [
'Reporter', 'print_costs', 'print_grads', 'save_waves', 'plot_waves',
'save_script', 'liveplot_waves', 'liveplot_prop', 'plot_fidelity', 'plot_unitary', 'plot_cwigs',
'verify_from_setup', 'verify_master_equation', 'plot_matrix', 'plot_states',
'verify_sensitivity', 'verify_dispersion_sensitivity', 'verify_with_response',
'set_output_fmt', 'plot_penalties', 'plot_trajectories', 'cutoff'
]
def run_reporter(fn, data):
args = [data[k] for k in inspect.getargspec(fn).args if k != 'self']
fn(*args)
# File suffix used by every reporter when saving figures.
OUTPUT_FMT = 'pdf'
def set_output_fmt(fmt):
    """
    Set the file suffix used for matplotlib.savefig. By default this is pdf
    """
    global OUTPUT_FMT
    OUTPUT_FMT = fmt
class Reporter(object):
    """
    Base reporter class. Subclass and implement run method to use
    Parameters
    ----------
    spacing : int
        Number of iterations to perform between evaluations of this reporter
    """
    def __init__(self, spacing=1):
        self.spacing = spacing
        self.n_call = 0

    def __call__(self, force=False, **kwargs):
        """Evaluate run() every ``spacing``-th call (or when forced), passing
        only the keyword arguments that the subclass' run() declares."""
        if force or self.n_call % self.spacing == 0:
            # BUG FIX: inspect.getargspec was removed in Python 3.11; use
            # getfullargspec when available (Python 2 falls back).
            getspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
            args = [kwargs[k] for k in getspec(self.run).args[1:]]
            self.run(*args)
        self.n_call += 1

    def run(self, *args):
        """Override in subclasses; receives the values named in its signature."""
        raise NotImplementedError
class print_costs(Reporter):
    """
    Prints the current fidelity from each setup, and the cost from each penalty
    """
    # TODO: Replace this with a Logging solution for better support of
    # multiprocessing in Spyder (which dosn't let children print
    # to STDOUT)
    def run(self, fids, pen_costs, n_iter):
        # One line per iteration: "<n> - Fids: f1 f2 ... [Penalties:] p1 p2 ..."
        fid_txt = ' '.join('%.7g' % c for c in fids)
        print(n_iter, '- Fids:', end=' ')
        print(fid_txt, end=' ')
        if len(pen_costs):
            print('Penalties:', end=' ')
        # Also terminates the line when there are no penalties.
        print(' '.join('%.7g' % c for c in pen_costs))
class cutoff(Reporter):
    """
    Raise exception is we go too many rounds without going over a threshold

    Parameters
    ----------
    cut_rounds : int
        Iteration count after which the fidelity check applies.
    cut_fid : float
        Minimum acceptable mean fidelity.
    """
    def __init__(self, cut_rounds=10, cut_fid=.715):
        super(cutoff, self).__init__()
        self.cut_rounds = cut_rounds
        self.cut_fid = cut_fid

    def run(self, fids, pen_costs, n_iter):
        # Abort the optimization if the mean fidelity is still below the
        # threshold once cut_rounds iterations have elapsed.
        if np.mean(fids) < self.cut_fid and n_iter > self.cut_rounds:
            txt = 'Failed to get fid > %.3f in %d rounds' % (self.cut_fid, self.cut_rounds)
            # RuntimeError instead of a bare Exception: still caught by
            # callers handling Exception, but more precise.
            raise RuntimeError(txt)
class print_grads(Reporter):
    """
    Prints the maximum gradient value for both the control and auxiliary parameters
    """
    def run(self, fid_grads, aux_fid_grads):
        max_fid_grad = abs(fid_grads).max()
        print('Max Fid Grad:', max_fid_grad, end=' ')
        if not aux_fid_grads.size:
            # No auxiliary parameters: just terminate the line.
            print('')
            return
        print('Max Aux Grad:', abs(aux_fid_grads).max())
class save_waves(Reporter):
    """
    Saves the controls in a .npz file. To retrieve the data, use
    ``np.load('waves.npz')``, which returns a dictionary-like object.
    Parameters
    ----------
    wave_names : List of str
        Names of the controls when saved in dictionary. There should be
        N_CTRLS entries in this list.
    """
    def __init__(self, wave_names, spacing):
        super(save_waves, self).__init__(spacing)
        self.wave_names = wave_names

    def run(self, outdir, sim_controls, dt, n_ss, raw_controls, shape_func, response, tot_cost):
        print('saving...')
        # Shaped controls as played by the AWG; when a response function is
        # given, append zero padding for the convolution tail.
        shaped = raw_controls * shape_func
        if response is None:
            awg_controls = shaped
        else:
            pad = np.zeros((len(raw_controls), len(response)))
            awg_controls = np.hstack([shaped, pad])
        wave_dict = {}
        for name, sim_w, raw_w, awg_w in zip(self.wave_names, sim_controls, raw_controls, awg_controls):
            wave_dict['sim_' + name] = sim_w   # sub-sampled simulation wave
            wave_dict['raw_' + name] = raw_w   # unshaped optimizer output
            wave_dict[name] = awg_w            # shaped (and padded) wave
        wave_dict['sim_dt'] = dt / float(n_ss)
        wave_dict['dt'] = dt
        wave_dict['n_ss'] = n_ss
        wave_dict['response'] = response
        np.savez(path.join(outdir, 'waves.npz'), **wave_dict)
class plot_waves(Reporter):
    """
    Uses matplotlib to plot the current waves, and saves them under
    waves.pdf in the output directory. Since plotting with matplotlib
    can be slow, make sure the spacing is set reasonably so plotting
    does not dominate the execution time.
    """
    def __init__(self, wave_names, spacing=5, iq_pairs=False, last_only=False):
        super(plot_waves, self).__init__(spacing)
        self.wave_names = wave_names
        self.iq_pairs = iq_pairs
        self.last_only = last_only
        n_ax = len(wave_names)
        if iq_pairs:
            n_ax //= 2
            self.fft_fig, self.fft_axes = plt.subplots(n_ax, 1)
            # NOTE(review): the iq_pairs branch of run() references
            # self.axes, which is only assigned below — presumably this
            # mode is unused/broken; confirm before relying on it.
        else:
            self.fig = plt.figure()
            gs1 = GridSpec(n_ax, 2)
            # Per I/Q pair: two stacked wave axes plus one double-height
            # FFT axis.
            # BUG FIX: n_ax / 2 is a float on Python 3 and range() raises
            # TypeError; use integer division.
            for i in range(n_ax // 2):
                self.fig.add_subplot(gs1[i*2, 0])
                self.fig.add_subplot(gs1[i*2+1, 0])
                self.fig.add_subplot(gs1[i*2:i*2+2, 1])
            self.axes = self.fig.axes

    def run(self, outdir, full_controls, dt, n_ss):
        print('Plotting...')
        sim_dt = dt / n_ss
        # Axes are laid out in triples: wave-I, wave-Q, FFT.
        wave_axes = [ax for idx, ax in enumerate(self.axes) if idx % 3 in [0, 1]]
        fft_axes = [ax for idx, ax in enumerate(self.axes) if idx % 3 in [2]]
        # Redraw previous iterations in black with increasing transparency
        # so the evolution of the waves stays visible.
        for ax in self.axes:
            lines = ax.get_lines()
            ax.clear()
            nlines = len(lines)
            for idx, line in enumerate(lines):
                xs = line.get_xdata()
                ys = line.get_ydata()
                alpha = (0.5*idx)/nlines + 0.2
                ax.plot(xs, ys, 'k-', alpha=alpha)
        if self.iq_pairs:
            for ax, wave in zip(self.axes, full_controls[::2]):
                ax.clear()
                ax.plot(wave, label='I')
            for ax, wave, name in zip(self.axes, full_controls[1::2], self.wave_names):
                ax.plot(wave, label='Q')
                ax.set_ylabel(name)
            # Power spectrum of the complex I+jQ waves.
            c_waves = full_controls[::2] + 1j*full_controls[1::2]
            fft_waves = np.fft.fftshift(abs(np.fft.fft(c_waves, axis=1))**2)
            fft_freqs = 1e3 * np.fft.fftshift(np.fft.fftfreq(c_waves.shape[1], sim_dt))
            for ax, fft in zip(self.fft_axes, fft_waves):
                ax.clear()
                ax.plot(fft_freqs, fft)
                ax.set_xlim(-80, 80)
            self.fft_fig.savefig(path.join(outdir, 'waves_fft.%s' % OUTPUT_FMT))
        else:
            for idx, (ax, wave) in enumerate(zip(wave_axes, full_controls)):
                ax.set_yticks(np.linspace(min(int(np.floor(min(wave))), -1),
                                          max(int(np.ceil(max(wave))), 1),
                                          5))
                if idx != len(self.axes)-1:
                    ax.set_xticks([])
                else:
                    ax.set_xticks(range(0, len(wave)+1, 100))
                ax.plot([0, len(wave)], [0, 0], 'k--', lw=0.5)
                ax.set_xlim(0, len(wave))
                ax.plot(wave, 'r-')
            for idx, ax in enumerate(fft_axes):
                c_waves = full_controls[2*idx] + 1j*full_controls[2*idx+1]
                fft_wave = np.fft.fftshift(abs(np.fft.fft(c_waves))**2)
                fft_freqs = 1e3 * np.fft.fftshift(np.fft.fftfreq(len(c_waves), sim_dt))
                # BUG FIX: float slice bounds raise TypeError on Python 3.
                start = int(len(fft_wave) * (0.5 - .1))  # p/m 50 MHz
                stop = int(len(fft_wave) * (0.5 + .1))
                ax.plot(fft_freqs[start:stop], fft_wave[start:stop], 'r-')
                ax.set_yticklabels([])
                if idx == 0:
                    ax.set_xticklabels([])
            for ax, wave_name in zip(wave_axes, self.wave_names):
                ax.set_title(wave_name, x=-0.075, y=0.25)
            try:
                self.fig.savefig(path.join(outdir, 'waves.%s' % OUTPUT_FMT))
            except IOError:
                print('*** Unable to save waves fig. Is it open?')
class save_script(Reporter):
    """
    Saves the script calling this function in the output
    directory. Is only ever evaluated once
    """
    def __init__(self, script_name):
        super(save_script, self).__init__()
        self.script_name = script_name
        self.copied = False

    def run(self, outdir):
        # Copy only on the first invocation; later calls are no-ops.
        if self.copied:
            return
        shutil.copy(self.script_name, outdir + '/script.py')
        self.copied = True
class liveplot_waves(Reporter):
    """
    Use the liveplot module to plot waves. Requires liveplot to be
    installed and active::
        pip install liveplot
        python -m liveplot
    """
    def __init__(self, wave_names, spacing=1):
        super(liveplot_waves, self).__init__(spacing)
        # Imported lazily so liveplot is only required when this reporter
        # is actually instantiated.
        from liveplot import LivePlotClient
        self.client = LivePlotClient()
        self.client.clear()
        self.wave_names = wave_names

    def run(self, sim_controls, fids):
        # One named trace per control wave.
        for wave, name in zip(sim_controls, self.wave_names):
            self.client.plot_y(name, wave)
        # Append fidelity and log-infidelity history per setup.
        for i, fid in enumerate(fids):
            self.client.append_y('fid%d' % i, fid)
            self.client.append_y('log_infid%d' % i, np.log(1 - fid))
class liveplot_prop(Reporter):
    """
    Use the liveplot module to plot waves. Requires liveplot to be
    installed and active::
        pip install liveplot
        python -m liveplot
    """
    def __init__(self, spacing=1):
        super(liveplot_prop, self).__init__(spacing)
        # Lazy import: liveplot is an optional dependency.
        from liveplot import LivePlotClient
        self.client = LivePlotClient()
        self.client.clear()

    def run(self, props):
        # Show the magnitude of each propagator as a 2-D image.
        for i, prop in enumerate(props):
            self.client.plot_z('prop%d' % i, abs(prop))
class plot_fidelity(Reporter):
    """
    Plots the progress of the fidelity as a function of iteration
    """
    def __init__(self, spacing=1):
        super(plot_fidelity, self).__init__(spacing)
        # Per-setup fidelity history; allocated on the first run() call,
        # when the number of setups is known.
        self.all_fids = None

    def run(self, outdir, fids):
        n_fids = len(fids)
        if self.all_fids is None:
            self.all_fids = [[] for _ in range(n_fids)]
        f1, ax1 = plt.subplots(1, 1)  # fidelity vs iteration
        f2, ax2 = plt.subplots(1, 1)  # infidelity vs iteration (log scale)
        for fid_list, fid in zip(self.all_fids, fids):
            fid_list.append(fid)
            ax1.plot(range(len(fid_list)), fid_list, 's-')
            ax2.plot(range(len(fid_list)), 1 - np.array(fid_list), 's-')
        ax2.set_yscale('log')
        try:
            f1.savefig(path.join(outdir, 'fidelity.%s' % OUTPUT_FMT))
            f2.savefig(path.join(outdir, 'infidelity.%s' % OUTPUT_FMT))
        except IOError:
            print('*** Figure saving failed, is the pdf open elsewhere?')
        # Close to keep matplotlib's figure count bounded across iterations.
        plt.close(f1)
        plt.close(f2)
class plot_penalties(Reporter):
    """
    Plots each penalty cost as a function of iteration.
    (The original docstring said "fidelity" — copy-paste from plot_fidelity.)
    """
    def __init__(self, spacing=1):
        super(plot_penalties, self).__init__(spacing)

    def run(self, outdir, pen_hist):
        # pen_hist: list (over iterations) of per-penalty cost sequences.
        if len(pen_hist) == 0:
            return
        pen_hist = np.array(pen_hist)
        # BUG FIX: squeeze=False keeps the axes array 2-D, so a single
        # penalty still yields an iterable (plt.subplots(1, 1) returns a
        # bare Axes and the zip below would fail).
        f, axes = plt.subplots(pen_hist.shape[1], 1, squeeze=False)
        for ax, pens in zip(axes[:, 0], pen_hist.T):
            ax.plot(pens)
        f.savefig(path.join(outdir, 'penalties.%s' % OUTPUT_FMT))
        plt.close(f)
class plot_unitary(Reporter):
    def run(self, outdir, setups, props, fids, **kwargs):
        """Plot target vs. achieved propagator side by side and save the
        figure as unitary.<OUTPUT_FMT> in outdir."""
        U_target = setups[0].U_target
        U_total = props[0]
        fid = fids[0]  # NOTE: unused below; the title reads fids[0] directly
        # presumably orients a rectangular (state-transfer style) target so
        # it matches the achieved propagator's layout — TODO confirm
        if U_target.shape[0] != U_target.shape[1]:
            U_target = U_target.T
        f, (ax1, ax2) = plt.subplots(1, 2)
        plot_matrix(U_target, ax=ax1)
        ax1.set_title('Target')
        plot_matrix(U_total, ax=ax2)
        ax2.set_title('Actual (fid = %.04f)' % fids[0])
        f.savefig(path.join(outdir, 'unitary.%s' % OUTPUT_FMT))
        plt.close(f)
class plot_states(Reporter):
    def run(self, outdir, setups, props, fids, **kwargs):
        """Plot initial, target-final and achieved state matrices of the
        first setup side by side; saved as states.<OUTPUT_FMT> in outdir."""
        f, (ax1, ax2, ax3) = plt.subplots(1, 3)
        plot_matrix(setups[0].inits.T, ax=ax1)
        ax1.set_title('Initial')
        plot_matrix(setups[0].finals.T, ax=ax2)
        ax2.set_title('Final')
        plot_matrix(props[0], ax=ax3)
        ax3.set_title('Actual (fid = %.04f)' % fids[0])
        f.savefig(path.join(outdir, 'states.%s' % OUTPUT_FMT))
        plt.close(f)
class plot_trajectories(Reporter):
"""
Plot probability trajectories for a given setup.
"""
    def __init__(self, setup, spacing, taylor_order=20):
        """Initialize the trajectory-plot reporter.

        Args:
            setup: setup whose state trajectories are plotted in run().
            spacing: run every `spacing` iterations (Reporter convention).
            taylor_order: order of the Taylor expansion used in run() to
                approximate the matrix exponential of each time slice.
        """
        super(plot_trajectories, self).__init__(spacing)
        self.setup = setup
        self.taylor_order = taylor_order
def run(self, outdir, sim_controls, aux_params, dt, n_ss):
print('Plotting trajectories...')
dt = dt / float(n_ss)
setup = self.setup
t_order = self.taylor_order
f, axes = plt.subplots(len(self.setup.inits), 1)
for i_state, (init, final, ax) in enumerate(zip(self.setup.inits, self.setup.finals, axes)):
probs = []
psi = init.copy()
for i, time_slice in enumerate(sim_controls.T):
L = -1j * dt * (setup.H0 + sum(c*Hc for c,Hc in zip(time_slice, setup.Hcs)))
psi_k = psi
for k in range(1, t_order+1):
psi_k = L.dot(psi_k) / k
psi += psi_k
probs.append(np.abs(psi)**2)
ovlp = np.abs(np.sum(final.conj() | |
from fontTools.misc.fixedTools import floatToFixedToFloat
from fontTools.misc.testTools import stripVariableItemsFromTTX
from fontTools.misc.textTools import Tag
from fontTools import ttLib
from fontTools import designspaceLib
from fontTools.feaLib.builder import addOpenTypeFeaturesFromString
from fontTools.ttLib.tables import _f_v_a_r, _g_l_y_f
from fontTools.ttLib.tables import otTables
from fontTools.ttLib.tables.TupleVariation import TupleVariation
from fontTools import varLib
from fontTools.varLib import instancer
from fontTools.varLib.mvar import MVAR_ENTRIES
from fontTools.varLib import builder
from fontTools.varLib import featureVars
from fontTools.varLib import models
import collections
from copy import deepcopy
from io import BytesIO, StringIO
import logging
import os
import re
from types import SimpleNamespace
import pytest
# see Tests/varLib/instancer/conftest.py for "varfont" fixture definition
TESTDATA = os.path.join(os.path.dirname(__file__), "data")
@pytest.fixture(params=[True, False], ids=["optimize", "no-optimize"])
def optimize(request):
    # Parametrized fixture: every dependent test runs twice, once with
    # variation-data optimization enabled and once disabled.
    return request.param
@pytest.fixture
def fvarAxes():
    """Build two fvar axes for tests: wght 100/400/900 and wdth 70/100/100."""
    axes = []
    for tag, (minimum, default, maximum) in [
        ("wght", (100, 400, 900)),
        ("wdth", (70, 100, 100)),
    ]:
        axis = _f_v_a_r.Axis()
        axis.axisTag = Tag(tag)
        axis.minValue = minimum
        axis.defaultValue = default
        axis.maxValue = maximum
        axes.append(axis)
    return axes
def _get_coordinates(varfont, glyphname):
    """Return the glyph's coordinates as a plain list of (x, y) tuples.

    GlyphCoordinates is converted to a list so that a failing pytest assert
    renders a readable diff.
    """
    result = varfont["glyf"].getCoordinatesAndControls(glyphname, varfont)
    coords = result[0]
    return list(coords)
class InstantiateGvarTest(object):
    """Tests for instancer.instantiateGvar (glyph outline variations).

    The expected coordinate lists include four trailing points; per the
    docstring of test_composite_glyph_not_in_gvar these are the phantom
    points (advance/side-bearing metrics).
    """
    @pytest.mark.parametrize("glyph_name", ["hyphen"])
    @pytest.mark.parametrize(
        "location, expected",
        [
            pytest.param(
                {"wdth": -1.0},
                {
                    "hyphen": [
                        (27, 229),
                        (27, 310),
                        (247, 310),
                        (247, 229),
                        (0, 0),
                        (274, 0),
                        (0, 536),
                        (0, 0),
                    ]
                },
                id="wdth=-1.0",
            ),
            pytest.param(
                {"wdth": -0.5},
                {
                    "hyphen": [
                        (33.5, 229),
                        (33.5, 308.5),
                        (264.5, 308.5),
                        (264.5, 229),
                        (0, 0),
                        (298, 0),
                        (0, 536),
                        (0, 0),
                    ]
                },
                id="wdth=-0.5",
            ),
            # an axis pinned at the default normalized location (0.0) means
            # the default glyf outline stays the same
            pytest.param(
                {"wdth": 0.0},
                {
                    "hyphen": [
                        (40, 229),
                        (40, 307),
                        (282, 307),
                        (282, 229),
                        (0, 0),
                        (322, 0),
                        (0, 536),
                        (0, 0),
                    ]
                },
                id="wdth=0.0",
            ),
        ],
    )
    def test_pin_and_drop_axis(self, varfont, glyph_name, location, expected, optimize):
        # Pinning only "wdth" leaves "wght" variable (partial instance).
        instancer.instantiateGvar(varfont, location, optimize=optimize)
        assert _get_coordinates(varfont, glyph_name) == expected[glyph_name]
        # check that the pinned axis has been dropped from gvar
        assert not any(
            "wdth" in t.axes
            for tuples in varfont["gvar"].variations.values()
            for t in tuples
        )
    def test_full_instance(self, varfont, optimize):
        # Pinning every axis produces a static font: gvar must be removed.
        instancer.instantiateGvar(
            varfont, {"wght": 0.0, "wdth": -0.5}, optimize=optimize
        )
        assert _get_coordinates(varfont, "hyphen") == [
            (33.5, 229),
            (33.5, 308.5),
            (264.5, 308.5),
            (264.5, 229),
            (0, 0),
            (298, 0),
            (0, 536),
            (0, 0),
        ]
        assert "gvar" not in varfont
    def test_composite_glyph_not_in_gvar(self, varfont):
        """The 'minus' glyph is a composite glyph, which references 'hyphen' as a
        component, but has no tuple variations in gvar table, so the component offset
        and the phantom points do not change; however the sidebearings and bounding box
        do change as a result of the referenced component glyph 'hyphen' changing.
        """
        hmtx = varfont["hmtx"]
        vmtx = varfont["vmtx"]
        hyphenCoords = _get_coordinates(varfont, "hyphen")
        assert hyphenCoords == [
            (40, 229),
            (40, 307),
            (282, 307),
            (282, 229),
            (0, 0),
            (322, 0),
            (0, 536),
            (0, 0),
        ]
        assert hmtx["hyphen"] == (322, 40)
        assert vmtx["hyphen"] == (536, 229)
        minusCoords = _get_coordinates(varfont, "minus")
        assert minusCoords == [(0, 0), (0, 0), (422, 0), (0, 536), (0, 0)]
        assert hmtx["minus"] == (422, 40)
        assert vmtx["minus"] == (536, 229)
        location = {"wght": -1.0, "wdth": -1.0}
        instancer.instantiateGvar(varfont, location)
        # check 'hyphen' coordinates changed
        assert _get_coordinates(varfont, "hyphen") == [
            (26, 259),
            (26, 286),
            (237, 286),
            (237, 259),
            (0, 0),
            (263, 0),
            (0, 536),
            (0, 0),
        ]
        # check 'minus' coordinates (i.e. component offset and phantom points)
        # did _not_ change
        assert _get_coordinates(varfont, "minus") == minusCoords
        assert hmtx["hyphen"] == (263, 26)
        assert vmtx["hyphen"] == (536, 250)
        assert hmtx["minus"] == (422, 26)  # 'minus' left sidebearing changed
        assert vmtx["minus"] == (536, 250)  # 'minus' top sidebearing too
class InstantiateCvarTest(object):
    """Tests for instancer.instantiateCvar (CVT value variations)."""
    @pytest.mark.parametrize(
        "location, expected",
        [
            pytest.param({"wght": -1.0}, [500, -400, 150, 250], id="wght=-1.0"),
            pytest.param({"wdth": -1.0}, [500, -400, 180, 200], id="wdth=-1.0"),
            pytest.param({"wght": -0.5}, [500, -400, 165, 250], id="wght=-0.5"),
            pytest.param({"wdth": -0.3}, [500, -400, 180, 235], id="wdth=-0.3"),
        ],
    )
    def test_pin_and_drop_axis(self, varfont, location, expected):
        # Pinning one axis bakes its deltas into 'cvt ' values while the
        # other axis remains variable in cvar.
        instancer.instantiateCvar(varfont, location)
        assert list(varfont["cvt "].values) == expected
        # check that the pinned axis has been dropped from cvar
        pinned_axes = location.keys()
        assert not any(
            axis in t.axes for t in varfont["cvar"].variations for axis in pinned_axes
        )
    def test_full_instance(self, varfont):
        # Pinning every axis removes the cvar table entirely.
        instancer.instantiateCvar(varfont, {"wght": -0.5, "wdth": -0.5})
        assert list(varfont["cvt "].values) == [500, -400, 165, 225]
        assert "cvar" not in varfont
class InstantiateMVARTest(object):
    """Tests for instancer.instantiateMVAR (metrics variations)."""
    @pytest.mark.parametrize(
        "location, expected",
        [
            pytest.param(
                {"wght": 1.0},
                {"strs": 100, "undo": -200, "unds": 150, "xhgt": 530},
                id="wght=1.0",
            ),
            pytest.param(
                {"wght": 0.5},
                {"strs": 75, "undo": -150, "unds": 100, "xhgt": 515},
                id="wght=0.5",
            ),
            pytest.param(
                {"wght": 0.0},
                {"strs": 50, "undo": -100, "unds": 50, "xhgt": 500},
                id="wght=0.0",
            ),
            pytest.param(
                {"wdth": -1.0},
                {"strs": 20, "undo": -100, "unds": 50, "xhgt": 500},
                id="wdth=-1.0",
            ),
            pytest.param(
                {"wdth": -0.5},
                {"strs": 35, "undo": -100, "unds": 50, "xhgt": 500},
                id="wdth=-0.5",
            ),
            pytest.param(
                {"wdth": 0.0},
                {"strs": 50, "undo": -100, "unds": 50, "xhgt": 500},
                id="wdth=0.0",
            ),
        ],
    )
    def test_pin_and_drop_axis(self, varfont, location, expected):
        mvar = varfont["MVAR"].table
        # initially we have two VarData: the first contains deltas associated with 3
        # regions: 1 with only wght, 1 with only wdth, and 1 with both wght and wdth
        assert len(mvar.VarStore.VarData) == 2
        assert mvar.VarStore.VarRegionList.RegionCount == 3
        assert mvar.VarStore.VarData[0].VarRegionCount == 3
        assert all(len(item) == 3 for item in mvar.VarStore.VarData[0].Item)
        # The second VarData has deltas associated only with 1 region (wght only).
        assert mvar.VarStore.VarData[1].VarRegionCount == 1
        assert all(len(item) == 1 for item in mvar.VarStore.VarData[1].Item)
        instancer.instantiateMVAR(varfont, location)
        for mvar_tag, expected_value in expected.items():
            table_tag, item_name = MVAR_ENTRIES[mvar_tag]
            assert getattr(varfont[table_tag], item_name) == expected_value
        # check that regions and accompanying deltas have been dropped
        num_regions_left = len(mvar.VarStore.VarRegionList.Region)
        assert num_regions_left < 3
        assert mvar.VarStore.VarRegionList.RegionCount == num_regions_left
        assert mvar.VarStore.VarData[0].VarRegionCount == num_regions_left
        # VarData subtables have been merged
        assert len(mvar.VarStore.VarData) == 1
    @pytest.mark.parametrize(
        "location, expected",
        [
            pytest.param(
                {"wght": 1.0, "wdth": 0.0},
                {"strs": 100, "undo": -200, "unds": 150},
                id="wght=1.0,wdth=0.0",
            ),
            pytest.param(
                {"wght": 0.0, "wdth": -1.0},
                {"strs": 20, "undo": -100, "unds": 50},
                id="wght=0.0,wdth=-1.0",
            ),
            pytest.param(
                {"wght": 0.5, "wdth": -0.5},
                {"strs": 55, "undo": -145, "unds": 95},
                id="wght=0.5,wdth=-0.5",
            ),
            pytest.param(
                {"wght": 1.0, "wdth": -1.0},
                {"strs": 50, "undo": -180, "unds": 130},
                # fixed: id previously duplicated "wght=0.5,wdth=-0.5" from a
                # copy-paste, mislabelling this test case in pytest output
                id="wght=1.0,wdth=-1.0",
            ),
        ],
    )
    def test_full_instance(self, varfont, location, expected):
        # Pinning every axis bakes deltas into the metrics tables and drops MVAR.
        instancer.instantiateMVAR(varfont, location)
        for mvar_tag, expected_value in expected.items():
            table_tag, item_name = MVAR_ENTRIES[mvar_tag]
            assert getattr(varfont[table_tag], item_name) == expected_value
        assert "MVAR" not in varfont
class InstantiateHVARTest(object):
    """Tests for instancer.instantiateHVAR (horizontal metrics variations)."""
    # the 'expectedDeltas' below refer to the VarData item deltas for the "hyphen"
    # glyph in the PartialInstancerTest-VF.ttx test font, that are left after
    # partial instancing
    @pytest.mark.parametrize(
        "location, expectedRegions, expectedDeltas",
        [
            ({"wght": -1.0}, [{"wdth": (-1.0, -1.0, 0)}], [-59]),
            ({"wght": 0}, [{"wdth": (-1.0, -1.0, 0)}], [-48]),
            ({"wght": 1.0}, [{"wdth": (-1.0, -1.0, 0)}], [7]),
            (
                {"wdth": -1.0},
                [
                    {"wght": (-1.0, -1.0, 0.0)},
                    {"wght": (0.0, 0.6099854, 1.0)},
                    {"wght": (0.6099854, 1.0, 1.0)},
                ],
                [-11, 31, 51],
            ),
            ({"wdth": 0}, [{"wght": (0.6099854, 1.0, 1.0)}], [-4]),
        ],
    )
    def test_partial_instance(self, varfont, location, expectedRegions, expectedDeltas):
        instancer.instantiateHVAR(varfont, location)
        assert "HVAR" in varfont
        hvar = varfont["HVAR"].table
        varStore = hvar.VarStore
        regions = varStore.VarRegionList.Region
        # Supports are computed against the axes that remain after pinning.
        fvarAxes = [a for a in varfont["fvar"].axes if a.axisTag not in location]
        regionDicts = [reg.get_support(fvarAxes) for reg in regions]
        assert len(regionDicts) == len(expectedRegions)
        for region, expectedRegion in zip(regionDicts, expectedRegions):
            assert region.keys() == expectedRegion.keys()
            for axisTag, support in region.items():
                assert support == pytest.approx(expectedRegion[axisTag])
        assert len(varStore.VarData) == 1
        assert varStore.VarData[0].ItemCount == 2
        assert hvar.AdvWidthMap is not None
        advWithMap = hvar.AdvWidthMap.mapping
        assert advWithMap[".notdef"] == advWithMap["space"]
        varIdx = advWithMap[".notdef"]
        # these glyphs have no metrics variations in the test font
        # (varIdx packs VarData index in the high 16 bits, item index in the low)
        assert varStore.VarData[varIdx >> 16].Item[varIdx & 0xFFFF] == (
            [0] * varStore.VarData[0].VarRegionCount
        )
        varIdx = advWithMap["hyphen"]
        assert varStore.VarData[varIdx >> 16].Item[varIdx & 0xFFFF] == expectedDeltas
    def test_full_instance(self, varfont):
        # Pinning every axis removes HVAR entirely.
        instancer.instantiateHVAR(varfont, {"wght": 0, "wdth": 0})
        assert "HVAR" not in varfont
    def test_partial_instance_keep_empty_table(self, varfont):
        # Append an additional dummy axis to fvar, for which the current HVAR table
        # in our test 'varfont' contains no variation data.
        # Instancing the other two wght and wdth axes should leave HVAR table empty,
        # to signal that there are no variations to the glyphs' advance widths.
        fvar = varfont["fvar"]
        axis = _f_v_a_r.Axis()
        axis.axisTag = "TEST"
        fvar.axes.append(axis)
        instancer.instantiateHVAR(varfont, {"wght": 0, "wdth": 0})
        assert "HVAR" in varfont
        varStore = varfont["HVAR"].table.VarStore
        assert varStore.VarRegionList.RegionCount == 0
        assert not varStore.VarRegionList.Region
        assert varStore.VarRegionList.RegionAxisCount == 1
class InstantiateItemVariationStoreTest(object):
def test_VarRegion_get_support(self):
axisOrder = ["wght", "wdth", "opsz"]
regionAxes = {"wdth": (-1.0, -1.0, 0.0), "wght": (0.0, 1.0, 1.0)}
region = builder.buildVarRegion(regionAxes, axisOrder)
assert len(region.VarRegionAxis) == 3
assert region.VarRegionAxis[2].PeakCoord == 0
fvarAxes = [SimpleNamespace(axisTag=axisTag) for axisTag in axisOrder]
assert region.get_support(fvarAxes) == regionAxes
@pytest.fixture
def varStore(self):
return builder.buildVarStore(
builder.buildVarRegionList(
[
{"wght": (-1.0, -1.0, | |
from pathlib import Path
import sqlalchemy as sa
from spinta.core.config import RawConfig
from spinta.testing.cli import SpintaCliRunner
from spinta.testing.config import configure
from spinta.testing.datasets import Sqlite
from spinta.testing.manifest import compare_manifest
from spinta.testing.tabular import create_tabular_manifest
from spinta.testing.manifest import load_manifest
def test_inspect(
    rc: RawConfig,
    cli: SpintaCliRunner,
    tmpdir: Path,
    sqlite: Sqlite,
):
    """`inspect` with the manifest given via config detects tables, columns,
    primary keys and foreign keys from a SQLite database."""
    # Prepare source data.
    sqlite.init({
        'COUNTRY': [
            sa.Column('ID', sa.Integer, primary_key=True),
            sa.Column('CODE', sa.Text),
            sa.Column('NAME', sa.Text),
        ],
        'CITY': [
            sa.Column('NAME', sa.Text),
            sa.Column('COUNTRY_ID', sa.Integer, sa.ForeignKey("COUNTRY.ID")),
        ],
    })
    # Configure Spinta.
    rc = configure(rc, None, tmpdir / 'manifest.csv', f'''
    d | r | m | property | type | ref | source | access
    dataset | | | |
    | rs | sql | | {sqlite.dsn} |
    ''')
    cli.invoke(rc, ['inspect', '-o', tmpdir / 'result.csv'])
    # Check what was detected.
    manifest = load_manifest(rc, tmpdir / 'result.csv')
    # Replace the volatile DSN with a stable placeholder before comparing.
    manifest.datasets['dataset'].resources['rs'].external = 'sqlite'
    assert manifest == '''
    d | r | b | m | property | type | ref | source | prepare
    dataset | | | |
    | rs | sql | | sqlite |
    | | | |
    | | | Country | | id | COUNTRY |
    | | | | id | integer | | ID |
    | | | | code | string | | CODE |
    | | | | name | string | | NAME |
    | | | |
    | | | City | | | CITY |
    | | | | name | string | | NAME |
    | | | | country_id | ref | Country | COUNTRY_ID |
    '''
def test_inspect_from_manifest_table(
    rc: RawConfig,
    cli: SpintaCliRunner,
    tmpdir: Path,
    sqlite: Sqlite,
):
    """`inspect` also accepts the manifest file as a CLI positional argument."""
    # Prepare source data.
    sqlite.init({
        'COUNTRY': [
            sa.Column('ID', sa.Integer, primary_key=True),
            sa.Column('NAME', sa.Text),
        ],
    })
    create_tabular_manifest(tmpdir / 'manifest.csv', f'''
    d | r | m | property | type | ref | source | access
    dataset | | | |
    | rs | sql | | {sqlite.dsn} |
    ''')
    cli.invoke(rc, [
        'inspect', tmpdir / 'manifest.csv',
        '-o', tmpdir / 'result.csv',
    ])
    # Check what was detected.
    manifest = load_manifest(rc, tmpdir / 'result.csv')
    # Replace the volatile DSN with a stable placeholder before comparing.
    manifest.datasets['dataset'].resources['rs'].external = 'sqlite'
    assert manifest == '''
    d | r | b | m | property | type | ref | source | prepare
    dataset | | | |
    | rs | sql | | sqlite |
    | | | |
    | | | Country | | id | COUNTRY |
    | | | | id | integer | | ID |
    | | | | name | string | | NAME |
    '''
def test_inspect_format(
    rc: RawConfig,
    cli: SpintaCliRunner,
    tmpdir: Path,
    sqlite: Sqlite,
):
    """`inspect -r sql <dsn>` without a manifest generates default dataset
    ('datasets/gov/example') and resource ('resource1') names."""
    # Prepare source data.
    sqlite.init({
        'COUNTRY': [
            sa.Column('ID', sa.Integer, primary_key=True),
            sa.Column('CODE', sa.Text),
            sa.Column('NAME', sa.Text),
        ],
        'CITY': [
            sa.Column('NAME', sa.Text),
            sa.Column('COUNTRY_ID', sa.Integer, sa.ForeignKey("COUNTRY.ID")),
        ],
    })
    cli.invoke(rc, [
        'inspect',
        '-r', 'sql', sqlite.dsn,
        '-o', tmpdir / 'manifest.csv',
    ])
    # Check what was detected.
    manifest = load_manifest(rc, tmpdir / 'manifest.csv')
    dataset = manifest.datasets['datasets/gov/example']
    # Replace the volatile DSN with a stable placeholder before comparing.
    dataset.resources['resource1'].external = 'sqlite'
    assert manifest == '''
    d | r | b | m | property | type | ref | source | prepare
    datasets/gov/example | | | |
    | resource1 | sql | | sqlite |
    | | | |
    | | | Country | | id | COUNTRY |
    | | | | id | integer | | ID |
    | | | | code | string | | CODE |
    | | | | name | string | | NAME |
    | | | |
    | | | City | | | CITY |
    | | | | name | string | | NAME |
    | | | | country_id | ref | Country | COUNTRY_ID |
    '''
def test_inspect_cyclic_refs(
    rc: RawConfig,
    cli: SpintaCliRunner,
    tmpdir: Path,
    sqlite: Sqlite,
):
    """`inspect` handles mutually referencing tables (COUNTRY.CAPITAL ->
    CITY.ID and CITY.COUNTRY_ID -> COUNTRY.ID) without infinite recursion."""
    # Prepare source data.
    sqlite.init({
        'COUNTRY': [
            sa.Column('ID', sa.Integer, primary_key=True),
            sa.Column('CAPITAL', sa.Integer, sa.ForeignKey("CITY.ID")),
            sa.Column('NAME', sa.Text),
        ],
        'CITY': [
            sa.Column('ID', sa.Integer, primary_key=True),
            sa.Column('NAME', sa.Text),
            sa.Column('COUNTRY_ID', sa.Integer, sa.ForeignKey("COUNTRY.ID")),
        ],
    })
    cli.invoke(rc, [
        'inspect',
        '-r', 'sql', sqlite.dsn,
        '-o', tmpdir / 'manifest.csv',
    ])
    # Check what was detected.
    manifest = load_manifest(rc, tmpdir / 'manifest.csv')
    dataset = manifest.datasets['datasets/gov/example']
    # Replace the volatile DSN with a stable placeholder before comparing.
    dataset.resources['resource1'].external = 'sqlite'
    assert manifest == '''
    d | r | b | m | property | type | ref | source | prepare
    datasets/gov/example | | | |
    | resource1 | sql | | sqlite |
    | | | |
    | | | City | | id | CITY |
    | | | | id | integer | | ID |
    | | | | name | string | | NAME |
    | | | | country_id | ref | Country | COUNTRY_ID |
    | | | |
    | | | Country | | id | COUNTRY |
    | | | | id | integer | | ID |
    | | | | capital | ref | City | CAPITAL |
    | | | | name | string | | NAME |
    '''
def test_inspect_self_refs(
    rc: RawConfig,
    cli: SpintaCliRunner,
    tmpdir: Path,
    sqlite: Sqlite,
):
    """`inspect` handles a self-referencing table (CATEGORY.PARENT_ID ->
    CATEGORY.ID)."""
    # Prepare source data.
    sqlite.init({
        'CATEGORY': [
            sa.Column('ID', sa.Integer, primary_key=True),
            sa.Column('NAME', sa.Text),
            sa.Column('PARENT_ID', sa.Integer, sa.ForeignKey("CATEGORY.ID")),
        ],
    })
    cli.invoke(rc, [
        'inspect',
        '-r', 'sql', sqlite.dsn,
        '-o', tmpdir / 'manifest.csv',
    ])
    # Check what was detected.
    manifest = load_manifest(rc, tmpdir / 'manifest.csv')
    dataset = manifest.datasets['datasets/gov/example']
    # Replace the volatile DSN with a stable placeholder before comparing.
    dataset.resources['resource1'].external = 'sqlite'
    assert manifest == '''
    d | r | b | m | property | type | ref | source | prepare
    datasets/gov/example | | | |
    | resource1 | sql | | sqlite |
    | | | |
    | | | Category | | id | CATEGORY |
    | | | | id | integer | | ID |
    | | | | name | string | | NAME |
    | | | | parent_id | ref | Category | PARENT_ID |
    '''
def test_inspect_oracle_sqldump_stdin(
    rc: RawConfig,
    cli: SpintaCliRunner,
    tmpdir: Path,
):
    """`inspect -r sqldump -` reads an Oracle-style SQL dump from stdin and
    detects table names (Oracle storage clauses are ignored)."""
    cli.invoke(rc, [
        'inspect',
        '-r', 'sqldump', '-',
        '-o', tmpdir / 'manifest.csv',
    ], input='''
    --------------------------------------------------------
    -- DDL for Table COUNTRY
    --------------------------------------------------------
    CREATE TABLE "GEO"."COUNTRY" (
    "ID" NUMBER(19,0),
    "NAME" VARCHAR2(255 CHAR)
    ) SEGMENT CREATION IMMEDIATE
    PCTFREE 10 PCTUSED 40 INITRANS 1 MAXTRANS 255
    NOCOMPRESS LOGGING
    STORAGE(
    INITIAL 65536 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645
    PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1
    BUFFER_POOL DEFAULT FLASH_CACHE DEFAULT CELL_FLASH_CACHE DEFAULT
    )
    TABLESPACE "GEO_PORTAL_V2" ;
    --------------------------------------------------------
    -- DDL for Table COUNTRY
    --------------------------------------------------------
    CREATE TABLE "GEO"."CITY" (
    "ID" NUMBER(19,0),
    "NAME" VARCHAR2(255 CHAR)
    ) SEGMENT CREATION IMMEDIATE
    PCTFREE 10 PCTUSED 40 INITRANS 1 MAXTRANS 255
    NOCOMPRESS LOGGING
    STORAGE(
    INITIAL 65536 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645
    PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1
    BUFFER_POOL DEFAULT FLASH_CACHE DEFAULT CELL_FLASH_CACHE DEFAULT
    )
    TABLESPACE "GEO_PORTAL_V2" ;
    ''')
    # Check what was detected.
    manifest = load_manifest(rc, tmpdir / 'manifest.csv')
    assert manifest == '''
    id | d | r | b | m | property | type | ref | source | prepare | level | access | uri | title | description
    | datasets/gov/example | | | | | | | | |
    | | resource1 | sqldump | | - | | | | | |
    | | | | | | | | | |
    | | | | Country | | | COUNTRY | | | | | |
    | | | | | | | | | |
    | | | | City | | | CITY | | | | | |
    '''
def test_inspect_oracle_sqldump_file_with_formula(
    rc: RawConfig,
    cli: SpintaCliRunner,
    tmpdir: Path,
):
    """`inspect -r sqldump <file> -f file(...)` reads a dump file using the
    encoding given in the prepare formula (iso-8859-4 here)."""
    # The Lithuanian comment exercises the non-UTF-8 code path.
    (tmpdir / 'dump.sql').write_text('''
    -- Šalys
    CREATE TABLE "GEO"."COUNTRY" (
    "ID" NUMBER(19,0),
    "NAME" VARCHAR2(255 CHAR)
    );
    ''', encoding='iso-8859-4')
    cli.invoke(rc, [
        'inspect',
        '-r', 'sqldump', tmpdir / 'dump.sql',
        '-f', 'file(self, encoding: "iso-8859-4")',
        '-o', tmpdir / 'manifest.csv',
    ])
    # Check what was detected.
    manifest = load_manifest(rc, tmpdir / 'manifest.csv')
    dataset = manifest.datasets['datasets/gov/example']
    # Replace the absolute dump path with a stable name before comparing.
    dataset.resources['resource1'].external = 'dump.sql'
    assert manifest == '''
    d | r | b | m | property | type | ref | source | prepare
    datasets/gov/example | | | |
    | resource1 | sqldump | | dump.sql | file(self, encoding: 'iso-8859-4')
    | | | |
    | | | Country | | | COUNTRY |
    '''
def test_inspect_with_schema(
rc: RawConfig,
cli: SpintaCliRunner,
tmpdir: | |
"""
Minewalker is a game where the player aims to cross a field without
a mine.
"""
import msvcrt, random, os, time, ast, sys
from random import randint
from termcolor import colored
class Main():
    """
    Minewalker: the player crosses the field from the left column to the
    right column without stepping on a mine.
    """
    def __init__(self, x=10, y=10, mines=15, player=' 0 ', colour=None, trail=' # ', wait=3, climit=3, rstart=True, mode='custom') -> None:
        """
        Constructor.

        Args:
            x, y: field width and height in cells.
            mines: number of mines placed on the field.
            player: three-character symbol for the player.
            colour: termcolor colour name for the player symbol (or None).
            trail: symbol left behind on visited cells.
            wait: seconds the mines stay visible at the start.
            climit: how many times the player may use the check() ability.
            rstart: if True the player starts on a random row, else top-left.
            mode: difficulty name; selects the high-score bucket.
        """
        self.array = []
        self.yValue = y
        self.xValue = x
        self.numMines = mines
        self.user = colored(player, colour)
        # (was assigned twice in the original; duplicate removed)
        self.running = True
        self.key = ''
        self.yPosition = randint(0, self.yValue - 1)
        self.xPosition = 0
        self.blankField= []
        self.trail = trail
        self.coordinates = []  # list of (x, y) mine positions
        self.wait = wait
        self.starting = True
        self.yes = ["yes", "ye", "y"]
        self.yUser = random.randint(0, self.yValue-1)
        self.checking = False
        self.xCheck = 0
        self.yCheck = 0
        self.climit = climit
        self.xstart = 0
        self.ystart = 0
        self.yend = 0
        self.xend = 2
        self.rstart = rstart
        self.mode = mode
        self.hScores = None
        self.start_time = time.time()
        self.total_time = 0
        self.name = ''
        # Empty high-score table: [easy, medium, hard, impossible]
        self.base = [[], [], [], []]
        self.score = []
        self.content = ''
    def minefield(self) -> None:
        """
        Builds the minefield, places the mines and the player, then starts
        the render loop via refresh().
        """
        # This appends the number of rows
        # to the array
        for row in range(self.yValue):
            self.subarray = []
            # The following loop appends the number
            # of columns to each row, denoted by '_'
            for column in range(self.xValue):
                self.subarray.append(' _ ')
            # This appends each row to the array,
            # creating a new row
            self.array.append(self.subarray)
        # iterates through each row, assigning
        # a bomb to a random position in that row
        for mine in range(self.numMines):
            self.randomColumn = random.randint(0, self.xValue-1)
            self.rownum = self.array[mine % self.yValue]
            self.rownum[self.randomColumn] = colored(' @ ', 'red')
            # appends the coordinates of the mine to a list. Coordinates are from top left to bottom right
            self.coordinates.append((self.randomColumn, mine % self.yValue))
        if self.rstart == False:
            self.yPosition = 0
            self.xPosition = 0
        # Removes a bomb if its in the user's starting position. This happens rarely. while is to ensure it
        # does not happen again even after adding another mine.
        while (self.xPosition, self.yPosition) in self.coordinates:
            # Removes user's position from list if it exists
            self.coordinates.remove((self.xPosition, self.yPosition))
            self.randomColumn = random.randint(0, self.xValue-1)
            # NOTE(review): the replacement mine goes on row self.yUser, not
            # the player's row — presumably intentional so it cannot land on
            # the player again, but confirm (it could still hit self.yUser
            # == self.yPosition at column 0).
            self.rownum = self.array[self.yUser]
            self.rownum[self.randomColumn] = colored(' @ ', 'red')
            self.coordinates.append((self.randomColumn, self.yUser))
        # Checks for mines in the last column and removes them
        for (x, y) in self.coordinates.copy():
            if self.xValue-1 == x:
                self.array[y][x]= ' _ '
                self.coordinates.remove((x, y))
            else:
                continue
        # Places the user on a random row in the first column or in the
        # top-left corner
        self.array[self.yPosition][self.xPosition] = self.user
        self.refresh()
    def refresh(self):
        """
        Refreshes the minefield and prints it to the console
        It prints the array's elements as a table.
        """
        # clears previous output
        os.system('cls')
        for row in self.array:
            for element in row:
                print(element, end='')
            print('\n', end='')
        # Displays the normal field after the timer
        if self.starting == True:
            time.sleep(self.wait)
            for x, y in self.coordinates:
                # replaces each bomb with normal ' _ ' symbol to hide it after timer
                self.array[y][x] = ' _ '
            self.starting = False
            self.refresh()
        # Checks to see if the player wins by checking its on the last column
        if self.xPosition == self.xValue - 1:
            self.win()
        # checks if the player is on a mine by seeing if its matched coordinates.
        if (self.xPosition, self.yPosition) in self.coordinates:
            self.game_over()
        if self.checking == True:
            self.check()
        self.player()
    def player(self):
        """
        Creates player and moves them around the board with inputs
        (WASD to move, space to use check()).
        """
        # Conditions to determine the new position of the user while in index
        self.key = ord(msvcrt.getch())
        if self.key == 119:   # 'w' — up
            self.yPosition -= 1
        if self.key == 97:    # 'a' — left
            self.xPosition -= 1
        if self.key == 115:   # 's' — down
            self.yPosition += 1
        if self.key == 100:   # 'd' — right
            self.xPosition += 1
        if self.key == 32:    # space — reveal surroundings
            if self.climit > 0:
                self.check()
        # brings user up by 1 to cancel
        if self.yPosition > self.yValue - 1:
            self.yPosition -= 1
        if self.yPosition < 0:
            self.yPosition += 1
        if self.xPosition > self.xValue - 1:
            self.xPosition -= 1
        if self.xPosition < 0:
            self.xPosition += 1
        # Checks whether user is in each row of the field. If it is
        # then it replaces the user with a hashtag (or trail) before
        # replacing another element with the user to look like its
        # moving.
        for row in self.array:
            if self.user in row:
                row[row.index(self.user)] = self.trail
            else:
                continue
        self.array[self.yPosition][self.xPosition] = self.user
        self.refresh()
    def check(self):
        """
        Checks the surrounding squares around the user for bombs with the space bar
        """
        # Checks if the coordinates of the surrounding squares of the user
        # has mines or vacant squares.
        # These conditions control index error when the function is used
        if self.xPosition == 0:
            self.xstart = 0
        if self.xPosition != 0:
            self.xstart = -1
        if self.yPosition == 0:
            self.ystart = 0
            self.yend = 2
        if 0 < self.yPosition < self.yValue - 1:
            self.ystart = -1
            self.yend = 2
        if self.yPosition == self.yValue - 1:
            self.ystart = -1
            self.yend = 1
        # A loop to 'discover' the squares around the user's current position
        for yValue in range(self.ystart, self.yend):
            self.yCheck = self.yPosition + yValue
            for xValue in range(self.xstart, self.xend):
                self.xCheck = self.xPosition + xValue
                # checks if its in index range in list
                if (self.xCheck, self.yCheck) in self.coordinates:
                    self.array[self.yCheck][self.xCheck] = colored(' @ ', 'red')
                else:
                    self.array[self.yCheck][self.xCheck] = self.trail
        # Changes current position of the user to its symbol
        self.array[self.yPosition][self.xPosition] = self.user
        # climit is 'check limit' which just makes sure the user can use this function
        # a certain number of times
        self.climit -= 1
        self.refresh()
    def game_over(self):
        """
        Ends the game when someone lands on a mine
        """
        self.display_end()
        print('Game Over!')
        self.repeat()
    def repeat(self):
        """
        Asks user whether the want to repeat the game; otherwise prints the
        high-score table and exits.
        """
        answer = input("Would you like to repeat?\n")
        if answer.lower() in self.yes:
            os.system('cls')
            UI().settings()
        else:
            scores_path = os.path.join(sys.path[0], 'High_Scores.txt')
            # Create an empty score table first, so ending a game that was
            # never won (file never written) does not crash here.
            if not os.path.isfile(scores_path):
                with open(scores_path, 'w') as score_file:
                    score_file.write(str(self.base))
            with open(scores_path, 'r') as score_file:
                self.content = ast.literal_eval(score_file.read())
            print("Thanks for Playing !!")
            print("Here are the current High Scores:\n")
            for difficulty in self.content:
                if difficulty == self.content[0]:
                    print("EASY")
                    print("--------------------")
                elif difficulty == self.content[1]:
                    print("\nMEDIUM")
                    print("--------------------")
                elif difficulty == self.content[2]:
                    print("\nHARD")
                    print("--------------------")
                elif difficulty == self.content[3]:
                    print("\nIMPOSSIBLE")
                    print("--------------------")
                for ranking in range(5):
                    try:
                        print(f"Number {ranking+1}: {sorted(difficulty)[ranking][1]}, {round(sorted(difficulty)[ranking][0], 2)}")
                    except IndexError:
                        # Fewer than five recorded scores for this difficulty
                        # (was a bare except; narrowed to the expected error).
                        print(f"Number {ranking+1}: None")
            quit()
    def display_end(self):
        """
        Displays the board when the game ends
        """
        os.system('cls')
        for x, y in self.coordinates:
            # exposes every mine on the final board
            self.array[y][x] = colored(' @ ', 'red')
        for row in self.array:
            for element in row:
                print(element, end='')
            print('\n', end='')
    def win(self):
        """
        Ends the game and exposes the mines
        """
        # The starting reveal delay does not count towards the time.
        self.total_time = time.time() - (self.start_time + self.wait)
        self.display_end()
        print('You Win!')
        self.high_score()
    def high_score(self):
        """Records the player's time under their chosen name in the
        High_Scores.txt bucket matching the difficulty mode."""
        self.name = input("What is your syndicate name? ")
        scores_path = os.path.join(sys.path[0], 'High_Scores.txt')
        # writes list to file if non-existent
        if not os.path.isfile(scores_path):
            with open(scores_path, 'w') as score_file:
                score_file.write(str(self.base))
        self.score = [self.total_time, self.name]
        with open(scores_path, 'r') as score_file:
            self.content = ast.literal_eval(score_file.read())
        # places the scores in different lists inside the array based
        # on level of difficulty
        if self.mode == 'easy':
            self.content[0].append(self.score)
        elif self.mode == 'medium':
            self.content[1].append(self.score)
        elif self.mode == 'hard':
            self.content[2].append(self.score)
        elif self.mode == 'impossible':
            self.content[3].append(self.score)
        else:
            # custom games are not ranked
            self.repeat()
        with open(scores_path, 'w') as score_file:
            score_file.write(str(self.content))
        self.repeat()
class UI():
    def __init__(self):
        """ Default values
        for the game's aesthetic settings, printed for the player before
        settings() asks for overrides.
        """
        self.x = 10              # field width
        self.y = 10              # field height
        self.mines = 15          # number of mines
        self.player = ' 0 '      # player symbol (3 chars)
        self.colour = None       # termcolor colour name for the player
        self.trail = ' # '       # trail symbol (3 chars)
        self.wait = 3            # seconds mines stay visible at start
        self.valid = True        # input-validation flag used by settings()
        self.climit = 5          # allowed uses of the check ability
        self.rstart = False      # random starting row if True
        self.mode = ''           # difficulty name
        # Colours accepted by termcolor.colored
        self.colours = ['grey', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white']
        print(f"""
        Currently, your default settings are as follows:
        Player: {self.player}
        Colour: {self.colour}
        trail: {self.trail}
        Your player and trail can only be one character long. To view all of the available colours,
        click !colours when asked to enter a colour. To use the default settings for any of the
        aesthetic features, hit 'Enter'.
        """)
def settings(self):
"""
Defines settings of the game
"""
# The following mainly changes the aesthetics of the game
| |
User,
on_delete=models.CASCADE,
related_name='%(class)s_change_user'
)
is_deleted = models.BooleanField(
default=False,
)
class Meta:
db_table = "email_contact"
class email_content(models.Model):
    """Stored email message (subject and body) with soft-delete and audit
    fields."""
    email_content_id = models.AutoField(primary_key=True)
    email_subject = models.CharField(max_length=255)
    email_content = models.TextField('email_content')
    is_private = models.BooleanField(default=False)
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    # User who last changed the row (audit trail).
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    # Soft delete: rows are flagged, not removed.
    is_deleted = models.BooleanField(
        default=False,
    )
    class Meta:
        db_table = "email_content"
class folder(models.Model):
    """Document folder attachable to any one parent object (project, task,
    customer, organisation, requirement, requirement item, or RFC); folders
    nest via parent_folder."""
    folder_id = models.AutoField(primary_key=True)
    # Exactly one of the following parent FKs is expected to be set —
    # presumably enforced at the application layer, TODO confirm.
    project = models.ForeignKey(
        'project',
        on_delete=models.CASCADE,
        blank=True,
        null=True
    )
    task = models.ForeignKey(
        'task',
        on_delete=models.CASCADE,
        blank=True,
        null=True
    )
    customer = models.ForeignKey(
        'customer',
        on_delete=models.CASCADE,
        blank=True,
        null=True,
    )
    organisation = models.ForeignKey(
        'organisation',
        on_delete=models.CASCADE,
        blank=True,
        null=True,
    )
    requirement = models.ForeignKey(
        'requirement',
        on_delete=models.CASCADE,
        blank=True,
        null=True,
    )
    requirement_item = models.ForeignKey(
        'requirement_item',
        on_delete=models.CASCADE,
        blank=True,
        null=True,
    )
    request_for_change = models.ForeignKey(
        'request_for_change',
        on_delete=models.CASCADE,
        blank=True,
        null=True,
    )
    folder_description = models.CharField(max_length=255)
    # Self-reference for nested folder trees.
    parent_folder = models.ForeignKey(
        'self',
        blank=True,
        null=True,
        on_delete=models.CASCADE
    )
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    # Soft delete: rows are flagged, not removed.
    is_deleted = models.BooleanField(
        default=False,
    )
    def __str__(self):
        return str(self.folder_description)
    class Meta:
        db_table = "folder"
class group(models.Model):
    """User group; groups nest via parent_group."""
    group_id = models.AutoField(primary_key=True)
    group_name = models.CharField(
        max_length=50,
    )
    parent_group = models.ForeignKey(
        "self",
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    # Soft delete: rows are flagged, not removed.
    is_deleted = models.BooleanField(
        default=False,
    )
    def natural_key(self):
        # Used by Django serialization together with
        # group_manager.get_by_natural_key.
        return (
            self.group_id,
            self.group_name
        )
    def __str__(self):
        return str(self.group_name)
    class Meta:
        db_table = "group"
class group_manager(models.Manager):
    """Manager providing natural-key lookup for group (counterpart of
    group.natural_key)."""
    def get_by_natural_key(
        self,
        group_id,
        group_name
    ):
        return self.get(
            group_id=group_id,
            group_name=group_name
        )
class group_permission(models.Model):
    """Join table linking a group to a permission_set it has been granted."""
    group_permission_id = models.AutoField(primary_key=True)
    permission_set = models.ForeignKey(
        'permission_set',
        on_delete=models.CASCADE,
    )
    group = models.ForeignKey(
        'group',
        on_delete=models.CASCADE
    )
    # Audit trail fields, consistent with the other models in this module.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    # Soft-delete flag.
    is_deleted = models.BooleanField(
        default=False,
    )

    def __str__(self):
        # NOTE: renders the linked permission_set, not an own name field.
        return str(self.permission_set)

    class Meta:
        db_table = "group_permission"
class kanban_board(models.Model):
    """A kanban board; optionally generated from a requirement."""
    kanban_board_id = models.AutoField(primary_key=True)
    kanban_board_name = models.CharField(max_length=255)
    # Optional backlink to the requirement this board was created from.
    requirement = models.ForeignKey(
        'requirement',
        null=True,
        blank=True,
        on_delete=models.CASCADE,
    )
    # Lifecycle state; choices come from module-level KANBAN_BOARD_STATUS_CHOICE.
    kanban_board_status = models.CharField(
        max_length=10,
        choices=KANBAN_BOARD_STATUS_CHOICE,
        default="Open",
    )
    # Audit trail: creator is recorded separately from the last editor.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    creation_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_creation_user'
    )
    # Soft-delete flag.
    is_deleted = models.BooleanField(
        default=False,
    )

    class Meta:
        db_table = "kanban_board"

    def __str__(self):
        return str(self.kanban_board_name)
class kanban_card(models.Model):
    """A card on a kanban board, positioned by (level, column, sort number)."""
    kanban_card_id = models.AutoField(primary_key=True)
    kanban_card_text = models.CharField(max_length=255)
    kanban_card_description = models.TextField(
        blank=True,
        null=True,
    )
    # Ordering of the card within its cell (level x column).
    kanban_card_sort_number = models.IntegerField()
    kanban_level = models.ForeignKey(
        'kanban_level',
        on_delete=models.CASCADE,
    )
    kanban_column = models.ForeignKey(
        'kanban_column',
        on_delete=models.CASCADE,
    )
    kanban_board = models.ForeignKey(
        'kanban_board',
        on_delete=models.CASCADE,
    )
    # Optional links to the object the card represents; presumably at most one
    # of project/task/requirement is set per card — TODO confirm with callers.
    project = models.ForeignKey(
        'project',
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    task = models.ForeignKey(
        'task',
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    requirement = models.ForeignKey(
        'requirement',
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    # Archived cards are hidden from the board without being soft-deleted.
    is_archived = models.BooleanField(
        default=False,
    )
    # Audit trail fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    # Soft-delete flag.
    is_deleted = models.BooleanField(
        default=False,
    )

    class Meta:
        db_table = "kanban_card"

    def __str__(self):
        return str(self.kanban_card_text)
class kanban_column(models.Model):
    """A vertical column of a kanban board, ordered by sort number."""
    kanban_column_id = models.AutoField(primary_key=True)
    kanban_column_name = models.CharField(max_length=255)
    # Left-to-right position of the column on its board.
    kanban_column_sort_number = models.IntegerField()
    kanban_board = models.ForeignKey(
        'kanban_board',
        on_delete=models.CASCADE,
    )
    # Audit trail fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    # Soft-delete flag.
    is_deleted = models.BooleanField(
        default=False,
    )

    class Meta:
        db_table = "kanban_column"

    def __str__(self):
        return str(self.kanban_column_name)
# class kanban_comment(models.Model):
# kanban_comment_id = models.AutoField(primary_key=True)
# kanban_comment = models.TextField()
# kanban_board = models.ForeignKey(
# 'kanban_board',
# on_delete=models.CASCADE,
# null=True,
# blank=True,
# )
# kanban_card = models.ForeignKey(
# 'kanban_card',
# on_delete=models.CASCADE,
# null=True,
# blank=True,
# )
# user = models.ForeignKey(
# User,
# on_delete=models.CASCADE,
# null=True
# )
# user_infomation = models.CharField(max_length=255)
# date_created = models.DateTimeField(auto_now_add=True)
# date_modified = models.DateTimeField(auto_now=True)
# change_user = models.ForeignKey(
# User,
# on_delete=models.CASCADE,
# related_name='%(class)s_change_user'
# )
# is_deleted = models.BooleanField(
# default=False,
# )
#
# class Meta:
# db_table = "kanban_comment"
#
# def __str__(self):
# return str(self.kanban_comment)
class kanban_level(models.Model):
    """A horizontal swim-lane ("level") of a kanban board, ordered by sort number."""
    kanban_level_id = models.AutoField(primary_key=True)
    kanban_level_name = models.CharField(max_length=255)
    # Top-to-bottom position of the level on its board.
    kanban_level_sort_number = models.IntegerField()
    kanban_board = models.ForeignKey(
        'kanban_board',
        on_delete=models.CASCADE,
    )
    # Audit trail fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    # Soft-delete flag.
    is_deleted = models.BooleanField(
        default=False,
    )

    class Meta:
        db_table = "kanban_level"

    def __str__(self):
        return str(self.kanban_level_name)
class kudos(models.Model):
    """Customer feedback ("kudos") collected for a project.

    The UUID primary key doubles as an unguessable access token, so the
    feedback form can be shared with an external customer by link.
    """
    kudos_key = models.UUIDField(
        default=uuid.uuid4,
        editable=False,
        primary_key=True,
    )
    # Score constrained to the module-level RATING_SCORE choices.
    kudos_rating = models.IntegerField(
        choices=RATING_SCORE,
        default=0,
    )
    improvement_note = models.TextField(
        blank=True,
        null=True,
    )
    liked_note = models.TextField(
        blank=True,
        null=True,
    )
    # Optional internal user singled out for extra praise.
    extra_kudos = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    # Becomes True once the customer submits the form.
    submitted_kudos = models.BooleanField(
        default=False,
    )
    project = models.ForeignKey(
        'project',
        on_delete=models.CASCADE,
    )
    customer = models.ForeignKey(
        'customer',
        on_delete=models.CASCADE,
    )
    # Audit trail fields; change_user is nullable here because the record can
    # be modified by an anonymous (external) customer.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user',
        blank=True,
        null=True
    )
    # Soft-delete flag.
    is_deleted = models.BooleanField(
        default=False,
    )

    class Meta:
        db_table = "kudos"
class list_of_amount_type(models.Model):
    """Reference data: the unit/type an amount is expressed in."""
    amount_type_id = models.AutoField(primary_key=True)
    amount_type_description = models.CharField(max_length=20)
    # Controls display order; unique so two rows cannot share a position.
    list_order = models.IntegerField(unique=True)
    # Audit trail fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user',
        blank=True,
        null=True
    )
    # Soft-delete flag.
    is_deleted = models.BooleanField(
        default=False,
    )

    def __str__(self):
        return str(self.amount_type_description)

    class Meta:
        db_table = "list_of_amount_type"
        ordering = ['list_order']
class list_of_bug_client(models.Model):
    """Reference data: an external bug tracker and the API endpoints used to query it."""
    list_of_bug_client_id = models.AutoField(primary_key=True)
    bug_client_name = models.CharField(max_length=50)
    bug_client_api_url = models.CharField(max_length=255)
    # The different API commands
    api_open_bugs = models.CharField(max_length=255)  # Find all open bugs
    api_search_bugs = models.CharField(max_length=255)  # Search command
    api_bug = models.CharField(max_length=255)  # Get that particular bug information - direct link to bug
    # Audit trail fields. (These were previously declared twice; the duplicate
    # assignments were dead code — the second silently replaced the first.)
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user',
        blank=True,
        null=True
    )
    # Soft-delete flag.
    is_deleted = models.BooleanField(
        default=False,
    )

    def __str__(self):
        return str(self.bug_client_name)

    class Meta:
        db_table = "list_of_bug_client"
class list_of_currency(models.Model):
    """Reference data: a currency with long and short (code) descriptions."""
    currency_id = models.AutoField(primary_key=True)
    currency_description = models.CharField(max_length=20)
    # Short code, e.g. a 3-4 character currency symbol/abbreviation.
    currency_short_description = models.CharField(max_length=4)
    # Controls display order; unique so two rows cannot share a position.
    list_order = models.IntegerField(unique=True)
    # Audit trail fields. (These were previously declared twice; the duplicate
    # assignments were dead code — the second silently replaced the first.)
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user',
        blank=True,
        null=True
    )
    # Soft-delete flag.
    is_deleted = models.BooleanField(
        default=False,
    )

    def __str__(self):
        return str(self.currency_description)

    class Meta:
        db_table = "list_of_currency"
class list_of_contact_type(models.Model):
    """Reference data: the type of a customer contact (e.g. phone, email)."""
    contact_type_id = models.AutoField(primary_key=True)
    contact_type = models.CharField(max_length=50)
    # Audit trail fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user',
        blank=True,
        null=True
    )
    # Soft-delete flag.
    is_deleted = models.BooleanField(
        default=False,
    )

    def __str__(self):
        return str(self.contact_type)

    class Meta:
        db_table = "list_of_contact_type"
class list_of_country(models.Model):
    """Reference data: a country keyed by its two-letter code (ISO-3166-style)."""
    # Two-character country code used as the primary key.
    country_id = models.CharField(primary_key=True, max_length=2)
    country_name = models.CharField(max_length=50)
    # Audit trail fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user',
        blank=True,
        null=True
    )
    # Soft-delete flag.
    is_deleted = models.BooleanField(
        default=False,
    )

    def __str__(self):
        return str(self.country_name)

    class Meta:
        db_table = "list_of_country"
class list_of_country_region(models.Model):
    """Reference data: a region (state/province/etc.) within a country."""
    region_id = models.AutoField(primary_key=True)
    country = models.ForeignKey(
        'list_of_country',
        on_delete=models.CASCADE,
    )
    region_name = models.CharField(max_length=150)
    # Kind of region, e.g. "State" or "Province"; may be NULL.
    region_type = models.CharField(
        max_length=80,
        null=True
    )
    # Audit trail fields. (change_user reformatted onto multiple lines for
    # consistency with every other model in this module; no behavior change.)
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user',
        blank=True,
        null=True
    )
    # Soft-delete flag.
    is_deleted = models.BooleanField(
        default=False,
    )

    def __str__(self):
        return str(self.region_name)

    class Meta:
        db_table = "list_of_country_region"
class list_of_lead_source(models.Model):
    """Reference data: where a sales lead originated from."""
    lead_source_id = models.AutoField(primary_key=True)
    lead_source_description = models.CharField(max_length=20)
    # Controls display order; unique so two rows cannot share a position.
    list_order = models.IntegerField(unique=True)
    # Audit trail fields. (These were previously declared twice; the duplicate
    # assignments were dead code — the second silently replaced the first.)
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user',
        blank=True,
        null=True
    )
    # Soft-delete flag.
    is_deleted = models.BooleanField(
        default=False,
    )

    def __str__(self):
        return str(self.lead_source_description)

    class Meta:
        db_table = "list_of_lead_source"
class list_of_opportunity_stage(models.Model):
    """Reference data: a stage in the opportunity pipeline and its win probability."""
    opportunity_stage_id = models.AutoField(primary_key=True)
    opportunity_stage_description = models.CharField(max_length=50)
    # Whole-number percentage (3 digits, 0 decimal places).
    probability_success = models.DecimalField(
        max_digits=3,
        decimal_places=0,
    )
    # Controls display order; unique so two rows cannot share a position.
    list_order = models.IntegerField(unique=True)
    # True when reaching this stage closes the opportunity.
    opportunity_closed = models.BooleanField(
        default=False,
    )
    # Optional associated user; purpose not evident from this module — confirm
    # against callers before relying on it.
    user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        null=True,
        blank=True
    )
    # Audit trail fields. (These were previously declared twice; the duplicate
    # assignments were dead code — the second silently replaced the first.)
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user',
        blank=True,
        null=True
    )
    # Soft-delete flag.
    is_deleted = models.BooleanField(
        default=False,
    )

    def __str__(self):
        return str(self.opportunity_stage_description)

    class Meta:
        db_table = "list_of_opportunity_stage"
        ordering = ['list_order']
class list_of_quote_stage(models.Model):
    """Reference data: a stage a quote can be in (including invoice stages)."""
    quote_stage_id = models.AutoField(primary_key=True)
    quote_stage = models.CharField(
        max_length=50,
        unique=True,
    )
    # True when this stage applies to invoices rather than quotes.
    is_invoice = models.BooleanField(
        default=False,
    )
    # True when reaching this stage closes the quote.
    quote_closed = models.BooleanField(
        default=False,
    )
    # NOTE(review): auto_created=True is an internal Django flag and looks
    # unintentional on a plain IntegerField — confirm before changing, as it
    # affects migrations/serialization.
    sort_order = models.IntegerField(unique=True, auto_created=True)
    # Audit trail fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user',
        blank=True,
        null=True
    )
    # Soft-delete flag.
    is_deleted = models.BooleanField(
        default=False,
    )

    def __str__(self):
        return str(self.quote_stage)

    class Meta:
        db_table = "list_of_quote_stage"
class list_of_requirement_item_status(models.Model):
    """Reference data: the status of a requirement item."""
    requirement_item_status_id = models.AutoField(primary_key=True)
    requirement_item_status = models.CharField(
        max_length=100,
    )
    # True when this status means the item is closed.
    status_is_closed = models.BooleanField(
        default=False,
    )
    # Audit trail fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user',
        blank=True,
        null=True,
    )
    # Soft-delete flag.
    is_deleted = models.BooleanField(
        default=False,
    )

    def __str__(self):
        return str(self.requirement_item_status)

    class Meta:
        db_table = "list_of_requirement_item_status"
class list_of_requirement_item_type(models.Model):
    """Reference data: the type of a requirement item."""
    requirement_item_type_id = models.AutoField(primary_key=True)
    requirement_item_type = models.CharField(
        max_length=100,
    )
    # Audit trail fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user',
        blank=True,
        null=True,
    )
    # Soft-delete flag.
    is_deleted = models.BooleanField(
        default=False,
    )

    def __str__(self):
        return str(self.requirement_item_type)

    class Meta:
        db_table = "list_of_requirement_item_type"
class list_of_requirement_status(models.Model):
    """Reference data: the status of a requirement."""
    requirement_status_id = models.AutoField(primary_key=True)
    requirement_status = models.CharField(
        max_length=50,
    )
    # True when this status means the requirement is closed.
    requirement_status_is_closed = models.BooleanField(
        default=False,
    )
    # Audit trail fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user',
        blank=True,
        null=True,
    )
    # Soft-delete flag.
    is_deleted = models.BooleanField(
        default=False,
    )

    def __str__(self):
        return str(self.requirement_status)

    class Meta:
        db_table = "list_of_requirement_status"
class list_of_requirement_type(models.Model):
    """Reference data: the type of a requirement."""
    requirement_type_id = models.AutoField(primary_key=True)
    requirement_type = models.CharField(
        max_length=100,
    )
    # Audit trail fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user',
        blank=True,
        null=True,
    )
    # Soft-delete flag.
    is_deleted = models.BooleanField(
        default=False,
    )

    def __str__(self):
        return str(self.requirement_type)

    class Meta:
        db_table = "list_of_requirement_type"
class list_of_tax(models.Model):
tax_id = models.AutoField(primary_key=True)
tax_amount = models.DecimalField(
max_digits=6,
decimal_places=4,
)
tax_description = models.CharField(
max_length=50,
blank=True,
null=True,
) # Incase the customer wants to place a name against the tax
date_created = models.DateTimeField(auto_now_add=True)
date_modified = models.DateTimeField(auto_now=True)
date_created = models.DateTimeField(auto_now_add=True)
date_modified = models.DateTimeField(auto_now=True)
change_user = | |
--pol ipsec -j ACCEPT ',
top=True),
mock.call.add_rule(
'POSTROUTING',
'-s 192.168.3.11/24 -d 172.16.17.32/24 -m policy '
'--dir out --pol ipsec -j ACCEPT ',
top=True)
])
self.router.iptables_manager.apply.assert_called_once_with()
    def test_sync(self):
        """sync() should fan out to the three per-phase helpers with the
        router ids derived from the routers passed in."""
        fake_vpn_service = FAKE_VPN_SERVICE
        self.driver.agent_rpc.get_vpn_services_on_host.return_value = [
            fake_vpn_service]
        context = mock.Mock()
        # Stub the helpers so only the dispatching logic of sync() is tested.
        self.driver._sync_vpn_processes = mock.Mock()
        self.driver._delete_vpn_processes = mock.Mock()
        self.driver._cleanup_stale_vpn_processes = mock.Mock()
        sync_routers = [{'id': fake_vpn_service['router_id']}]
        sync_router_ids = [fake_vpn_service['router_id']]
        self.driver.sync(context, sync_routers)
        self.driver._sync_vpn_processes.assert_called_once_with(
            [fake_vpn_service], sync_router_ids)
        self.driver._delete_vpn_processes.assert_called_once_with(
            sync_router_ids, sync_router_ids)
        self.driver._cleanup_stale_vpn_processes.assert_called_once_with(
            sync_router_ids)
    def test__sync_vpn_processes_new_vpn_service(self):
        """A brand-new vpnservice must create a process, install NAT rules,
        and trigger a process update."""
        new_vpnservice = self.vpnservice
        router_id = new_vpnservice['router_id']
        self.driver.processes = {}  # start with no known processes
        with mock.patch.object(self.driver, 'ensure_process') as ensure_p:
            ensure_p.side_effect = self.fake_ensure_process
            self.driver._sync_vpn_processes([new_vpnservice], router_id)
            self._test_add_nat_rule()
            self.driver.processes[router_id].update.assert_called_once_with()
    def test_add_nat_rules_with_multiple_local_subnets(self):
        """Ensure that add nat rule combinations are correct."""
        # Two local subnets per connection exercises the cross-product of
        # (local CIDR x peer CIDR) NAT rules.
        overrides = {'local_cidrs': [['10.0.0.0/24', '192.168.127.12/24'],
                                     ['172.16.58.3/24', '192.168.3.11/24']]}
        self.modify_config_for_test(overrides)
        self.driver._update_nat(self.vpnservice, self.driver.add_nat_rule)
        self._test_add_nat_rule_with_multiple_locals()
    def test__sync_vpn_processes_router_with_no_vpn(self):
        """Test _sync_vpn_processes with a router not hosting vpnservice.
        This test case tests that when a router which doesn't host
        vpn services is updated, sync_vpn_processes doesn't restart/update
        the existing vpnservice processes.
        """
        # An existing, already-known process for the vpn router...
        process = mock.Mock()
        process.vpnservice = self.vpnservice
        process.connection_status = {}
        self.driver.processes = {
            self.vpnservice['router_id']: process}
        # ...and a sync for an unrelated router must not touch it.
        router_id_no_vpn = _uuid()
        with mock.patch.object(self.driver, 'ensure_process') as ensure_p:
            self.driver._sync_vpn_processes([self.vpnservice],
                                            [router_id_no_vpn])
            self.assertEqual(0, ensure_p.call_count)
    def test__sync_vpn_processes_router_with_no_vpn_and_no_vpn_services(self):
        """No vpn services running and router not hosting vpn svc."""
        router_id_no_vpn = _uuid()
        # Empty caches: nothing known, nothing reported by the server.
        self.driver.process_status_cache = {}
        self.driver.processes = {}
        with mock.patch.object(self.driver, 'ensure_process') as ensure_p:
            ensure_p.side_effect = self.fake_ensure_process
            self.driver._sync_vpn_processes([], [router_id_no_vpn])
            self.assertEqual(0, ensure_p.call_count)
    def test__sync_vpn_processes_router_with_no_vpn_agent_restarted(self):
        """Test for the router not hosting vpnservice and agent restarted.
        This test case tests that when a non vpnservice hosted router
        is updated, _sync_vpn_processes restart/update the existing vpnservices
        which are not yet stored in driver.processes.
        """
        router_id = FAKE_ROUTER_ID
        # Empty caches simulate a freshly restarted agent.
        self.driver.process_status_cache = {}
        self.driver.processes = {}
        with mock.patch.object(self.driver, 'ensure_process') as ensure_p:
            ensure_p.side_effect = self.fake_ensure_process
            self.driver._sync_vpn_processes([self.vpnservice], [router_id])
            self._test_add_nat_rule()
            self.driver.processes[router_id].update.assert_called_once_with()
    def test_delete_vpn_processes(self):
        """_delete_vpn_processes destroys only routers without vpn services."""
        router_id_no_vpn = _uuid()
        vpn_service_router_id = _uuid()
        # Router not in the vpn list -> its process must be destroyed.
        with mock.patch.object(self.driver,
                               'destroy_process') as (fake_destroy_process):
            self.driver._delete_vpn_processes([router_id_no_vpn],
                                              [vpn_service_router_id])
            fake_destroy_process.assert_has_calls(
                [mock.call(router_id_no_vpn)])
        # test that _delete_vpn_processes doesn't delete the
        # the valid vpn processes
        with mock.patch.object(self.driver,
                               'destroy_process') as fake_destroy_process:
            self.driver._delete_vpn_processes([vpn_service_router_id],
                                              [vpn_service_router_id])
            self.assertFalse(fake_destroy_process.called)
    def test_cleanup_stale_vpn_processes(self):
        """Processes for routers no longer hosted here must be destroyed."""
        stale_vpn_service = {'router_id': _uuid()}
        active_vpn_service = {'router_id': _uuid()}
        self.driver.processes = {
            stale_vpn_service['router_id']: stale_vpn_service,
            active_vpn_service['router_id']: active_vpn_service}
        with mock.patch.object(self.driver, 'destroy_process') as destroy_p:
            # Only the active router id survives the cleanup.
            self.driver._cleanup_stale_vpn_processes(
                [active_vpn_service['router_id']])
            destroy_p.assert_has_calls(
                [mock.call(stale_vpn_service['router_id'])])
    def fake_ensure_process(self, process_id, vpnservice=None):
        """Test double for driver.ensure_process.

        Creates and caches a Mock process on first use; on later calls with a
        vpnservice it mimics the real driver by updating the stored service.
        """
        process = self.driver.processes.get(process_id)
        if not process:
            # First sighting of this router: fabricate an ACTIVE mock process.
            process = mock.Mock()
            process.vpnservice = self.vpnservice
            process.connection_status = {}
            process.status = constants.ACTIVE
            process.updated_pending_status = True
            self.driver.processes[process_id] = process
        elif vpnservice:
            # Existing process: record the new service like the real code does.
            process.vpnservice = vpnservice
            process.update_vpnservice(vpnservice)
        return process
    def fake_destroy_router(self, process_id):
        """Test double for destroy_process: drop the cached process, if any."""
        process = self.driver.processes.get(process_id)
        if process:
            del self.driver.processes[process_id]
    def test_sync_update_vpnservice(self):
        """A changed vpnservice reported by the server must be propagated to
        the existing process via update_vpnservice()."""
        with mock.patch.object(self.driver,
                               'ensure_process') as ensure_process:
            ensure_process.side_effect = self.fake_ensure_process
            new_vpn_service = self.vpnservice
            updated_vpn_service = copy.deepcopy(new_vpn_service)
            updated_vpn_service['ipsec_site_connections'][1].update(
                {'peer_cidrs': ['172.16.17.32/24', '172.16.17.32/24']})
            context = mock.Mock()
            self.driver.process_status_cache = {}
            # First sync: the service is stored as-is on the new process.
            self.driver.agent_rpc.get_vpn_services_on_host.return_value = [
                new_vpn_service]
            self.driver.sync(context, [{'id': FAKE_ROUTER_ID}])
            process = self.driver.processes[FAKE_ROUTER_ID]
            self.assertEqual(new_vpn_service, process.vpnservice)
            # Second sync with modified peer_cidrs: process must be updated.
            self.driver.agent_rpc.get_vpn_services_on_host.return_value = [
                updated_vpn_service]
            self.driver.sync(context, [{'id': FAKE_ROUTER_ID}])
            process = self.driver.processes[FAKE_ROUTER_ID]
            process.update_vpnservice.assert_called_once_with(
                updated_vpn_service)
            self.assertEqual(updated_vpn_service, process.vpnservice)
    def test_sync_removed(self):
        """When the server reports no services, existing processes are
        disabled and forgotten."""
        self.driver.agent_rpc.get_vpn_services_on_host.return_value = []
        context = mock.Mock()
        process_id = _uuid()
        process = mock.Mock()
        process.vpnservice = self.vpnservice
        self.driver.processes = {
            process_id: process}
        self.driver.sync(context, [])
        process.disable.assert_called_once_with()
        self.assertNotIn(process_id, self.driver.processes)
    def test_sync_removed_router(self):
        """Syncing an unknown router with no services must not register it."""
        self.driver.agent_rpc.get_vpn_services_on_host.return_value = []
        context = mock.Mock()
        process_id = _uuid()
        self.driver.sync(context, [{'id': process_id}])
        self.assertNotIn(process_id, self.driver.processes)
    def test_status_updated_on_connection_admin_down(self):
        """A connection that vanishes from the reported status (e.g. admin
        down) must be re-added to the new status marked DOWN."""
        # Cache knows two ACTIVE connections, '10' and '20'.
        self.driver.process_status_cache = {
            '1': {
                'status': constants.ACTIVE,
                'id': 123,
                'updated_pending_status': False,
                'ipsec_site_connections': {
                    '10': {
                        'status': constants.ACTIVE,
                        'updated_pending_status': False,
                    },
                    '20': {
                        'status': constants.ACTIVE,
                        'updated_pending_status': False,
                    }
                }
            }
        }
        # Simulate that there is no longer status for connection '20'
        # e.g. connection admin down
        new_status = {
            'ipsec_site_connections': {
                '10': {
                    'status': constants.ACTIVE,
                    'updated_pending_status': False
                }
            }
        }
        self.driver.update_downed_connections('1', new_status)
        # '10' is untouched; '20' is restored with DOWN status.
        existing_conn = new_status['ipsec_site_connections'].get('10')
        self.assertIsNotNone(existing_conn)
        self.assertEqual(constants.ACTIVE, existing_conn['status'])
        missing_conn = new_status['ipsec_site_connections'].get('20')
        self.assertIsNotNone(missing_conn)
        self.assertEqual(constants.DOWN, missing_conn['status'])
    def test_status_updated_on_service_admin_down(self):
        """When the whole service stops reporting connections, every
        previously-known connection must be marked DOWN."""
        self.driver.process_status_cache = {
            '1': {
                'status': constants.ACTIVE,
                'id': 123,
                'updated_pending_status': False,
                'ipsec_site_connections': {
                    '10': {
                        'status': constants.ACTIVE,
                        'updated_pending_status': False,
                    },
                    '20': {
                        'status': constants.ACTIVE,
                        'updated_pending_status': False,
                    }
                }
            }
        }
        # Simulate that there are no connections now
        new_status = {
            'ipsec_site_connections': {}
        }
        self.driver.update_downed_connections('1', new_status)
        missing_conn = new_status['ipsec_site_connections'].get('10')
        self.assertIsNotNone(missing_conn)
        self.assertEqual(constants.DOWN, missing_conn['status'])
        missing_conn = new_status['ipsec_site_connections'].get('20')
        self.assertIsNotNone(missing_conn)
        self.assertEqual(constants.DOWN, missing_conn['status'])
    def _test_status_handling_for_downed_connection(self, down_status):
        """Test status handling for downed connection."""
        router_id = self.router.router_id
        connection_id = FAKE_IPSEC_SITE_CONNECTION2_ID
        self.driver.ensure_process(router_id, self.vpnservice)
        # The mocked process-status command returns the driver-specific
        # "down" output supplied by the concrete test class.
        self._execute.return_value = down_status
        self.driver.report_status(mock.Mock())
        process_status = self.driver.process_status_cache[router_id]
        ipsec_site_conn = process_status['ipsec_site_connections']
        # Service itself is up, but the specific connection is DOWN.
        self.assertEqual(constants.ACTIVE, process_status['status'])
        self.assertEqual(constants.DOWN,
                         ipsec_site_conn[connection_id]['status'])
    def _test_status_handling_for_active_connection(self, active_status):
        """Test status handling for active connection."""
        router_id = self.router.router_id
        connection_id = FAKE_IPSEC_SITE_CONNECTION2_ID
        self.driver.ensure_process(router_id, self.vpnservice)
        # Mocked status command reports the driver-specific "active" output.
        self._execute.return_value = active_status
        self.driver.report_status(mock.Mock())
        process_status = self.driver.process_status_cache[
            router_id]
        ipsec_site_conn = process_status['ipsec_site_connections']
        self.assertEqual(constants.ACTIVE, process_status['status'])
        self.assertEqual(constants.ACTIVE,
                         ipsec_site_conn[connection_id]['status'])
    def _test_connection_names_handling_for_multiple_subnets(self,
                                                             active_status):
        """Test connection names handling for multiple subnets."""
        router_id = self.router.router_id
        process = self.driver.ensure_process(router_id, self.vpnservice)
        self._execute.return_value = active_status
        # Two site connections in the fixture -> two established names.
        names = process.get_established_connections()
        self.assertEqual(2, len(names))
    def _test_status_handling_for_deleted_connection(self,
                                                     not_running_status):
        """Test status handling for deleted connection."""
        router_id = self.router.router_id
        self.driver.ensure_process(router_id, self.vpnservice)
        # Status command output indicates the process is not running at all.
        self._execute.return_value = not_running_status
        self.driver.report_status(mock.Mock())
        process_status = self.driver.process_status_cache[router_id]
        ipsec_site_conn = process_status['ipsec_site_connections']
        # Whole service DOWN and no per-connection statuses remain.
        self.assertEqual(constants.DOWN, process_status['status'])
        self.assertFalse(ipsec_site_conn)
    def _test_parse_connection_status(self, not_running_status,
                                      active_status, down_status):
        """Test the status of ipsec-site-connection is parsed correctly."""
        router_id = self.router.router_id
        process = self.driver.ensure_process(router_id, self.vpnservice)
        # Not running -> inactive.
        self._execute.return_value = not_running_status
        self.assertFalse(process.active)
        # An empty return value to simulate that the process
        # does not have any status to report.
        self._execute.return_value = ''
        self.assertFalse(process.active)
        # Active output -> active.
        self._execute.return_value = active_status
        self.assertTrue(process.active)
        # A running process with downed connections still counts as active.
        self._execute.return_value = down_status
        self.assertTrue(process.active)
def test_get_namespace_for_router(self):
namespace = self.driver.get_namespace(FAKE_ROUTER_ID)
self.assertEqual('qrouter-' + FAKE_ROUTER_ID, namespace)
    def test_fail_getting_namespace_for_unknown_router(self):
        # Unknown router id yields a falsy namespace.
        self.assertFalse(self.driver.get_namespace('bogus_id'))
    def test_add_nat_rule(self):
        # Rule is forwarded verbatim to the router's iptables manager.
        self.driver.add_nat_rule(FAKE_ROUTER_ID, 'fake_chain',
                                 'fake_rule', True)
        self.iptables.add_rule.assert_called_once_with(
            'fake_chain', 'fake_rule', top=True)
def test_add_nat_rule_with_no_router(self):
self.driver.add_nat_rule(
'bogus_router_id',
'fake_chain',
'fake_rule',
True)
self.assertFalse(self.iptables.add_rule.called)
    def test_remove_rule(self):
        # Removal is forwarded verbatim to the router's iptables manager.
        self.driver.remove_nat_rule(FAKE_ROUTER_ID, 'fake_chain',
                                    'fake_rule', True)
        self.iptables.remove_rule.assert_called_once_with(
            'fake_chain', 'fake_rule', top=True)
def test_remove_rule_with_no_router(self):
self.driver.remove_nat_rule(
'bogus_router_id',
'fake_chain',
'fake_rule')
self.assertFalse(self.iptables.remove_rule.called)
    def test_iptables_apply(self):
        # Apply is delegated to the router's iptables manager.
        self.driver.iptables_apply(FAKE_ROUTER_ID)
        self.apply_mock.assert_called_once_with()
    def test_iptables_apply_with_no_router(self):
        # Unknown router id: nothing to apply, no call made.
        self.driver.iptables_apply('bogus_router_id')
        self.assertFalse(self.apply_mock.called)
class IPSecDeviceDVR(BaseIPsecDeviceDriver):
    """IPsec device-driver tests for a DVR edge router.

    For DVR, the driver must operate in the snat- namespace and use the
    router's snat_iptables_manager rather than the qrouter- equivalents.
    """

    def setUp(self, driver=openswan_ipsec.OpenSwanDriver,
              ipsec_process=openswan_ipsec.OpenSwanProcess):
        super(IPSecDeviceDVR, self).setUp(driver, ipsec_process)
        # Avoid creating a real namespace during router info construction.
        mock.patch.object(dvr_snat_ns.SnatNamespace, 'create').start()
        self._make_dvr_edge_router_info_for_test()

    def _make_dvr_edge_router_info_for_test(self):
        """Build a distributed edge router with mocked snat iptables."""
        router = dvr_edge_router.DvrEdgeRouter(mock.sentinel.agent,
                                               mock.sentinel.myhost,
                                               FAKE_ROUTER_ID,
                                               **self.ri_kwargs)
        router.router['distributed'] = True
        router.snat_namespace = dvr_snat_ns.SnatNamespace(router.router['id'],
                                                          mock.sentinel.agent,
                                                          self.driver,
                                                          mock.ANY)
        router.snat_namespace.create()
        # Wire the shared test mocks into the snat-side iptables manager.
        router.snat_iptables_manager = iptables_manager.IptablesManager(
            namespace='snat-' + FAKE_ROUTER_ID, use_ipv6=mock.ANY)
        router.snat_iptables_manager.ipv4['nat'] = self.iptables
        router.snat_iptables_manager.apply = self.apply_mock
        self.driver.routers[FAKE_ROUTER_ID] = router

    def test_get_namespace_for_dvr_edge_router(self):
        # DVR edge routers resolve to the snat- namespace, not qrouter-.
        namespace = self.driver.get_namespace(FAKE_ROUTER_ID)
        self.assertEqual('snat-' + FAKE_ROUTER_ID, namespace)

    def test_add_nat_rule_with_dvr_edge_router(self):
        self.driver.add_nat_rule(FAKE_ROUTER_ID, 'fake_chain',
                                 'fake_rule', True)
        self.iptables.add_rule.assert_called_once_with(
            'fake_chain', 'fake_rule', top=True)

    def test_iptables_apply_with_dvr_edge_router(self):
        self.driver.iptables_apply(FAKE_ROUTER_ID)
        self.apply_mock.assert_called_once_with()

    def test_remove_rule_with_dvr_edge_router(self):
        self.driver.remove_nat_rule(FAKE_ROUTER_ID, 'fake_chain',
                                    'fake_rule', True)
        self.iptables.remove_rule.assert_called_once_with(
            'fake_chain', 'fake_rule', top=True)
class TestOpenSwanConfigGeneration(BaseIPsecDeviceDriver):
"""Verify that configuration files are generated correctly.
Besides the normal translation of some settings, when creating the config
file, the generated file can also vary based on the following
special conditions:
- IPv6 versus IPv4
- Multiple left subnets versus a single left subnet
- IPSec policy using AH transform
The tests will focus on these variations.
"""
def setUp(self, driver=openswan_ipsec.OpenSwanDriver,
ipsec_process=openswan_ipsec.OpenSwanProcess):
super(TestOpenSwanConfigGeneration, self).setUp(
driver, ipsec_process, vpnservice=FAKE_VPN_SERVICE)
self.conf.register_opts(openswan_ipsec.openswan_opts, 'openswan')
self.conf.set_override('state_path', '/tmp')
self.ipsec_template = self.conf.openswan.ipsec_config_template
self.process = openswan_ipsec.OpenSwanProcess(self.conf,
'foo-process-id',
self.vpnservice,
mock.ANY)
def build_ipsec_expected_config_for_test(self, info):
"""Modify OpenSwan ipsec expected config files for test variations."""
auth_mode = info.get('ipsec_auth', AUTH_ESP)
conn_details = OPENSWAN_CONNECTION_DETAILS % {'auth_mode': auth_mode,
'dpd_action': 'hold',
'dpd_delay': 30,
'dpd_timeout': 120,
'ike_lifetime': 3600,
'life_time': 3600,
'encapsulation_mode': 'tunnel'}
# Convert local CIDRs into assignment strings. IF more than one,
# pluralize the attribute name and enclose in brackets.
cidrs = info.get('local_cidrs', [['10.0.0.0/24'], ['192.168.127.12/24']])
local_cidrs = []
for cidr in cidrs:
if len(cidr) == 2:
local_cidrs.append("s={ %s }" % ' '.join(cidr))
else:
local_cidrs.append("=%s" % cidr[0])
# Convert peer CIDRs into space separated strings
cidrs = info.get('peer_cidrs', [['172.16.31.10/24', '192.168.127.12/24'],
['172.16.17.32/24', '172.16.17.32/24']])
peer_cidrs = [' '.join(cidr) for cidr in cidrs]
local_ip = info.get('local', '172.16.58.3')
version = info.get('local_ip_vers', 4)
next_hop = IPV4_NEXT_HOP if version == 4 else IPV6_NEXT_HOP % local_ip
peer_ips = info.get('peers', ['192.168.127.12', '192.168.3.11'])
return EXPECTED_OPENSWAN_CONF % {
'next_hop': next_hop,
'local_cidrs1': local_cidrs[0], 'local_cidrs2': local_cidrs[1],
'local_ver': version,
'peer_cidrs1': peer_cidrs[0], 'peer_cidrs2': peer_cidrs[1],
'left': local_ip,
'right1': peer_ips[0], 'right2': peer_ips[1],
'conn1_id': FAKE_IPSEC_SITE_CONNECTION1_ID,
'conn2_id': FAKE_IPSEC_SITE_CONNECTION2_ID,
'conn_details': conn_details}
def test_connections_with_esp_transform_protocol(self):
"""Test config file with IPSec policy using ESP."""
self._test_ipsec_connection_config({})
def test_connections_with_ah_transform_protocol(self):
"""Test config file with IPSec policy using ESP."""
overrides = {'ipsec_auth': 'ah'}
self.modify_config_for_test(overrides)
self.process.update_vpnservice(self.vpnservice)
info = {'ipsec_auth': AUTH_AH}
self._test_ipsec_connection_config(info)
def test_connections_with_multiple_left_subnets(self):
"""Test multiple local subnets.
The configure uses the 'leftsubnets' attribute, instead of the
'leftsubnet' attribute.
"""
overrides = {'local_cidrs': [['10.0.0.0/24', '192.168.127.12/24'],
['172.16.58.3/24', '192.168.3.11/24']]}
self.modify_config_for_test(overrides)
self.process.update_vpnservice(self.vpnservice)
self._test_ipsec_connection_config(overrides)
def test_config_files_with_ipv6_addresses(self):
"""Test creating config files using IPv6 addressing."""
overrides = {'local_cidrs': [['2002:0a00::/48'], ['2002:0b00::/48']],
'local_ip_vers': 6,
'peer_cidrs': [['2002:1400::/48', '2002:1e00::/48'],
['2002:2800::/48', '2002:3200::/48']],
'local': '2002:3c00:0004::',
'peers': ['2002:3c00:0005::', '2002:3c00:0006::'],
'local_id': '2002:3c00:0004::'}
self.modify_config_for_test(overrides)
self.process.update_vpnservice(self.vpnservice)
self._test_ipsec_connection_config(overrides)
    def test_secrets_config_file(self):
        """Verify the generated ipsec.secrets content for OpenSwan."""
        expected = EXPECTED_IPSEC_OPENSWAN_SECRET_CONF
        actual = self.process._gen_config_content(
            self.conf.openswan.ipsec_secret_template, self.vpnservice)
        self.check_config_file(expected, actual)
class IPsecStrongswanConfigGeneration(BaseIPsecDeviceDriver):
    def setUp(self, driver=strongswan_ipsec.StrongSwanDriver,
              ipsec_process=strongswan_ipsec.StrongSwanProcess):
        """Set up a StrongSwan process against the fake VPN service."""
        super(IPsecStrongswanConfigGeneration, self).setUp(
            driver, ipsec_process, vpnservice=FAKE_VPN_SERVICE)
        self.conf.register_opts(strongswan_ipsec.strongswan_opts,
                                'strongswan')
        # Keep generated config files under a writable temp directory.
        self.conf.set_override('state_path', '/tmp')
        self.ipsec_template = self.conf.strongswan.ipsec_config_template
        self.process = strongswan_ipsec.StrongSwanProcess(self.conf,
                                                          'foo-process-id',
                                                          self.vpnservice,
                                                          mock.ANY)
def build_ipsec_expected_config_for_test(self, info):
cidrs = info.get('local_cidrs', [['10.0.0.0/24'], ['192.168.127.12/24']])
local_cidrs = [','.join(cidr) for cidr in cidrs]
cidrs = info.get('peer_cidrs', | |
<reponame>Bhanditz/spyder<gh_stars>1-10
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Mix-in classes
These classes were created to be able to provide Spyder's regular text and
console widget features to an independant widget based on QTextEdit for the
IPython console plugin.
"""
# Standard library imports
from xml.sax.saxutils import escape
import os
import os.path as osp
import re
import sre_constants
import textwrap
# Third party imports
from qtpy.QtCore import QPoint, Qt
from qtpy.QtGui import QCursor, QTextCursor, QTextDocument
from qtpy.QtWidgets import QApplication, QToolTip
from qtpy import QT_VERSION
# Local imports
from spyder.config.base import _
from spyder.py3compat import is_text_string, to_text_string
from spyder.utils import encoding, sourcecode, programs
from spyder.utils.dochelpers import (getargspecfromtext, getobj,
getsignaturefromtext)
from spyder.utils.misc import get_error_match
from spyder.widgets.arraybuilder import NumpyArrayDialog
QT55_VERSION = programs.check_version(QT_VERSION, "5.5", ">=")
if QT55_VERSION:
from qtpy.QtCore import QRegularExpression
else:
from qtpy.QtCore import QRegExp
class BaseEditMixin(object):
def __init__(self):
self.eol_chars = None
self.calltip_size = 600
#------Line number area
    def get_linenumberarea_width(self):
        """Return line number area width"""
        # Implemented in CodeEditor, but needed for calltip/completion widgets
        # so coordinate computations also work for panel-less widgets.
        return 0
    def calculate_real_position(self, point):
        """
        Add offset to a point, to take into account the Editor panels.
        This is reimplemented in CodeEditor, in other widgets it returns
        the same point.
        """
        # Identity by default; panel-aware subclasses shift the point.
        return point
#------Calltips
def _format_signature(self, text):
formatted_lines = []
name = text.split('(')[0]
rows = textwrap.wrap(text, width=50,
subsequent_indent=' '*(len(name)+1))
for r in rows:
r = escape(r) # Escape most common html chars
r = r.replace(' ', ' ')
for char in ['=', ',', '(', ')', '*', '**']:
r = r.replace(char,
'<span style=\'color: red; font-weight: bold\'>' + \
char + '</span>')
formatted_lines.append(r)
signature = '<br>'.join(formatted_lines)
return signature, rows
    def show_calltip(self, title, text, signature=False, color='#2D62FF',
                     at_line=None, at_position=None):
        """Show a calltip/tooltip near the cursor.

        When *signature* is True, *text* is rendered as a highlighted
        function signature in the calltip widget; otherwise a plain
        QToolTip is shown. *at_line*/*at_position* override the default
        cursor location.
        """
        if text is None or len(text) == 0:
            return
        # Saving cursor position:
        if at_position is None:
            at_position = self.get_position('cursor')
        self.calltip_position = at_position
        # Preparing text:
        if signature:
            text, wrapped_textlines = self._format_signature(text)
        else:
            if isinstance(text, list):
                text = "\n    ".join(text)
            text = text.replace('\n', '<br>')
            # Truncate overly long tooltips to calltip_size characters.
            if len(text) > self.calltip_size:
                text = text[:self.calltip_size] + " ..."
        # Formatting text
        font = self.font()
        size = font.pointSize()
        family = font.family()
        format1 = '<div style=\'font-family: "%s"; font-size: %spt; color: %s\'>'\
                  % (family, size, color)
        format2 = '<div style=\'font-family: "%s"; font-size: %spt\'>'\
                  % (family, size-1 if size > 9 else size)
        tiptext = format1 + ('<b>%s</b></div>' % title) + '<hr>' + \
                  format2 + text + "</div>"
        # Showing tooltip at cursor position:
        cx, cy = self.get_coordinates('cursor')
        if at_line is not None:
            # Pin the tip to the requested (1-based) line instead.
            cx = 5
            cursor = QTextCursor(self.document().findBlockByNumber(at_line-1))
            cy = self.cursorRect(cursor).top()
        point = self.mapToGlobal(QPoint(cx, cy))
        point = self.calculate_real_position(point)
        # Shift below the text line so the tip does not cover it.
        point.setY(point.y()+font.pointSize()+5)
        if signature:
            self.calltip_widget.show_tip(point, tiptext, wrapped_textlines)
        else:
            QToolTip.showText(point, tiptext)
#------EOL characters
    def set_eol_chars(self, text):
        """Set widget end-of-line (EOL) characters from text (analyzes text)"""
        if not is_text_string(text): # testing for QString (PyQt API#1)
            text = to_text_string(text)
        eol_chars = sourcecode.get_eol_chars(text)
        # NOTE(review): this marks the document modified whenever both the
        # detected and previous EOL chars are set, even if they are equal --
        # looks like it should compare the two values; confirm upstream.
        is_document_modified = eol_chars is not None and self.eol_chars is not None
        self.eol_chars = eol_chars
        if is_document_modified:
            self.document().setModified(True)
            if self.sig_eol_chars_changed is not None:
                self.sig_eol_chars_changed.emit(eol_chars)
def get_line_separator(self):
"""Return line separator based on current EOL mode"""
if self.eol_chars is not None:
return self.eol_chars
else:
return os.linesep
def get_text_with_eol(self):
"""Same as 'toPlainText', replace '\n'
by correct end-of-line characters"""
utext = to_text_string(self.toPlainText())
lines = utext.splitlines()
linesep = self.get_line_separator()
txt = linesep.join(lines)
if utext.endswith('\n'):
txt += linesep
return txt
#------Positions, coordinates (cursor, EOF, ...)
def get_position(self, subject):
"""Get offset in character for the given subject from the start of
text edit area"""
cursor = self.textCursor()
if subject == 'cursor':
pass
elif subject == 'sol':
cursor.movePosition(QTextCursor.StartOfBlock)
elif subject == 'eol':
cursor.movePosition(QTextCursor.EndOfBlock)
elif subject == 'eof':
cursor.movePosition(QTextCursor.End)
elif subject == 'sof':
cursor.movePosition(QTextCursor.Start)
else:
# Assuming that input argument was already a position
return subject
return cursor.position()
    def get_coordinates(self, position):
        """Return the (x, y) viewport coordinates of *position*."""
        position = self.get_position(position)
        cursor = self.textCursor()
        cursor.setPosition(position)
        # Center of the cursor rectangle at that position.
        point = self.cursorRect(cursor).center()
        return point.x(), point.y()
    def get_cursor_line_column(self):
        """Return cursor (line, column) numbers"""
        # Both values are 0-based Qt block/column numbers.
        cursor = self.textCursor()
        return cursor.blockNumber(), cursor.columnNumber()
    def get_cursor_line_number(self):
        """Return cursor line number"""
        # 1-based, unlike get_cursor_line_column().
        return self.textCursor().blockNumber()+1
    def set_cursor_position(self, position):
        """Set cursor position"""
        # *position* may be an offset or 'sol'/'eol'/'sof'/'eof'/'cursor'.
        position = self.get_position(position)
        cursor = self.textCursor()
        cursor.setPosition(position)
        self.setTextCursor(cursor)
        self.ensureCursorVisible()
def move_cursor(self, chars=0):
"""Move cursor to left or right (unit: characters)"""
direction = QTextCursor.Right if chars > 0 else QTextCursor.Left
for _i in range(abs(chars)):
self.moveCursor(direction, QTextCursor.MoveAnchor)
    def is_cursor_on_first_line(self):
        """Return True if cursor is on the first line"""
        cursor = self.textCursor()
        # Start-of-block coincides with the document start only when the
        # cursor's block is the first one.
        cursor.movePosition(QTextCursor.StartOfBlock)
        return cursor.atStart()
    def is_cursor_on_last_line(self):
        """Return True if cursor is on the last line"""
        cursor = self.textCursor()
        # End-of-block coincides with the document end only when the
        # cursor's block is the last one.
        cursor.movePosition(QTextCursor.EndOfBlock)
        return cursor.atEnd()
    def is_cursor_at_end(self):
        """Return True if cursor is at the end of the text"""
        # Thin delegation to the text cursor's own end check.
        return self.textCursor().atEnd()
    def is_cursor_before(self, position, char_offset=0):
        """Return True if cursor is before *position*"""
        position = self.get_position(position) + char_offset
        cursor = self.textCursor()
        cursor.movePosition(QTextCursor.End)
        if position < cursor.position():
            cursor.setPosition(position)
            return self.textCursor() < cursor
        # NOTE(review): implicitly returns None when *position* is at or
        # beyond the document end -- presumably callers rely on truthiness.
    def __move_cursor_anchor(self, what, direction, move_mode):
        """Move the cursor by one *what* ('character', 'word' or 'line')
        toward *direction*, with *move_mode* deciding whether the anchor
        moves too (MoveAnchor) or stays to extend a selection (KeepAnchor)."""
        assert what in ('character', 'word', 'line')
        if what == 'character':
            if direction == 'left':
                self.moveCursor(QTextCursor.PreviousCharacter, move_mode)
            elif direction == 'right':
                self.moveCursor(QTextCursor.NextCharacter, move_mode)
        elif what == 'word':
            if direction == 'left':
                self.moveCursor(QTextCursor.PreviousWord, move_mode)
            elif direction == 'right':
                self.moveCursor(QTextCursor.NextWord, move_mode)
        elif what == 'line':
            # Lines move vertically, so directions are 'up'/'down' here.
            if direction == 'down':
                self.moveCursor(QTextCursor.NextBlock, move_mode)
            elif direction == 'up':
                self.moveCursor(QTextCursor.PreviousBlock, move_mode)
    def move_cursor_to_next(self, what='word', direction='left'):
        """
        Move cursor to next *what* ('word' or 'character')
        toward *direction* ('left' or 'right')
        """
        # MoveAnchor: the anchor follows the cursor, so no selection is made.
        self.__move_cursor_anchor(what, direction, QTextCursor.MoveAnchor)
#------Selection
    def clear_selection(self):
        """Clear current selection"""
        cursor = self.textCursor()
        cursor.clearSelection()
        # Re-setting the cursor applies the cleared selection to the widget.
        self.setTextCursor(cursor)
    def extend_selection_to_next(self, what='word', direction='left'):
        """
        Extend selection to next *what* ('word' or 'character')
        toward *direction* ('left' or 'right')
        """
        # KeepAnchor: the anchor stays put, so the selection grows.
        self.__move_cursor_anchor(what, direction, QTextCursor.KeepAnchor)
#------Text: get, set, ...
    def __select_text(self, position_from, position_to):
        """Return a text cursor selecting the span between both positions."""
        position_from = self.get_position(position_from)
        position_to = self.get_position(position_to)
        cursor = self.textCursor()
        cursor.setPosition(position_from)
        # KeepAnchor extends the selection from position_from to position_to.
        cursor.setPosition(position_to, QTextCursor.KeepAnchor)
        return cursor
def get_text_line(self, line_nb):
"""Return text line at line number *line_nb*"""
# Taking into account the case when a file ends in an empty line,
# since splitlines doesn't return that line as the last element
# TODO: Make this function more efficient
try:
return to_text_string(self.toPlainText()).splitlines()[line_nb]
except IndexError:
return self.get_line_separator()
    def get_text(self, position_from, position_to):
        """
        Return text between *position_from* and *position_to*
        Positions may be positions or 'sol', 'eol', 'sof', 'eof' or 'cursor'
        """
        cursor = self.__select_text(position_from, position_to)
        text = to_text_string(cursor.selectedText())
        all_text = position_from == 'sof' and position_to == 'eof'
        # Strip trailing line breaks, except when the whole document was
        # requested.
        if text and not all_text:
            while text.endswith("\n"):
                text = text[:-1]
            while text.endswith(u"\u2029"):  # Qt paragraph separator
                text = text[:-1]
        return text
    def get_character(self, position, offset=0):
        """Return character at *position* with the given offset."""
        position = self.get_position(position) + offset
        cursor = self.textCursor()
        cursor.movePosition(QTextCursor.End)
        if position < cursor.position():
            # Select exactly one character to the right of *position*.
            cursor.setPosition(position)
            cursor.movePosition(QTextCursor.Right,
                                QTextCursor.KeepAnchor)
            return to_text_string(cursor.selectedText())
        else:
            # Past the end of the document: no character to return.
            return ''
    def insert_text(self, text):
        """Insert text at cursor position"""
        # Silently ignored when the widget is read-only.
        if not self.isReadOnly():
            self.textCursor().insertText(text)
    def replace_text(self, position_from, position_to, text):
        """Replace the text between both positions with *text*."""
        cursor = self.__select_text(position_from, position_to)
        cursor.removeSelectedText()
        cursor.insertText(text)
    def remove_text(self, position_from, position_to):
        """Remove the text between both positions."""
        cursor = self.__select_text(position_from, position_to)
        cursor.removeSelectedText()
    def get_current_word(self):
        """Return current word, i.e. word at cursor position"""
        cursor = self.textCursor()
        if cursor.hasSelection():
            # Removes the selection and moves the cursor to the left side
            # of the selection: this is required to be able to properly
            # select the whole word under cursor (otherwise, the same word is
            # not selected when the cursor is at the right side of it):
            cursor.setPosition(min([cursor.selectionStart(),
                                    cursor.selectionEnd()]))
        else:
            # Checks if the first character to the right is a white space
            # and if not, moves the cursor one word to the left (otherwise,
            # if the character to the left do not match the "word regexp"
            # (see below), the word to the left of the cursor won't be
            # selected), but only if the first character to the left is not a
            # white space too.
            def is_space(move):
                curs = self.textCursor()
                curs.movePosition(move, QTextCursor.KeepAnchor)
                return not to_text_string(curs.selectedText()).strip()
            if is_space(QTextCursor.NextCharacter):
                if is_space(QTextCursor.PreviousCharacter):
                    # Whitespace on both sides: no word under cursor.
                    return
                cursor.movePosition(QTextCursor.WordLeft)
        cursor.select(QTextCursor.WordUnderCursor)
        text = to_text_string(cursor.selectedText())
        # find a valid python variable name
        match = re.findall(r'([^\d\W]\w*)', text, re.UNICODE)
        if match:
            return match[0]
        # Implicitly returns None when no identifier-like token is found.
    def get_current_line(self):
        """Return current line's text"""
        cursor = self.textCursor()
        # NOTE: BlockUnderCursor selection may include the leading block
        # separator character.
        cursor.select(QTextCursor.BlockUnderCursor)
        return to_text_string(cursor.selectedText())
    def get_current_line_to_cursor(self):
        """Return text from prompt to cursor"""
        # self.current_prompt_pos is provided by console widgets using this
        # mixin -- assumed to be set before this is called.
        return self.get_text(self.current_prompt_pos, 'cursor')
    def get_line_number_at(self, coordinates):
        """Return line number at *coordinates* (QPoint)"""
        cursor = self.cursorForPosition(coordinates)
        # NOTE(review): blockNumber() is already 0-based, so this yields one
        # less than the block index -- confirm the -1 is intended.
        return cursor.blockNumber()-1
def get_line_at(self, coordinates):
"""Return line at *coordinates* | |
"03-016",
"nombre_ref": "CANTIL DEL PEDREGAL",
"pob_2010": 660
},
{
"id_ref": 449,
"del_ref": "COYOACAN",
"cve_col": "03-017",
                "nombre_ref": "CARMEN SERDAN",
"pob_2010": 8198
},
{
"id_ref": 450,
"del_ref": "COYOACAN",
"cve_col": "03-155",
"nombre_ref": "CENTRO URBANO (U HAB)",
"pob_2010": 437
},
{
"id_ref": 451,
"del_ref": "COYOACAN",
"cve_col": "03-018",
"nombre_ref": "CENTRO URBANO TLALPAN (U HAB)",
"pob_2010": 1154
},
{
"id_ref": 452,
"del_ref": "COYOACAN",
"cve_col": "03-019",
"nombre_ref": "CHIMALISTAC",
"pob_2010": 832
},
{
"id_ref": 453,
"del_ref": "COYOACAN",
"cve_col": "03-020",
"nombre_ref": "CIUDAD JARDIN",
"pob_2010": 2763
},
{
"id_ref": 454,
"del_ref": "COYOACAN",
"cve_col": "03-021",
"nombre_ref": "CIUDAD UNIVERSITARIA",
"pob_2010": 280
},
{
"id_ref": 455,
"del_ref": "COYOACAN",
"cve_col": "03-022",
"nombre_ref": "COPILCO EL ALTO",
"pob_2010": 3733
},
{
"id_ref": 456,
"del_ref": "COYOACAN",
"cve_col": "03-023",
"nombre_ref": "COPILCO EL BAJO",
"pob_2010": 4613
},
{
"id_ref": 457,
"del_ref": "COYOACAN",
"cve_col": "03-024",
"nombre_ref": "COPILCO UNIVERSIDAD",
"pob_2010": 3537
},
{
"id_ref": 458,
"del_ref": "COYOACAN",
"cve_col": "03-025",
"nombre_ref": "COUNTRY CLUB",
"pob_2010": 2965
},
{
"id_ref": 459,
"del_ref": "COYOACAN",
"cve_col": "03-026",
"nombre_ref": "CROC CULHUACAN SECC 6 (U HAB)",
"pob_2010": 2567
},
{
"id_ref": 460,
"del_ref": "COYOACAN",
"cve_col": "03-027",
"nombre_ref": "CTM IX CULHUACAN ZONA 29-30 (U HAB)",
"pob_2010": 4204
},
{
"id_ref": 461,
"del_ref": "COYOACAN",
"cve_col": "03-156",
"nombre_ref": "CTM IX CULHUACAN ZONA 32-33 (U HAB)",
"pob_2010": 4424
},
{
"id_ref": 462,
"del_ref": "COYOACAN",
"cve_col": "03-028",
"nombre_ref": "CTM IXA CULHUACAN (U HAB)",
"pob_2010": 3313
},
{
"id_ref": 463,
"del_ref": "COYOACAN",
"cve_col": "03-029",
"nombre_ref": "CTM V CULHUACAN (U HAB)",
"pob_2010": 7155
},
{
"id_ref": 464,
"del_ref": "COYOACAN",
"cve_col": "03-030",
"nombre_ref": "CTM VI CULHUACAN (U HAB)",
"pob_2010": 15762
},
{
"id_ref": 465,
"del_ref": "COYOACAN",
"cve_col": "03-031",
"nombre_ref": "CTM VII CULHUACAN (U HAB)",
"pob_2010": 8127
},
{
"id_ref": 466,
"del_ref": "COYOACAN",
"cve_col": "03-032",
"nombre_ref": "CTM VIIA CULHUACAN (U HAB)",
"pob_2010": 1262
},
{
"id_ref": 467,
"del_ref": "COYOACAN",
"cve_col": "03-033",
"nombre_ref": "CTM VIII CULHUACAN (U HAB)",
"pob_2010": 10991
},
{
"id_ref": 468,
"del_ref": "COYOACAN",
"cve_col": "03-034",
"nombre_ref": "CTM X CULHUACAN (U HAB)",
"pob_2010": 5384
},
{
"id_ref": 469,
"del_ref": "COYOACAN",
"cve_col": "03-035",
"nombre_ref": "CUADRANTE DE SAN FRANCISCO",
"pob_2010": 3278
},
{
"id_ref": 470,
"del_ref": "COYOACAN",
"cve_col": "03-036",
"nombre_ref": "DE LA CANDELARIA (PBLO)",
"pob_2010": 7747
},
{
"id_ref": 471,
"del_ref": "COYOACAN",
"cve_col": "03-037",
"nombre_ref": "DEL CARMEN",
"pob_2010": 10460
},
{
"id_ref": 472,
"del_ref": "COYOACAN",
"cve_col": "03-038",
"nombre_ref": "DEL NIÑO JESUS (BARR)",
"pob_2010": 2861
},
{
"id_ref": 473,
"del_ref": "COYOACAN",
"cve_col": "03-039",
"nombre_ref": "EDUCACION",
"pob_2010": 7777
},
{
"id_ref": 474,
"del_ref": "COYOACAN",
"cve_col": "03-041",
"nombre_ref": "EL CARACOL",
"pob_2010": 2447
},
{
"id_ref": 475,
"del_ref": "COYOACAN",
"cve_col": "03-042",
"nombre_ref": "EL CENTINELA",
"pob_2010": 2091
},
{
"id_ref": 476,
"del_ref": "COYOACAN",
"cve_col": "03-043",
"nombre_ref": "EL MIRADOR",
"pob_2010": 1778
},
{
"id_ref": 477,
"del_ref": "COYOACAN",
"cve_col": "03-044",
"nombre_ref": "EL PARQUE DE COYOACAN (FRACC)",
"pob_2010": 1730
},
{
"id_ref": 478,
"del_ref": "COYOACAN",
"cve_col": "03-045",
"nombre_ref": "EL RELOJ",
"pob_2010": 3196
},
{
"id_ref": 479,
"del_ref": "COYOACAN",
"cve_col": "03-046",
"nombre_ref": "EL ROSEDAL I",
"pob_2010": 1882
},
{
"id_ref": 480,
"del_ref": "COYOACAN",
"cve_col": "03-149",
"nombre_ref": "EL ROSEDAL II",
"pob_2010": 2210
},
{
"id_ref": 481,
"del_ref": "COYOACAN",
"cve_col": "03-047",
"nombre_ref": "EL VERGEL DE COYOACAN ( INFONAVIT EL HUESO) (U HAB)",
"pob_2010": 688
},
{
"id_ref": 482,
"del_ref": "COYOACAN",
"cve_col": "03-048",
"nombre_ref": "EMILIANO ZAPATA",
"pob_2010": 1351
},
{
"id_ref": 483,
"del_ref": "COYOACAN",
"cve_col": "03-049",
"nombre_ref": "EMILIANO ZAPATA (U HAB)",
"pob_2010": 6011
},
{
"id_ref": 484,
"del_ref": "COYOACAN",
"cve_col": "03-050",
"nombre_ref": "ESPARTACO",
"pob_2010": 4687
},
{
"id_ref": 485,
"del_ref": "COYOACAN",
"cve_col": "03-150",
"nombre_ref": "EX EJIDO DE CHURUBUSCO",
"pob_2010": 159
},
{
"id_ref": 486,
"del_ref": "COYOACAN",
"cve_col": "03-040",
"nombre_ref": "EX EJIDO SAN FRANCISCO CULHUACAN I",
"pob_2010": 634
},
{
"id_ref": 487,
"del_ref": "COYOACAN",
"cve_col": "03-157",
"nombre_ref": "EX EJIDO SAN FRANCISCO CULHUACAN II",
"pob_2010": 1283
},
{
"id_ref": 488,
"del_ref": "COYOACAN",
"cve_col": "03-158",
"nombre_ref": "EX EJIDO SAN FRANCISCO CULHUACAN III",
"pob_2010": 1251
},
{
"id_ref": 489,
"del_ref": "COYOACAN",
"cve_col": "03-051",
"nombre_ref": "EX HACIENDA COAPA",
"pob_2010": 1511
},
{
"id_ref": 490,
"del_ref": "COYOACAN",
"cve_col": "03-052",
"nombre_ref": "HACIENDAS DE COYOACAN (FRACC)",
"pob_2010": 1980
},
{
"id_ref": 491,
"del_ref": "COYOACAN",
"cve_col": "03-053",
"nombre_ref": "HERMOSILLO",
"pob_2010": 1744
},
{
"id_ref": 492,
"del_ref": "COYOACAN",
"cve_col": "03-054",
"nombre_ref": "IMAN",
"pob_2010": 1134
},
{
"id_ref": 493,
"del_ref": "COYOACAN",
"cve_col": "03-055",
"nombre_ref": "IMAN 580 (U HAB)",
"pob_2010": 2655
},
{
"id_ref": 494,
"del_ref": "COYOACAN",
"cve_col": "03-056",
"nombre_ref": "INFONAVIT CULHUACAN ZONA 1 (U HAB)",
"pob_2010": 1009
},
{
"id_ref": 495,
"del_ref": "COYOACAN",
"cve_col": "03-057",
"nombre_ref": "INFONAVIT CULHUACAN ZONA 2 (U HAB)",
"pob_2010": 1731
},
{
"id_ref": 496,
"del_ref": "COYOACAN",
"cve_col": "03-058",
"nombre_ref": "INFONAVIT CULHUACAN ZONA 3 (U HAB)",
"pob_2010": 1864
},
{
"id_ref": 497,
"del_ref": "COYOACAN",
"cve_col": "03-059",
"nombre_ref": "INSURGENTES CUICUILCO",
"pob_2010": 3345
},
{
"id_ref": 498,
"del_ref": "COYOACAN",
"cve_col": "03-060",
"nombre_ref": "INTEGRACION LATINOAMERICANA (U HAB)",
"pob_2010": 3775
},
{
"id_ref": 499,
"del_ref": "COYOACAN",
"cve_col": "03-061",
"nombre_ref": "<NAME> (FRACC)",
"pob_2010": 2390
},
{
"id_ref": 500,
"del_ref": "COYOACAN",
"cve_col": "03-062",
"nombre_ref": "JARDINES DEL PEDREGAL",
"pob_2010": 1141
},
{
"id_ref": 501,
"del_ref": "COYOACAN",
"cve_col": "03-063",
                "nombre_ref": "JOYAS DEL PEDREGAL (FRACC)",
"pob_2010": 1309
},
{
"id_ref": 502,
"del_ref": "COYOACAN",
"cve_col": "03-064",
"nombre_ref": "LA CANTERA (U HAB)",
"pob_2010": 1188
},
{
"id_ref": 503,
"del_ref": "COYOACAN",
"cve_col": "03-065",
"nombre_ref": "LA CONCEPCION (BARR)",
"pob_2010": 1393
},
{
"id_ref": 504,
"del_ref": "COYOACAN",
"cve_col": "03-066",
"nombre_ref": "LA MAGDALENA CULHUACAN (BARR)",
"pob_2010": 11249
},
{
"id_ref": 505,
"del_ref": "COYOACAN",
"cve_col": "03-067",
"nombre_ref": "LA VIRGEN 1170 (U HAB)",
"pob_2010": 1246
},
{
"id_ref": 506,
"del_ref": "COYOACAN",
"cve_col": "03-068",
"nombre_ref": "LAS CABAÑAS",
"pob_2010": 379
},
{
"id_ref": 507,
"del_ref": "COYOACAN",
"cve_col": "03-069",
"nombre_ref": "LAS CAMPANAS",
"pob_2010": 678
},
{
"id_ref": 508,
"del_ref": "COYOACAN",
"cve_col": "03-070",
"nombre_ref": "LAS TROJES COAPA (U HAB)",
"pob_2010": 484
},
{
"id_ref": 509,
"del_ref": "COYOACAN",
"cve_col": "03-071",
"nombre_ref": "LOS CEDROS (FRACC)",
"pob_2010": 951
},
{
"id_ref": 510,
"del_ref": "COYOACAN",
"cve_col": "03-072",
"nombre_ref": "LOS CIPRESES",
"pob_2010": 1738
},
{
"id_ref": 511,
"del_ref": "COYOACAN",
"cve_col": "03-073",
"nombre_ref": "LOS GIRASOLES I",
"pob_2010": 660
},
{
"id_ref": 512,
"del_ref": "COYOACAN",
"cve_col": "03-074",
"nombre_ref": "LOS GIRASOLES II",
"pob_2010": 1583
},
{
"id_ref": 513,
"del_ref": "COYOACAN",
"cve_col": "03-075",
"nombre_ref": "LOS GIRASOLES III",
"pob_2010": 3482
},
{
"id_ref": 514,
"del_ref": "COYOACAN",
"cve_col": "03-076",
"nombre_ref": "LOS OLIVOS (FRACC)",
"pob_2010": 1184
},
{
"id_ref": 515,
"del_ref": "COYOACAN",
"cve_col": "03-077",
"nombre_ref": "LOS OLIVOS (U HAB)",
"pob_2010": 493
},
{
"id_ref": 516,
"del_ref": "COYOACAN",
"cve_col": "03-078",
"nombre_ref": "LOS REYES (PBLO)",
"pob_2010": 16673
},
{
"id_ref": 517,
"del_ref": "COYOACAN",
"cve_col": "03-079",
"nombre_ref": "LOS ROBLES (FRACC)",
"pob_2010": 707
},
{
"id_ref": 518,
"del_ref": "COYOACAN",
"cve_col": "03-080",
"nombre_ref": "LOS SAUCES (FRACC)",
"pob_2010": 944
},
{
"id_ref": 519,
"del_ref": "COYOACAN",
"cve_col": "03-081",
"nombre_ref": "MEDIA LUNA",
"pob_2010": 485
},
{
"id_ref": 520,
"del_ref": "COYOACAN",
"cve_col": "03-151",
"nombre_ref": "MONTE DE PIEDAD",
"pob_2010": 2193
},
{
"id_ref": 521,
"del_ref": "COYOACAN",
"cve_col": "03-082",
"nombre_ref": "NUEVA DIAZ ORDAZ",
"pob_2010": 2115
},
{
"id_ref": 522,
"del_ref": "COYOACAN",
"cve_col": "03-083",
"nombre_ref": "OLIMPICA",
"pob_2010": 1129
},
{
"id_ref": 523,
"del_ref": "COYOACAN",
"cve_col": "03-084",
"nombre_ref": "OXTOPULCO UNIVERSIDAD",
"pob_2010": 1388
},
{
"id_ref": 524,
"del_ref": "COYOACAN",
"cve_col": "03-085",
"nombre_ref": "<NAME>",
"pob_2010": 5416
},
{
"id_ref": 525,
"del_ref": "COYOACAN",
"cve_col": "03-133",
"nombre_ref": "PASEOS DE TAXQUEÑA I",
"pob_2010": 7887
},
{
"id_ref": 526,
"del_ref": "COYOACAN",
"cve_col": "03-134",
"nombre_ref": "PASEOS DE TAXQUEÑA II",
"pob_2010": 6292
},
{
"id_ref": 527,
"del_ref": "COYOACAN",
"cve_col": "03-087",
"nombre_ref": "PEDREGAL DE LA ZORRA",
"pob_2010": 1506
},
{
"id_ref": 528,
"del_ref": "COYOACAN",
"cve_col": "03-088",
"nombre_ref": "PEDREGAL DE SAN ANGEL (AMPL)",
"pob_2010": 1526
},
{
"id_ref": 529,
"del_ref": "COYOACAN",
"cve_col": "03-089",
"nombre_ref": "PEDREGAL DE SAN FRANCISCO (FRACC)",
"pob_2010": 1543
},
{
"id_ref": 530,
"del_ref": "COYOACAN",
"cve_col": "03-135",
"nombre_ref": "PEDREGAL DE SANTA URSULA I",
"pob_2010": 9836
},
{
"id_ref": 531,
"del_ref": "COYOACAN",
"cve_col": "03-136",
"nombre_ref": "PEDREGAL DE SANTA URSULA II",
"pob_2010": 10813
},
{
"id_ref": 532,
"del_ref": "COYOACAN",
"cve_col": "03-137",
"nombre_ref": "PEDREGAL DE SANTA URSULA III",
"pob_2010": 9939
},
{
"id_ref": 533,
"del_ref": "COYOACAN",
"cve_col": "03-138",
"nombre_ref": "PEDREGAL DE SANTA URSULA IV",
"pob_2010": 11510
},
{
"id_ref": 534,
"del_ref": "COYOACAN",
"cve_col": "03-139",
"nombre_ref": "PEDREGAL DE STO DOMINGO I",
"pob_2010": 9809
},
{
"id_ref": 535,
"del_ref": "COYOACAN",
"cve_col": "03-140",
"nombre_ref": "PEDREGAL DE STO DOMINGO II",
"pob_2010": | |
<reponame>filemaster/aihwkit
# -*- coding: utf-8 -*-
# (C) Copyright 2020 IBM. All Rights Reserved.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Configuration for Analog (Resistive Device) tiles."""
# pylint: disable=too-many-instance-attributes
from dataclasses import dataclass, field
from typing import ClassVar, List, Type
from copy import deepcopy
from aihwkit.exceptions import ConfigError
from aihwkit.simulator.rpu_base import devices
from aihwkit.simulator.configs.helpers import (
_PrintableMixin, parameters_to_bindings
)
from aihwkit.simulator.configs.utils import (
IOParameters, UpdateParameters, VectorUnitCellUpdatePolicy
)
@dataclass
class FloatingPointDevice(_PrintableMixin):
    """Floating point reference.
    Implements ideal devices forward/backward/update behavior.
    """
    # Simulator binding type produced by as_bindings().
    bindings_class: ClassVar[Type] = devices.FloatingPointTileParameter
    diffusion: float = 0.0
    """Standard deviation of diffusion process."""
    lifetime: float = 0.0
    r"""One over `decay_rate`, ie :math:`1/r_\text{decay}`."""
    def as_bindings(self) -> devices.FloatingPointTileParameter:
        """Return a representation of this instance as a simulator bindings object."""
        return parameters_to_bindings(self)
    def requires_diffusion(self) -> bool:
        """Return whether device has diffusion enabled."""
        return self.diffusion > 0.0
    def requires_decay(self) -> bool:
        """Return whether device has decay enabled."""
        return self.lifetime > 0.0
@dataclass
class PulsedDevice(_PrintableMixin):
    r"""Pulsed update resistive devices.
    Device are used as part of an
    :class:`~aihwkit.simulator.tiles.AnalogTile` to implement the
    `update once` characteristics, i.e. the material response properties
    when a single update pulse is given (a coincidence between row and
    column pulse train happened).
    Common properties of all pulsed devices include:
    **Reset**:
    Resets the weight in cross points to (around) zero with
    cycle-to-cycle and systematic spread around a mean.
    **Decay**:
    .. math:: w_{ij} \leftarrow w_{ij}\,(1-\alpha_\text{decay}\delta_{ij})
    Weight decay is generally off and has to be activated explicitly
    by using :meth:`decay` on an analog tile. Note that the device
    ``decay_lifetime`` parameters (1 over decay rates
    :math:`\delta_{ij}`) are analog tile specific and are thus set and
    fixed during RPU initialization. :math:`\alpha_\text{decay}` is a
    scaling factor that can be given during run-time.
    **Diffusion**:
    Similar to the decay, diffusion is only activated by inserting a specific
    operator. However, the parameters of the diffusion
    process are set during RPU initialization and are fixed for the
    remainder.
    .. math:: w_{ij} \leftarrow w_{ij} + \rho_{ij} \, \xi;
    where :math:`xi` is a standard Gaussian variable and :math:`\rho_{ij}` the
    diffusion rate for a cross-point `ij`
    Note:
    If diffusion happens to move the weight beyond the hard bounds of the
    weight it is ensured to be clipped appropriately.
    """
    # Simulator binding type produced by as_bindings().
    bindings_class: ClassVar[Type] = devices.PulsedResistiveDeviceParameter
    construction_seed: int = 0
    """If not equal 0, will set a unique seed for hidden parameters
    during construction"""
    corrupt_devices_prob: float = 0.0
    """Probability for devices to be corrupt (weights fixed to random value
    with hard bounds, that is min and max bounds are set to equal)."""
    corrupt_devices_range: int = 1000
    """Range around zero for establishing corrupt devices."""
    diffusion: float = 0.0
    """Standard deviation of diffusion process."""
    diffusion_dtod: float = 0.0
    """Device-to device variation of diffusion rate in relative units."""
    dw_min: float = 0.001
    """Mean of the minimal update step sizes across devices and directions."""
    dw_min_dtod: float = 0.3
    """Device-to-device std deviation of dw_min (in relative units to ``dw_min``)."""
    dw_min_std: float = 0.3
    r"""Cycle-to-cycle variation size of the update step (related to
    :math:`\sigma_\text{c-to-c}` above) in relative units to ``dw_min``.
    Note:
    Many spread (device-to-device variation) parameters are
    given in relative units. For instance e.g. a setting of
    ``dw_min_std`` of 0.1 would mean 10% spread around the
    mean and thus a resulting standard deviation
    (:math:`\sigma_\text{c-to-c}`) of ``dw_min`` *
    ``dw_min_std``.
    """
    enforce_consistency: bool = True
    """Whether to enforce during initialization that max weight bounds cannot
    be smaller than min weight bounds, and up direction step size is positive
    and down negative. Switches the opposite values if encountered during
    init."""
    lifetime: float = 0.0
    r"""One over `decay_rate`, ie :math:`1/r_\text{decay}`."""
    lifetime_dtod: float = 0.0
    """Device-to-device variation in the decay rate (in relative units)."""
    perfect_bias: bool = False
    """No up-down differences and device-to-device variability in the bounds
    for the devices in the bias row."""
    reset: float = 0.01
    """The reset values and spread per cross-point ``ij`` when using reset functionality
    of the device."""
    reset_dtod: float = 0.0
    """See ``reset``."""
    reset_std: float = 0.01
    """See ``reset``."""
    up_down: float = 0.0
    r"""Up and down direction step sizes can be systematically different and also
    vary across devices.
    :math:`\Delta w_{ij}^d` is set during RPU initialization (for each cross-point `ij`):
    .. math::
    \Delta w_{ij}^d = d\; \Delta w_\text{min}\, \left(
    1 + d \beta_{ij} + \sigma_\text{d-to-d}\xi\right)
    where \xi is again a standard Gaussian. :math:`\beta_{ij}`
    is the directional up `versus` down bias. At initialization
    ``up_down_dtod`` and ``up_down`` defines this bias term:
    .. math::
    \beta_{ij} = \beta_\text{up-down} + \xi
    \sigma_\text{up-down-dtod}
    where \xi is again a standard Gaussian number and
    :math:`\beta_\text{up-down}` corresponds to
    ``up_down``. Note that ``up_down_dtod`` is again given in
    relative units to ``dw_min``.
    """
    up_down_dtod: float = 0.01
    """See ``up_down``."""
    w_max: float = 0.6
    """See ``w_min``."""
    w_max_dtod: float = 0.3
    """See ``w_min_dtod``."""
    w_min: float = -0.6
    """Mean of hard bounds across device cross-point `ij`. The parameters
    ``w_min`` and ``w_max`` are used to set the min/max bounds independently.
    Note:
    For this abstract device, we assume that weights can have
    positive and negative values and are symmetrically around
    zero. In physical circuit terms, this might be implemented
    as a difference of two resistive elements.
    """
    w_min_dtod: float = 0.3
    """Device-to-device variation of the hard bounds, of min and max value,
    respectively. All are given in relative units to ``w_min``, or ``w_max``,
    respectively."""
    def as_bindings(self) -> devices.PulsedResistiveDeviceParameter:
        """Return a representation of this instance as a simulator bindings object."""
        return parameters_to_bindings(self)
    def requires_diffusion(self) -> bool:
        """Return whether device has diffusion enabled."""
        return self.diffusion > 0.0
    def requires_decay(self) -> bool:
        """Return whether device has decay enabled."""
        return self.lifetime > 0.0
@dataclass
class UnitCell(_PrintableMixin):
    """Parameters that modify the behaviour of a unit cell."""
    # Simulator binding type; subclasses override as_bindings() to build it.
    bindings_class: ClassVar[Type] = devices.VectorResistiveDeviceParameter
    unit_cell_devices: List = field(default_factory=list)
    """Devices that compose this unit cell."""
    def as_bindings(self) -> devices.VectorResistiveDeviceParameter:
        """Return a representation of this instance as a simulator bindings object."""
        # Abstract here: concrete unit cells must implement the conversion.
        raise NotImplementedError
    def requires_diffusion(self) -> bool:
        """Return whether device has diffusion enabled."""
        # True if any member device has diffusion enabled.
        return any(dev.requires_diffusion() for dev in self.unit_cell_devices)
    def requires_decay(self) -> bool:
        """Return whether device has decay enabled."""
        # True if any member device has decay enabled.
        return any(dev.requires_decay() for dev in self.unit_cell_devices)
###############################################################################
# Specific devices based on ``pulsed``.
###############################################################################
@dataclass
class IdealDevice(_PrintableMixin):
    """Ideal update behavior (using floating point), but forward/backward
    might be non-ideal.

    The weight update itself is perfect (plain floating point
    accumulation); the forward/backward passes may nevertheless be subject
    to a non-ideal ADC or added noise.
    """

    # Simulator bindings class matching this configuration.
    bindings_class: ClassVar[Type] = devices.IdealResistiveDeviceParameter

    construction_seed: int = 0
    """If not equal 0, will set a unique seed for hidden parameters
    during construction"""

    diffusion: float = 0.0
    """Standard deviation of diffusion process."""

    lifetime: float = 0.0
    r"""One over `decay_rate`, ie :math:`1/r_\text{decay}`."""

    def as_bindings(self) -> devices.IdealResistiveDeviceParameter:
        """Return a representation of this instance as a simulator bindings object."""
        return parameters_to_bindings(self)

    def requires_diffusion(self) -> bool:
        """Return whether device has diffusion enabled."""
        diffusion_strength = self.diffusion
        return diffusion_strength > 0.0

    def requires_decay(self) -> bool:
        """Return whether device has decay enabled."""
        decay_lifetime = self.lifetime
        return decay_lifetime > 0.0
@dataclass
class ConstantStepDevice(PulsedDevice):
    r"""Pulsed update behavioral model: constant step.

    Pulsed update behavioral model, where the update step of
    material is constant throughout the resistive range (up to hard
    bounds).

    In more detail, the update behavior implemented for ``ConstantStep``
    is:

    .. math::

        w_{ij} &\leftarrow& w_{ij} - \Delta w_{ij}^d(1 + \sigma_\text{c-to-c}\,\xi)

        w_{ij} &\leftarrow& \text{clip}(w_{ij},b^\text{min}_{ij},b^\text{max}_{ij})

    where :math:`d` is the direction of the update (product of signs
    of input and error). :math:`\Delta w_{ij}^d` is the update step
    size of the cross-point `ij` in direction :math:`d` (up or down).
    Note that each cross-point has separate update sizes so that
    device-to-device fluctuations and biases in the directions can be
    given.

    Moreover, the clipping bounds of each cross-point `ij`
    (i.e. :math:`b_{ij}^\text{max/min}`) are also different in
    general. The mean and the amount of systematic spread from
    device-to-device can be given as parameters, see below.

    For parameters regarding the devices settings, see e.g.
    :class:`~aihwkit.simulator.parameters.ConstantStepResistiveDeviceParameters`.
    """

    # Only the bindings class differs from the generic ``PulsedDevice``;
    # all parameters and methods are inherited unchanged.
    bindings_class: ClassVar[Type] = devices.ConstantStepResistiveDeviceParameter
@dataclass
class LinearStepDevice(PulsedDevice):
r"""Pulsed update behavioral model: linear step.
Pulsed update behavioral model, where | |
# -*- coding: utf-8 -*-
# Plot 2D plane cuts of the simulation fields (density, momentum/velocity,
# vorticity, magnetic field, current) from the run's netCDF output, save
# final-snapshot figures and optionally animated movies, then dump the
# final cuts to .mat files.
# NOTE(review): np, netcdf, input_dir, runname, restart_num,
# ignored_points_fld, final_fld_idx, is2D, ismovie, xlab/ylab/zlab and the
# plotting helpers are assumed to come from these star imports — confirm
# against load.py / fft.py / plots.py.
from load import *
from fft import *
from plots import *

print('\nplotting fields\n')

# All figures produced by this script are written here.
outdir = './fig_fields/'

# Load 2D cut
ncfile = netcdf.netcdf_file(input_dir+runname+'.out.2D.nc'+restart_num, 'r')

# Each field is stored as three orthogonal plane cuts (z=0, x=0, y=0).
# np.copy detaches the array from the mmap-backed netCDF file; np.delete
# drops the time indices listed in ignored_points_fld (axis 0 is time).
tt_fld = np.copy(ncfile.variables['tt'][:]); tt_fld = np.delete(tt_fld, ignored_points_fld, axis=0)
xx_fld = np.copy(ncfile.variables['xx'][:])
yy_fld = np.copy(ncfile.variables['yy'][:])
zz_fld = np.copy(ncfile.variables['zz'][:])
# density
rho_r_z0 = np.copy(ncfile.variables['rho_r_z0'][:]); rho_r_z0 = np.delete(rho_r_z0, ignored_points_fld, axis=0)
rho_r_x0 = np.copy(ncfile.variables['rho_r_x0'][:]); rho_r_x0 = np.delete(rho_r_x0, ignored_points_fld, axis=0)
rho_r_y0 = np.copy(ncfile.variables['rho_r_y0'][:]); rho_r_y0 = np.delete(rho_r_y0, ignored_points_fld, axis=0)
# momentum components
mx_r_z0 = np.copy(ncfile.variables['mx_r_z0'][:]); mx_r_z0 = np.delete(mx_r_z0, ignored_points_fld, axis=0)
mx_r_x0 = np.copy(ncfile.variables['mx_r_x0'][:]); mx_r_x0 = np.delete(mx_r_x0, ignored_points_fld, axis=0)
mx_r_y0 = np.copy(ncfile.variables['mx_r_y0'][:]); mx_r_y0 = np.delete(mx_r_y0, ignored_points_fld, axis=0)
my_r_z0 = np.copy(ncfile.variables['my_r_z0'][:]); my_r_z0 = np.delete(my_r_z0, ignored_points_fld, axis=0)
my_r_x0 = np.copy(ncfile.variables['my_r_x0'][:]); my_r_x0 = np.delete(my_r_x0, ignored_points_fld, axis=0)
my_r_y0 = np.copy(ncfile.variables['my_r_y0'][:]); my_r_y0 = np.delete(my_r_y0, ignored_points_fld, axis=0)
mz_r_z0 = np.copy(ncfile.variables['mz_r_z0'][:]); mz_r_z0 = np.delete(mz_r_z0, ignored_points_fld, axis=0)
mz_r_x0 = np.copy(ncfile.variables['mz_r_x0'][:]); mz_r_x0 = np.delete(mz_r_x0, ignored_points_fld, axis=0)
mz_r_y0 = np.copy(ncfile.variables['mz_r_y0'][:]); mz_r_y0 = np.delete(mz_r_y0, ignored_points_fld, axis=0)
# vorticity components (curl u)
wx_r_z0 = np.copy(ncfile.variables['wx_r_z0'][:]); wx_r_z0 = np.delete(wx_r_z0, ignored_points_fld, axis=0)
wx_r_x0 = np.copy(ncfile.variables['wx_r_x0'][:]); wx_r_x0 = np.delete(wx_r_x0, ignored_points_fld, axis=0)
wx_r_y0 = np.copy(ncfile.variables['wx_r_y0'][:]); wx_r_y0 = np.delete(wx_r_y0, ignored_points_fld, axis=0)
wy_r_z0 = np.copy(ncfile.variables['wy_r_z0'][:]); wy_r_z0 = np.delete(wy_r_z0, ignored_points_fld, axis=0)
wy_r_x0 = np.copy(ncfile.variables['wy_r_x0'][:]); wy_r_x0 = np.delete(wy_r_x0, ignored_points_fld, axis=0)
wy_r_y0 = np.copy(ncfile.variables['wy_r_y0'][:]); wy_r_y0 = np.delete(wy_r_y0, ignored_points_fld, axis=0)
wz_r_z0 = np.copy(ncfile.variables['wz_r_z0'][:]); wz_r_z0 = np.delete(wz_r_z0, ignored_points_fld, axis=0)
wz_r_x0 = np.copy(ncfile.variables['wz_r_x0'][:]); wz_r_x0 = np.delete(wz_r_x0, ignored_points_fld, axis=0)
wz_r_y0 = np.copy(ncfile.variables['wz_r_y0'][:]); wz_r_y0 = np.delete(wz_r_y0, ignored_points_fld, axis=0)
# magnetic field components
bx_r_z0 = np.copy(ncfile.variables['bx_r_z0'][:]); bx_r_z0 = np.delete(bx_r_z0, ignored_points_fld, axis=0)
bx_r_x0 = np.copy(ncfile.variables['bx_r_x0'][:]); bx_r_x0 = np.delete(bx_r_x0, ignored_points_fld, axis=0)
bx_r_y0 = np.copy(ncfile.variables['bx_r_y0'][:]); bx_r_y0 = np.delete(bx_r_y0, ignored_points_fld, axis=0)
by_r_z0 = np.copy(ncfile.variables['by_r_z0'][:]); by_r_z0 = np.delete(by_r_z0, ignored_points_fld, axis=0)
by_r_x0 = np.copy(ncfile.variables['by_r_x0'][:]); by_r_x0 = np.delete(by_r_x0, ignored_points_fld, axis=0)
by_r_y0 = np.copy(ncfile.variables['by_r_y0'][:]); by_r_y0 = np.delete(by_r_y0, ignored_points_fld, axis=0)
bz_r_z0 = np.copy(ncfile.variables['bz_r_z0'][:]); bz_r_z0 = np.delete(bz_r_z0, ignored_points_fld, axis=0)
bz_r_x0 = np.copy(ncfile.variables['bz_r_x0'][:]); bz_r_x0 = np.delete(bz_r_x0, ignored_points_fld, axis=0)
bz_r_y0 = np.copy(ncfile.variables['bz_r_y0'][:]); bz_r_y0 = np.delete(bz_r_y0, ignored_points_fld, axis=0)
# current components (curl B)
jx_r_z0 = np.copy(ncfile.variables['jx_r_z0'][:]); jx_r_z0 = np.delete(jx_r_z0, ignored_points_fld, axis=0)
jx_r_x0 = np.copy(ncfile.variables['jx_r_x0'][:]); jx_r_x0 = np.delete(jx_r_x0, ignored_points_fld, axis=0)
jx_r_y0 = np.copy(ncfile.variables['jx_r_y0'][:]); jx_r_y0 = np.delete(jx_r_y0, ignored_points_fld, axis=0)
jy_r_z0 = np.copy(ncfile.variables['jy_r_z0'][:]); jy_r_z0 = np.delete(jy_r_z0, ignored_points_fld, axis=0)
jy_r_x0 = np.copy(ncfile.variables['jy_r_x0'][:]); jy_r_x0 = np.delete(jy_r_x0, ignored_points_fld, axis=0)
jy_r_y0 = np.copy(ncfile.variables['jy_r_y0'][:]); jy_r_y0 = np.delete(jy_r_y0, ignored_points_fld, axis=0)
jz_r_z0 = np.copy(ncfile.variables['jz_r_z0'][:]); jz_r_z0 = np.delete(jz_r_z0, ignored_points_fld, axis=0)
jz_r_x0 = np.copy(ncfile.variables['jz_r_x0'][:]); jz_r_x0 = np.delete(jz_r_x0, ignored_points_fld, axis=0)
jz_r_y0 = np.copy(ncfile.variables['jz_r_y0'][:]); jz_r_y0 = np.delete(jz_r_y0, ignored_points_fld, axis=0)
# velocity = momentum / density on every cut
ux_r_z0 = mx_r_z0/rho_r_z0
ux_r_x0 = mx_r_x0/rho_r_x0
ux_r_y0 = mx_r_y0/rho_r_y0
uy_r_z0 = my_r_z0/rho_r_z0
uy_r_x0 = my_r_x0/rho_r_x0
uy_r_y0 = my_r_y0/rho_r_y0
uz_r_z0 = mz_r_z0/rho_r_z0
uz_r_x0 = mz_r_x0/rho_r_x0
uz_r_y0 = mz_r_y0/rho_r_y0
ncfile.close()
#--------------------------------------------------------#
#                 plot final snapshot                     #
#--------------------------------------------------------#
# Magnitudes of the vector fields on each plane cut.
u_r_z0 = np.sqrt(ux_r_z0**2 + uy_r_z0**2 + uz_r_z0**2)
u_r_y0 = np.sqrt(ux_r_y0**2 + uy_r_y0**2 + uz_r_y0**2)
u_r_x0 = np.sqrt(ux_r_x0**2 + uy_r_x0**2 + uz_r_x0**2)
w_r_z0 = np.sqrt(wx_r_z0**2 + wy_r_z0**2 + wz_r_z0**2)
w_r_y0 = np.sqrt(wx_r_y0**2 + wy_r_y0**2 + wz_r_y0**2)
w_r_x0 = np.sqrt(wx_r_x0**2 + wy_r_x0**2 + wz_r_x0**2)
b_r_z0 = np.sqrt(bx_r_z0**2 + by_r_z0**2 + bz_r_z0**2)
b_r_y0 = np.sqrt(bx_r_y0**2 + by_r_y0**2 + bz_r_y0**2)
b_r_x0 = np.sqrt(bx_r_x0**2 + by_r_x0**2 + bz_r_x0**2)
j_r_z0 = np.sqrt(jx_r_z0**2 + jy_r_z0**2 + jz_r_z0**2)
j_r_y0 = np.sqrt(jx_r_y0**2 + jy_r_y0**2 + jz_r_y0**2)
j_r_x0 = np.sqrt(jx_r_x0**2 + jy_r_x0**2 + jz_r_x0**2)
# Density is plotted as a scalar; |u| and |B| additionally overlay
# streamlines built from the in-plane components on each cut.
plot_3d(rho_r_z0[final_fld_idx,:,:], rho_r_y0[final_fld_idx,:,:], rho_r_x0[final_fld_idx,:,:], xx_fld, yy_fld, zz_fld, xlab=xlab, ylab=ylab, zlab=zlab, title=r'$\rho (t = %.2E)$' % tt_fld[final_fld_idx], cmp=parula_map, save=outdir+'rho.pdf')
plot_3d(u_r_z0[final_fld_idx,:,:], u_r_y0[final_fld_idx,:,:], u_r_x0[final_fld_idx,:,:], xx_fld, yy_fld, zz_fld, (ux_r_z0[final_fld_idx,:,:], uy_r_z0[final_fld_idx,:,:]), (ux_r_y0[final_fld_idx,:,:], uz_r_y0[final_fld_idx,:,:]), (uy_r_x0[final_fld_idx,:,:], uz_r_x0[final_fld_idx,:,:]), xlab=xlab, ylab=ylab, zlab=zlab, title=r'$|\bm{u}| (t = %.2E)$' % tt_fld[final_fld_idx], cmp=parula_map, streamline_density=1, streamline_width=0.5, streamline_color='w', save=outdir+'u.pdf')
plot_3d(b_r_z0[final_fld_idx,:,:], b_r_y0[final_fld_idx,:,:], b_r_x0[final_fld_idx,:,:], xx_fld, yy_fld, zz_fld, (bx_r_z0[final_fld_idx,:,:], by_r_z0[final_fld_idx,:,:]), (bx_r_y0[final_fld_idx,:,:], bz_r_y0[final_fld_idx,:,:]), (by_r_x0[final_fld_idx,:,:], bz_r_x0[final_fld_idx,:,:]), xlab=xlab, ylab=ylab, zlab=zlab, title=r'$|\bm{B}| (t = %.2E)$' % tt_fld[final_fld_idx], cmp=parula_map, streamline_density=1, streamline_width=0.5, streamline_color='w', save=outdir+'b.pdf')
if is2D:
    # In 2D only the out-of-plane (z) components of curl u and curl B are
    # meaningful; plot them with a diverging colormap.
    plot_3d(wz_r_z0[final_fld_idx,:,:], wz_r_y0[final_fld_idx,:,:], wz_r_x0[final_fld_idx,:,:], xx_fld, yy_fld, zz_fld, xlab=xlab, ylab=ylab, zlab=zlab, title=r'$(\Curl\bm{u})_z (t = %.2E)$' % tt_fld[final_fld_idx], cmp='RdBu_r', save=outdir+'w.pdf')
    plot_3d(jz_r_z0[final_fld_idx,:,:], jz_r_y0[final_fld_idx,:,:], jz_r_x0[final_fld_idx,:,:], xx_fld, yy_fld, zz_fld, xlab=xlab, ylab=ylab, zlab=zlab, title=r'$(\Curl\bm{B})_z (t = %.2E)$' % tt_fld[final_fld_idx], cmp='RdBu_r', save=outdir+'j.pdf')
else:
    # In 3D plot the full magnitudes instead.
    plot_3d(w_r_z0[final_fld_idx,:,:], w_r_y0[final_fld_idx,:,:], w_r_x0[final_fld_idx,:,:], xx_fld, yy_fld, zz_fld, xlab=xlab, ylab=ylab, zlab=zlab, title=r'$|\Curl\bm{u}| (t = %.2E)$' % tt_fld[final_fld_idx], cmp=parula_map, save=outdir+'w.pdf')
    plot_3d(j_r_z0[final_fld_idx,:,:], j_r_y0[final_fld_idx,:,:], j_r_x0[final_fld_idx,:,:], xx_fld, yy_fld, zz_fld, xlab=xlab, ylab=ylab, zlab=zlab, title=r'$|\Curl\bm{B}| (t = %.2E)$' % tt_fld[final_fld_idx], cmp=parula_map, save=outdir+'j.pdf')
#--------------------------------------------------------#
#                      plot movie                         #
#--------------------------------------------------------#
if ismovie:
    # evenly space time
    # nframe = min(200, int(tt[:final_fld_idx].size))
    nframe = tt_fld[:final_fld_idx].size
    # For each target time on an even grid, pick the closest stored
    # snapshot; np.unique both sorts and removes duplicate frame indices.
    idx = np.unique([np.argmin(abs(tt_fld - np.linspace(tt_fld[0], tt_fld[-1], nframe)[i])) for i in range(0, nframe)])
    tt_fld = tt_fld[idx]
    # Keep only the selected frames for every field (axis 0 is time).
    rho_r_z0 = np.take(rho_r_z0, idx, axis=0)
    rho_r_y0 = np.take(rho_r_y0, idx, axis=0)
    rho_r_x0 = np.take(rho_r_x0, idx, axis=0)
    ux_r_z0 = np.take(ux_r_z0, idx, axis=0)
    ux_r_y0 = np.take(ux_r_y0, idx, axis=0)
    ux_r_x0 = np.take(ux_r_x0, idx, axis=0)
    uy_r_z0 = np.take(uy_r_z0, idx, axis=0)
    uy_r_y0 = np.take(uy_r_y0, idx, axis=0)
    uy_r_x0 = np.take(uy_r_x0, idx, axis=0)
    uz_r_z0 = np.take(uz_r_z0, idx, axis=0)
    uz_r_y0 = np.take(uz_r_y0, idx, axis=0)
    uz_r_x0 = np.take(uz_r_x0, idx, axis=0)
    wx_r_z0 = np.take(wx_r_z0, idx, axis=0)
    wx_r_y0 = np.take(wx_r_y0, idx, axis=0)
    wx_r_x0 = np.take(wx_r_x0, idx, axis=0)
    wy_r_z0 = np.take(wy_r_z0, idx, axis=0)
    wy_r_y0 = np.take(wy_r_y0, idx, axis=0)
    wy_r_x0 = np.take(wy_r_x0, idx, axis=0)
    wz_r_z0 = np.take(wz_r_z0, idx, axis=0)
    wz_r_y0 = np.take(wz_r_y0, idx, axis=0)
    wz_r_x0 = np.take(wz_r_x0, idx, axis=0)
    bx_r_z0 = np.take(bx_r_z0, idx, axis=0)
    bx_r_y0 = np.take(bx_r_y0, idx, axis=0)
    bx_r_x0 = np.take(bx_r_x0, idx, axis=0)
    by_r_z0 = np.take(by_r_z0, idx, axis=0)
    by_r_y0 = np.take(by_r_y0, idx, axis=0)
    by_r_x0 = np.take(by_r_x0, idx, axis=0)
    bz_r_z0 = np.take(bz_r_z0, idx, axis=0)
    bz_r_y0 = np.take(bz_r_y0, idx, axis=0)
    bz_r_x0 = np.take(bz_r_x0, idx, axis=0)
    jx_r_z0 = np.take(jx_r_z0, idx, axis=0)
    jx_r_y0 = np.take(jx_r_y0, idx, axis=0)
    jx_r_x0 = np.take(jx_r_x0, idx, axis=0)
    jy_r_z0 = np.take(jy_r_z0, idx, axis=0)
    jy_r_y0 = np.take(jy_r_y0, idx, axis=0)
    jy_r_x0 = np.take(jy_r_x0, idx, axis=0)
    jz_r_z0 = np.take(jz_r_z0, idx, axis=0)
    jz_r_y0 = np.take(jz_r_y0, idx, axis=0)
    jz_r_x0 = np.take(jz_r_x0, idx, axis=0)
    # Recompute the magnitudes from the down-sampled components.
    u_r_z0 = np.sqrt(ux_r_z0**2 + uy_r_z0**2 + uz_r_z0**2)
    u_r_y0 = np.sqrt(ux_r_y0**2 + uy_r_y0**2 + uz_r_y0**2)
    u_r_x0 = np.sqrt(ux_r_x0**2 + uy_r_x0**2 + uz_r_x0**2)
    w_r_z0 = np.sqrt(wx_r_z0**2 + wy_r_z0**2 + wz_r_z0**2)
    w_r_y0 = np.sqrt(wx_r_y0**2 + wy_r_y0**2 + wz_r_y0**2)
    w_r_x0 = np.sqrt(wx_r_x0**2 + wy_r_x0**2 + wz_r_x0**2)
    b_r_z0 = np.sqrt(bx_r_z0**2 + by_r_z0**2 + bz_r_z0**2)
    b_r_y0 = np.sqrt(bx_r_y0**2 + by_r_y0**2 + bz_r_y0**2)
    b_r_x0 = np.sqrt(bx_r_x0**2 + by_r_x0**2 + bz_r_x0**2)
    j_r_z0 = np.sqrt(jx_r_z0**2 + jy_r_z0**2 + jz_r_z0**2)
    j_r_y0 = np.sqrt(jx_r_y0**2 + jy_r_y0**2 + jz_r_y0**2)
    j_r_x0 = np.sqrt(jx_r_x0**2 + jy_r_x0**2 + jz_r_x0**2)
    # Same figures as the final snapshot above, but animated over time.
    movie_3d(tt_fld, rho_r_z0, rho_r_y0, rho_r_x0, xx_fld, yy_fld, zz_fld, xlab=xlab, ylab=ylab, zlab=zlab, title=r'$\rho$', cmp=parula_map, save=outdir+'rho_anim.gif')
    movie_3d(tt_fld, u_r_z0, u_r_y0, u_r_x0, xx_fld, yy_fld, zz_fld, (ux_r_z0, uy_r_z0), (ux_r_y0, uz_r_y0), (uy_r_x0, uz_r_x0), xlab=xlab, ylab=ylab, zlab=zlab, title=r'$|\bm{u}|$', cmp=parula_map, save=outdir+'u_anim.gif')
    movie_3d(tt_fld, b_r_z0, b_r_y0, b_r_x0, xx_fld, yy_fld, zz_fld, (bx_r_z0, by_r_z0), (bx_r_y0, bz_r_y0), (by_r_x0, bz_r_x0), xlab=xlab, ylab=ylab, zlab=zlab, title=r'$|\bm{B}|$', cmp=parula_map, save=outdir+'b_anim.gif')
    if is2D:
        movie_3d(tt_fld, wz_r_z0, wz_r_y0, wz_r_x0, xx_fld, yy_fld, zz_fld, xlab=xlab, ylab=ylab, zlab=zlab, title=r'$(\Curl\bm{u})_z$', cmp='RdBu_r', save=outdir+'w_anim.gif')
        movie_3d(tt_fld, jz_r_z0, jz_r_y0, jz_r_x0, xx_fld, yy_fld, zz_fld, xlab=xlab, ylab=ylab, zlab=zlab, title=r'$(\Curl\bm{B})_z$', cmp='RdBu_r', save=outdir+'j_anim.gif')
    else:
        movie_3d(tt_fld, w_r_z0, w_r_y0, w_r_x0, xx_fld, yy_fld, zz_fld, xlab=xlab, ylab=ylab, zlab=zlab, title=r'$|\Curl\bm{u}|$', cmp=parula_map, save=outdir+'w_anim.gif')
        movie_3d(tt_fld, j_r_z0, j_r_y0, j_r_x0, xx_fld, yy_fld, zz_fld, xlab=xlab, ylab=ylab, zlab=zlab, title=r'$|\Curl\bm{B}|$', cmp=parula_map, save=outdir+'j_anim.gif')
#------------------#
#   output data    #
#------------------#
from scipy.io import savemat

# NOTE(review): the rest of this script uses the *_fld variables
# (xx_fld, yy_fld, zz_fld, tt_fld), but 'xx'/'yy'/'zz'/'tt' are referenced
# below — presumably star-imported from load.py. Confirm they hold the
# intended grid/time arrays; otherwise these should be the *_fld versions.
savemat(outdir + 'grid', {'xx':xx, 'yy':yy, 'zz':zz})
# Final-snapshot density cuts.
savemat(outdir + 'rho_r', {
    'tt' :tt[final_fld_idx],
    'rho_r_z0' :rho_r_z0[final_fld_idx,:,:],
    'rho_r_x0' :rho_r_x0[final_fld_idx,:,:],
    'rho_r_y0' :rho_r_y0[final_fld_idx,:,:],
})
# Final-snapshot velocity component cuts.
savemat(outdir + 'u_r', {
    'tt' :tt[final_fld_idx],
    'ux_r_z0' :ux_r_z0[final_fld_idx,:,:],
    'ux_r_x0' :ux_r_x0[final_fld_idx,:,:],
    'ux_r_y0' :ux_r_y0[final_fld_idx,:,:],
    #
    'uy_r_z0' :uy_r_z0[final_fld_idx,:,:],
    'uy_r_x0' :uy_r_x0[final_fld_idx,:,:],
    'uy_r_y0' :uy_r_y0[final_fld_idx,:,:],
    #
    'uz_r_z0' :uz_r_z0[final_fld_idx,:,:],
    'uz_r_x0' :uz_r_x0[final_fld_idx,:,:],
    'uz_r_y0' :uz_r_y0[final_fld_idx,:,:],
})
savemat(outdir + 'b_r' , {
'tt' :tt[final_fld_idx],
'bx_r_z0' :bx_r_z0 [final_fld_idx,:,:],
'bx_r_x0' :bx_r_x0 [final_fld_idx,:,:],
'bx_r_y0' :bx_r_y0 [final_fld_idx,:,:],
#
'by_r_z0' :by_r_z0 [final_fld_idx,:,:],
'by_r_x0' :by_r_x0 [final_fld_idx,:,:],
'by_r_y0' :by_r_y0 [final_fld_idx,:,:],
#
'bz_r_z0' :bz_r_z0 [final_fld_idx,:,:],
'bz_r_x0' :bz_r_x0 [final_fld_idx,:,:],
'bz_r_y0' :bz_r_y0 | |
side='left')
res = self.executor.execute_tensor(t9, concat=True)[0]
expected = np.searchsorted(raw, raw2, side='left')
np.testing.assert_array_equal(res, expected)
# test tensor, side right
t10 = searchsorted(arr, tensor(raw2, chunk_size=2), side='right')
res = self.executor.execute_tensor(t10, concat=True)[0]
expected = np.searchsorted(raw, raw2, side='right')
np.testing.assert_array_equal(res, expected)
# test one chunk
arr = tensor(raw, chunk_size=16)
# test scalar, tensor to search has 1 chunk
t11 = searchsorted(arr, 20)
res = self.executor.execute_tensor(t11, concat=True)[0]
expected = np.searchsorted(raw, 20)
np.testing.assert_array_equal(res, expected)
# test tensor with 1 chunk, tensor to search has 1 chunk
t12 = searchsorted(arr, tensor(raw2, chunk_size=4))
res = self.executor.execute_tensor(t12, concat=True)[0]
expected = np.searchsorted(raw, raw2)
np.testing.assert_array_equal(res, expected)
# test tensor with more than 1 chunk, tensor to search has 1 chunk
t13 = searchsorted(arr, tensor(raw2, chunk_size=2))
res = self.executor.execute_tensor(t13, concat=True)[0]
expected = np.searchsorted(raw, raw2)
np.testing.assert_array_equal(res, expected)
# test sorter
raw3 = np.random.randint(100, size=(16,))
arr = tensor(raw3, chunk_size=3)
order = np.argsort(raw3)
order_arr = tensor(order, chunk_size=4)
t14 = searchsorted(arr, 20, sorter=order_arr)
res = self.executor.execute_tensor(t14, concat=True)[0]
expected = np.searchsorted(raw3, 20, sorter=order)
np.testing.assert_array_equal(res, expected)
def testUniqueExecution(self):
    """``unique`` must match ``np.unique`` for every ``return_*`` flag
    combination, for single- and multi-chunk tensors, n-d input, axis
    reduction, aggregated execution, sparse input and empty tensors."""
    rs = np.random.RandomState(0)
    raw = rs.randint(10, size=(10,))

    # chunk_size=10 exercises the single-chunk path, 3 the multi-chunk path
    for chunk_size in (10, 3):
        x = tensor(raw, chunk_size=chunk_size)

        y = unique(x)
        res = self.executor.execute_tensor(y, concat=True)[0]
        expected = np.unique(raw)
        np.testing.assert_array_equal(res, expected)

        y, indices = unique(x, return_index=True)
        res = self.executor.execute_tensors([y, indices])
        expected = np.unique(raw, return_index=True)
        self.assertEqual(len(res), 2)
        self.assertEqual(len(expected), 2)
        np.testing.assert_array_equal(res[0], expected[0])
        np.testing.assert_array_equal(res[1], expected[1])

        y, inverse = unique(x, return_inverse=True)
        res = self.executor.execute_tensors([y, inverse])
        expected = np.unique(raw, return_inverse=True)
        self.assertEqual(len(res), 2)
        self.assertEqual(len(expected), 2)
        np.testing.assert_array_equal(res[0], expected[0])
        np.testing.assert_array_equal(res[1], expected[1])

        y, counts = unique(x, return_counts=True)
        res = self.executor.execute_tensors([y, counts])
        expected = np.unique(raw, return_counts=True)
        self.assertEqual(len(res), 2)
        self.assertEqual(len(expected), 2)
        np.testing.assert_array_equal(res[0], expected[0])
        np.testing.assert_array_equal(res[1], expected[1])

        y, indices, inverse, counts = unique(x, return_index=True,
                                             return_inverse=True, return_counts=True)
        res = self.executor.execute_tensors([y, indices, inverse, counts])
        expected = np.unique(raw, return_index=True,
                             return_inverse=True, return_counts=True)
        self.assertEqual(len(res), 4)
        self.assertEqual(len(expected), 4)
        np.testing.assert_array_equal(res[0], expected[0])
        np.testing.assert_array_equal(res[1], expected[1])
        np.testing.assert_array_equal(res[2], expected[2])
        np.testing.assert_array_equal(res[3], expected[3])

        y, indices, counts = unique(x, return_index=True, return_counts=True)
        res = self.executor.execute_tensors([y, indices, counts])
        expected = np.unique(raw, return_index=True, return_counts=True)
        self.assertEqual(len(res), 3)
        self.assertEqual(len(expected), 3)
        np.testing.assert_array_equal(res[0], expected[0])
        np.testing.assert_array_equal(res[1], expected[1])
        np.testing.assert_array_equal(res[2], expected[2])

        # n-d input: flattened default, then along specific axes
        raw2 = rs.randint(10, size=(4, 5, 6))
        x2 = tensor(raw2, chunk_size=chunk_size)

        y2 = unique(x2)
        res = self.executor.execute_tensor(y2, concat=True)[0]
        expected = np.unique(raw2)
        np.testing.assert_array_equal(res, expected)

        y2 = unique(x2, axis=1)
        res = self.executor.execute_tensor(y2, concat=True)[0]
        expected = np.unique(raw2, axis=1)
        np.testing.assert_array_equal(res, expected)

        y2 = unique(x2, axis=2)
        res = self.executor.execute_tensor(y2, concat=True)[0]
        expected = np.unique(raw2, axis=2)
        np.testing.assert_array_equal(res, expected)

    # aggregate_size > 1 may return the unique columns in arbitrary order,
    # so sort the result columns (via a DataFrame sort) before comparing
    # against numpy's canonical ordering.
    raw = rs.randint(10, size=(10, 20))
    raw[:, 0] = raw[:, 11] = rs.randint(10, size=(10,))
    x = tensor(raw, chunk_size=2)
    y, ind, inv, counts = unique(x, aggregate_size=3, axis=1, return_index=True,
                                 return_inverse=True, return_counts=True)
    res_unique, res_ind, res_inv, res_counts = self.executor.execute_tensors((y, ind, inv, counts))
    exp_unique, exp_ind, exp_counts = np.unique(raw, axis=1, return_index=True, return_counts=True)
    raw_res_unique = res_unique
    res_unique_df = pd.DataFrame(res_unique)
    res_unique_ind = np.asarray(res_unique_df.sort_values(list(range(res_unique.shape[0])),
                                                          axis=1).columns)
    res_unique = res_unique[:, res_unique_ind]
    res_ind = res_ind[res_unique_ind]
    res_counts = res_counts[res_unique_ind]
    np.testing.assert_array_equal(res_unique, exp_unique)
    np.testing.assert_array_equal(res_ind, exp_ind)
    # the inverse indices must reconstruct the raw input from the result
    np.testing.assert_array_equal(raw_res_unique[:, res_inv], raw)
    np.testing.assert_array_equal(res_counts, exp_counts)

    # a binarized random tensor contains exactly the values {0, 1}
    x = (mt.random.RandomState(0).rand(1000, chunk_size=20) > 0.5).astype(np.int32)
    y = unique(x)
    res = np.sort(self.executor.execute_tensor(y, concat=True)[0])
    np.testing.assert_array_equal(res, np.array([0, 1]))

    # test sparse
    sparse_raw = sps.random(10, 3, density=0.1, format='csr', random_state=rs)
    x = tensor(sparse_raw, chunk_size=2)
    y = unique(x)
    res = np.sort(self.executor.execute_tensor(y, concat=True)[0])
    np.testing.assert_array_equal(res, np.unique(sparse_raw.data))

    # test empty
    x = tensor([])
    y = unique(x)
    res = self.executor.execute_tensor(y, concat=True)[0]
    np.testing.assert_array_equal(res, np.unique([]))

    x = tensor([[]])
    y = unique(x)
    res = self.executor.execute_tensor(y, concat=True)[0]
    np.testing.assert_array_equal(res, np.unique([[]]))
@require_cupy
def testToGPUExecution(self):
    """Moving a host tensor to the GPU must preserve its contents."""
    data = np.random.rand(10, 10)
    cpu_tensor = tensor(data, chunk_size=3)
    gpu_tensor = to_gpu(cpu_tensor)
    result = self.executor.execute_tensor(gpu_tensor, concat=True)[0]
    # the result lives on device; .get() copies it back for comparison
    np.testing.assert_array_equal(result.get(), data)
@require_cupy
def testToCPUExecution(self):
    """Copying a GPU tensor back to host memory must preserve its contents."""
    data = np.random.rand(10, 10)
    gpu_tensor = tensor(data, chunk_size=3, gpu=True)
    host_tensor = to_cpu(gpu_tensor)
    result = self.executor.execute_tensor(host_tensor, concat=True)[0]
    np.testing.assert_array_equal(result, data)
def testSortExecution(self):
    """``sort`` must agree with ``np.sort`` across chunk layouts, axes,
    ``psrs_kinds`` variants, structured dtypes with ``order``, flattening
    (``axis=None``), in-place sorting and tensors containing empty chunks."""
    # only 1 chunk when axis = -1
    raw = np.random.rand(100, 10)
    x = tensor(raw, chunk_size=10)

    sx = sort(x)
    res = self.executor.execute_tensor(sx, concat=True)[0]
    np.testing.assert_array_equal(res, np.sort(raw))

    # 1-d chunk
    raw = np.random.rand(100)
    x = tensor(raw, chunk_size=10)

    sx = sort(x)
    res = self.executor.execute_tensor(sx, concat=True)[0]
    np.testing.assert_array_equal(res, np.sort(raw))

    # test force need_align=True: output chunking must match the input's
    sx = sort(x)
    sx.op._need_align = True
    res = self.executor.execute_tensor(sx, concat=True)[0]
    self.assertEqual(get_tiled(sx).nsplits, get_tiled(x).nsplits)
    np.testing.assert_array_equal(res, np.sort(raw))

    # test psrs_kinds
    sx = sort(x, psrs_kinds=[None, None, 'quicksort'])
    res = self.executor.execute_tensor(sx, concat=True)[0]
    np.testing.assert_array_equal(res, np.sort(raw))

    # structured dtype
    raw = np.empty(100, dtype=[('id', np.int32), ('size', np.int64)])
    raw['id'] = np.random.randint(1000, size=100, dtype=np.int32)
    raw['size'] = np.random.randint(1000, size=100, dtype=np.int64)
    x = tensor(raw, chunk_size=10)

    sx = sort(x, order=['size', 'id'])
    res = self.executor.execute_tensor(sx, concat=True)[0]
    np.testing.assert_array_equal(res, np.sort(raw, order=['size', 'id']))

    # test psrs_kinds with structured dtype
    sx = sort(x, order=['size', 'id'], psrs_kinds=[None, None, 'quicksort'])
    res = self.executor.execute_tensor(sx, concat=True)[0]
    np.testing.assert_array_equal(res, np.sort(raw, order=['size', 'id']))

    # test flatten case
    raw = np.random.rand(10, 10)
    x = tensor(raw, chunk_size=5)

    sx = sort(x, axis=None)
    res = self.executor.execute_tensor(sx, concat=True)[0]
    np.testing.assert_array_equal(res, np.sort(raw, axis=None))

    # test multi-dimension
    raw = np.random.rand(10, 100)
    x = tensor(raw, chunk_size=(2, 10))

    sx = sort(x, psrs_kinds=['quicksort'] * 3)
    res = self.executor.execute_tensor(sx, concat=True)[0]
    np.testing.assert_array_equal(res, np.sort(raw))

    sx = sort(x, psrs_kinds=[None, None, 'quicksort'])
    res = self.executor.execute_tensor(sx, concat=True)[0]
    np.testing.assert_array_equal(res, np.sort(raw))

    # uneven trailing chunk along the sorted axis
    raw = np.random.rand(10, 99)
    x = tensor(raw, chunk_size=(2, 10))

    sx = sort(x)
    res = self.executor.execute_tensor(sx, concat=True)[0]
    np.testing.assert_array_equal(res, np.sort(raw))

    # test 3-d
    raw = np.random.rand(20, 25, 28)
    x = tensor(raw, chunk_size=(10, 5, 7))

    sx = sort(x)
    res = self.executor.execute_tensor(sx, concat=True)[0]
    np.testing.assert_array_equal(res, np.sort(raw))

    sx = sort(x, psrs_kinds=[None, None, 'quicksort'])
    res = self.executor.execute_tensor(sx, concat=True)[0]
    np.testing.assert_array_equal(res, np.sort(raw))

    sx = sort(x, axis=0)
    res = self.executor.execute_tensor(sx, concat=True)[0]
    np.testing.assert_array_equal(res, np.sort(raw, axis=0))

    sx = sort(x, axis=0, psrs_kinds=[None, None, 'quicksort'])
    res = self.executor.execute_tensor(sx, concat=True)[0]
    np.testing.assert_array_equal(res, np.sort(raw, axis=0))

    sx = sort(x, axis=1)
    res = self.executor.execute_tensor(sx, concat=True)[0]
    np.testing.assert_array_equal(res, np.sort(raw, axis=1))

    sx = sort(x, axis=1, psrs_kinds=[None, None, 'quicksort'])
    res = self.executor.execute_tensor(sx, concat=True)[0]
    np.testing.assert_array_equal(res, np.sort(raw, axis=1))

    # test multi-dimension with structured type
    raw = np.empty((10, 100), dtype=[('id', np.int32), ('size', np.int64)])
    raw['id'] = np.random.randint(1000, size=(10, 100), dtype=np.int32)
    raw['size'] = np.random.randint(1000, size=(10, 100), dtype=np.int64)
    x = tensor(raw, chunk_size=(3, 10))

    sx = sort(x)
    res = self.executor.execute_tensor(sx, concat=True)[0]
    np.testing.assert_array_equal(res, np.sort(raw))

    sx = sort(x, order=['size', 'id'])
    res = self.executor.execute_tensor(sx, concat=True)[0]
    np.testing.assert_array_equal(res, np.sort(raw, order=['size', 'id']))

    sx = sort(x, order=['size'])
    res = self.executor.execute_tensor(sx, concat=True)[0]
    np.testing.assert_array_equal(res, np.sort(raw, order=['size']))

    sx = sort(x, axis=0, order=['size', 'id'])
    res = self.executor.execute_tensor(sx, concat=True)[0]
    np.testing.assert_array_equal(res, np.sort(raw, axis=0, order=['size', 'id']))

    sx = sort(x, axis=0, order=['size', 'id'],
              psrs_kinds=[None, None, 'quicksort'])
    res = self.executor.execute_tensor(sx, concat=True)[0]
    np.testing.assert_array_equal(res, np.sort(raw, axis=0, order=['size', 'id']))

    # test inplace sort
    raw = np.random.rand(10, 12)
    a = tensor(raw, chunk_size=(5, 4))
    a.sort(axis=1)
    res = self.executor.execute_tensor(a, concat=True)[0]
    np.testing.assert_array_equal(res, np.sort(raw, axis=1))

    # second in-place sort stacks on the first one's result
    a.sort(axis=0)
    res = self.executor.execute_tensor(a, concat=True)[0]
    np.testing.assert_array_equal(res, np.sort(np.sort(raw, axis=1), axis=0))

    # test with empty chunk: boolean filtering leaves some chunks empty
    raw = np.random.rand(20, 10)
    raw[:, :8] = 1
    a = tensor(raw, chunk_size=5)
    filtered = a[a < 1]
    filtered.sort()
    res = self.executor.execute_tensor(filtered, concat=True)[0]
    np.testing.assert_array_equal(res, np.sort(raw[raw < 1]))
def testSortIndicesExecution(self):
    """``sort(..., return_index=True)`` must return indices that reproduce
    the sorted data when applied to the raw input."""
    # 2-d input; only one chunk along the sorted axis (axis = -1)
    data = np.random.rand(100, 10)
    arr = tensor(data, chunk_size=10)

    result = sort(arr, return_index=True)
    sorted_res, sorted_idx = self.executor.execute_tensors(result)
    np.testing.assert_array_equal(sorted_res, np.take_along_axis(data, sorted_idx, axis=-1))

    # chunks split along both axes
    arr = tensor(data, chunk_size=(22, 4))

    result = sort(arr, return_index=True)
    sorted_res, sorted_idx = self.executor.execute_tensors(result)
    np.testing.assert_array_equal(sorted_res, np.take_along_axis(data, sorted_idx, axis=-1))

    # 1-d input sorted along axis 0
    data = np.random.rand(100)
    arr = tensor(data, chunk_size=23)

    result = sort(arr, axis=0, return_index=True)
    sorted_res, sorted_idx = self.executor.execute_tensors(result)
    np.testing.assert_array_equal(sorted_res, data[sorted_idx])
def testArgsort(self):
    """``argsort`` must return indices that reorder the raw data into
    ``np.sort`` order."""
    data = np.random.rand(100, 10)

    # single chunk along the default sort axis (-1)
    arr = tensor(data, chunk_size=10)
    idx = self.executor.execute_tensor(argsort(arr), concat=True)[0]
    np.testing.assert_array_equal(np.sort(data), np.take_along_axis(data, idx, axis=-1))

    # chunks split along both axes
    arr = tensor(data, chunk_size=(22, 4))
    idx = self.executor.execute_tensor(argsort(arr), concat=True)[0]
    np.testing.assert_array_equal(np.sort(data), np.take_along_axis(data, idx, axis=-1))

    # 1-d input with an explicit axis
    data = np.random.rand(100)
    arr = tensor(data, chunk_size=23)
    idx = self.executor.execute_tensor(argsort(arr, axis=0), concat=True)[0]
    np.testing.assert_array_equal(np.sort(data, axis=0), data[idx])
def testPartitionExecution(self):
# only 1 chunk when axis = -1
raw = np.random.rand(100, 10)
x = tensor(raw, chunk_size=10)
px = partition(x, [1, 8])
res = self.executor.execute_tensor(px, concat=True)[0]
np.testing.assert_array_equal(res, np.partition(raw, [1, 8]))
# 1-d chunk
raw = np.random.rand(100)
x = tensor(raw, chunk_size=10)
kth = np.random.RandomState(0).randint(-100, 100, size=(10,))
px = partition(x, kth)
res = self.executor.execute_tensor(px, concat=True)[0]
np.testing.assert_array_equal(res[kth], np.partition(raw, kth)[kth])
# structured dtype
raw = np.empty(100, dtype=[('id', np.int32), ('size', np.int64)])
raw['id'] = np.random.randint(1000, size=100, dtype=np.int32)
raw['size'] = np.random.randint(1000, size=100, dtype=np.int64)
x = tensor(raw, chunk_size=10)
px = partition(x, kth, order=['size', 'id'])
res = self.executor.execute_tensor(px, concat=True)[0]
np.testing.assert_array_equal(
res[kth], np.partition(raw, kth, order=['size', 'id'])[kth])
# test flatten case
raw = np.random.rand(10, 10)
x = tensor(raw, chunk_size=5)
px = partition(x, kth, axis=None)
res = self.executor.execute_tensor(px, concat=True)[0]
np.testing.assert_array_equal(res[kth], np.partition(raw, kth, axis=None)[kth])
# test multi-dimension
raw = np.random.rand(10, 100)
x = tensor(raw, chunk_size=(2, 10))
kth = np.random.RandomState(0).randint(-10, 10, size=(3,))
px = partition(x, kth)
res = self.executor.execute_tensor(px, concat=True)[0]
np.testing.assert_array_equal(res[:, kth], np.partition(raw, kth)[:, kth])
raw = np.random.rand(10, | |
= self.getalleventtypes()
self.eventtype = ""
# setlayer
# make 800 lines of code, or make an unreadable cinderblock of chaos
# this seems more fun. But I definitely won't do it at work.
cond = "conditions"
# cons = "type"
# doc = "documentation"
name = 'layer'
assert name in self.tiposdisponiveis, f"{name} missing from {self.tiposdisponiveis}"
# first values are the lambdas for condition testing, last element is the message of the error to raise
# the element before the last is the error to be raised.
# this can be scaled a lot, even though it doesn't need to.
_ = [(lambda x: (x >= 0)), ValueError, "Layer can not be negative"]
((self.dictvalues[name])[cond]).append(_)
# ((self.dictvalues[name])[doc]) = """Used to have a reference for the main docs here. Took it out to avoid
# copyright issues."""
name = 'marked'
assert name in self.tiposdisponiveis, f"{name} missing from {self.tiposdisponiveis}"
_ = [(lambda x: (x == 0) or (x == 1)), ValueError, "Marked can only be 0 or 1."]
((self.dictvalues[name])[cond]).append(_)
# ((self.dictvalues[name])[doc]) = """Used to have a reference for the main docs here. Took it out to avoid
# copyright issues."""
name = 'start'
assert name in self.tiposdisponiveis, f"{name} missing from {self.tiposdisponiveis}"
# no condition to check
# ((self.dictvalues[name])[doc]) = """Used to have a reference for the main docs here. Took it out to avoid
# copyright issues."""
name = 'end'
assert name in self.tiposdisponiveis, f"{name} missing from {self.tiposdisponiveis}"
# ((self.dictvalues[name])[doc]) = """Used to have a reference for the main docs here. Took it out to avoid
# copyright issues."""
name = 'style'
assert name in self.tiposdisponiveis, f"{name} missing from {self.tiposdisponiveis}"
# ((self.dictvalues[name])[doc]) = """Used to have a reference for the main docs here. Took it out to avoid
# copyright issues."""
name = 'name'
assert name in self.tiposdisponiveis, f"{name} missing from {self.tiposdisponiveis}"
# ((self.dictvalues[name])[doc]) = """Used to have a reference for the main docs here. Took it out to avoid
# copyright issues."""
name = 'marginl'
assert name in self.tiposdisponiveis, f"{name} missing from {self.tiposdisponiveis}"
# ((self.dictvalues[name])[doc]) = """Used to have a reference for the main docs here. Took it out to avoid
# copyright issues."""
name = 'marginr'
assert name in self.tiposdisponiveis, f"{name} missing from {self.tiposdisponiveis}"
# ((self.dictvalues[name])[doc]) = """Used to have a reference for the main docs here. Took it out to avoid
# copyright issues."""
name = 'marginv'
assert name in self.tiposdisponiveis, f"{name} missing from {self.tiposdisponiveis}"
# ((self.dictvalues[name])[doc]) = """Used to have a reference for the main docs here. Took it out to avoid
# copyright issues."""
name = 'effect'
assert name in self.tiposdisponiveis, f"{name} missing from {self.tiposdisponiveis}"
# ((self.dictvalues[name])[doc]) = """Used to have a reference for the main docs here. Took it out to avoid
# copyright issues."""
name = 'text'
assert name in self.tiposdisponiveis, f"{name} missing from {self.tiposdisponiveis}"
# ((self.dictvalues[name])[doc]) = """Used to have a reference for the main docs here. Took it out to avoid
# copyright issues."""
# Initializing these values to avoid some issues
self.setmarked(0)
self.setlayer(0)
if isinstance(arg, Evento):
# These 2 sets should work even if arg doesn't have them set. What matters is that this is a copy of arg.
self.seteventtype(arg.geteventtype())
self.setformato(arg.getformato())
# Using every attribute name to set
# for _ in arg.getformato():
for _ in self.gettiposdisponiveis():
# setting every attribute using the names for both objects
self.setdictvalue(_, arg.getdictvalue(_))
# as a string, it is expected that it is a full line containing all 10 values and it's type
# these constraints will be checked by readevent.
elif isinstance(arg, str) and isinstance(argformato, Formato):
self.setformato(argformato.getformat())
self.readevent(arg)
elif isinstance(arg, list) and isinstance(argformato, Formato):
self.setformato(argformato.getformat())
self.setvalues(arg)
elif arg is None:
pass
else:
raise TypeError
def __repr__(self) -> str:
    """Return the formatted SSA line for this event.

    Used when saving the file and for debugging; the ten values follow
    the 'self.getformat()' order.
    :return: String. Empty string for events with missing values.
    """
    event_type = self.geteventtype()
    values = self.getvalues()
    # Invalid events are neither printed nor saved.
    if event_type is None:
        return ""
    # The first nine values are comma-separated; any None among them
    # makes the whole event invalid.
    head = []
    for idx in range(9):
        if values[idx] is None:
            return ""
        head.append(f"{values[idx]}")
    # The tenth value closes the line without a trailing comma.
    return f"{event_type}: " + ",".join(head) + f",{values[9]}\n"
def seteventtype(self, str1: str) -> 'Evento':
    """Set this event's type (case-insensitive).

    Valid inputs are the entries of self.getalleventtypes():
    ["dialogue", "comment", "picture", "sound", "movie", "command"]
    :param str1: String naming the event type.
    :return: self
    :raise TypeError: if str1 is not a string.
    :raise ValueError: if str1 is not a known event type.
    """
    if not isinstance(str1, str):
        raise TypeError(f"{str1} has to be a String.")
    normalized = str1.strip().lower()
    if normalized not in self.alleventtypes:
        # invalid event type
        raise ValueError(f"Arg: {normalized}, Acceptable values:{self.alleventtypes}")
    # Stored capitalized, e.g. "dialogue" -> "Dialogue".
    self.eventtype = normalized.capitalize()
    return self
def geteventtype(self) -> str:
    """Return the stored event type.

    The value is one of self.getalleventtypes(), capitalized:
    ["Dialogue", "Comment", "Picture", "Sound", "Movie", "Command"]
    :return: String if set. Empty String "" if it wasn't set.
    """
    return "{}".format(self.eventtype)
def __convertvalue__(self, name: str, value) -> Union[None, int, Timing, str, Margin, Effect, Text]:
    """Convert 'value' to the storage type associated with 'name'.

    Shared helper used by the getters and setters.
    :param name: String. Must be in self.tiposdisponiveis.
    :param value: any type valid for the name used. Call
        f"{(self.dictvalues[name])['documentation']}" for more info.
    :return: the converted value.
    """
    key = name.strip().lower()
    assert key in self.tiposdisponiveis, f"{name} - > {key} must be in {self.tiposdisponiveis}."
    # Storage type per attribute:
    # [layer, marked, start, end, style, name, marginl, marginr, marginv, effect, text]
    # [int, int, Timing, Timing, str, str, Margin, Margin, Margin, Effect, Text]
    if key in ("layer", "marked"):
        # None is passed through unchanged; anything else must be int-able
        return None if value is None else int(value)
    if key in ("start", "end"):
        return Timing(value)
    if key in ("style", "name"):
        return f"{value}"
    if key in ("marginl", "marginr", "marginv"):
        return Margin(value)
    if key == "effect":
        return Effect(value)
    if key == "text":
        return Text(value)
    # Unreachable after the assert above; kept as a safety net.
    raise ValueError
def setdictvalue(self, name: str, value: Union[None, int, float, str, Timing, Margin, Effect, Text]) -> 'Evento':
    """ Set the attribute with key 'name' to 'value', after converting it
    to its storage type and validating it against the attribute's
    condition lists.

    Each entry of self.dictvalues[name]["conditions"] is a list of the
    form [check, check, ..., error_class_or_instance, message]: every
    'check' is a callable applied to the converted value; on the first
    check that returns False the trailing error is raised (with 'message'
    as its argument when message is not None).

    :param name: String. Must be in 'self.tiposdisponiveis'.
    :param value: valid arg for that value constructor.
    :return: self.
    :raise TypeError: when 'name' is not a string.
    :raise ValueError: when 'name' is unknown, or when 'layer'/'marked'
        values are invalid.
    """
    if isinstance(name, str) is False:
        raise TypeError(f"{name} has to be a string.")
    # assert isinstance(name, str), f"{name} has to be a string."
    _name = (name.strip()).lower()
    # dictionary keys used below
    __cond = "conditions"
    __val = "value"
    if _name not in self.tiposdisponiveis:
        raise ValueError(f"{name} - > {_name} must be in {self.tiposdisponiveis}.")
    # assert _name in self.tiposdisponiveis, f"{name} - > {_name} must be in {self.tiposdisponiveis}."
    # converts value into the type that will be stored.
    _value = self.__convertvalue__(name, value)
    # Time for the giant overkill for testing conditions
    if len((self.dictvalues[_name])[__cond]) == 0:
        # no conditions registered: store unconditionally
        (self.dictvalues[_name])[__val] = _value
    else:
        # getting the list of lists to check conditions
        __clists = (self.dictvalues[_name])[__cond]
        # looping through the list of lists
        for __clist in __clists:
            # looping through the list, but the last 2 elements [error, errorarg]
            siz = len(__clist)
            # list should have at least 1 condition, and then the last element will be a message
            # and before the last will be the error to raise
            # so siz can be 0, 3 and higher only
            assert (siz != 1) and (siz != 2), f"{__clist} has {siz} elements. Coder mistake!"
            for _ in range(siz - 2):
                # if the lambda returns false (like an assert), raise the error at the end of the list
                if ((__clist[_])(_value)) is False:
                    if __clist[siz - 1] is None:
                        # no message: raise the error object/class as-is
                        raise __clist[siz - 2]
                    else:
                        # instantiate the error class with the message
                        raise (__clist[siz - 2])(f"{__clist[siz - 1]}")
                else:
                    # NOTE: the value is (re)stored after every passing check
                    (self.dictvalues[_name])[__val] = _value
    return self
def getdictvalue(self, name: str) -> Union[None, int, str, Timing, Margin, Effect, Text]:
""" Returns value with key 'name'.
:param name: String. Must be in 'self.tiposdisponiveis'.
:return: a copy of the stored value.
"""
if isinstance(name, | |
a row is inserted, these values will be taken
self.with_index = with_index # display index; also provide the original indices to the owner when updating value
self.col_defs = cols
self.immediate = immediate # e.g. for notebook pages immediate is False
self.can_add = can_add
self.can_remove = can_remove
self.can_insert = can_insert
self.can_remove_last = can_remove_last
if col_sizes is None:
self.col_sizes = []
else:
self.col_sizes = col_sizes
self.cur_row = self.cur_col = 0
self.editing_values = None # before pressing Apply; stored here because the editor grid might be deleted
self.grid = None
def set(self, value, *args, **kwargs):
    # Store the new value via the base class, rebuild the display
    # indices, drop any pending (not yet applied) editor content and
    # refresh the grid.
    Property.set(self, value, *args, **kwargs)
    self._initialize_indices()
    self.editing_values = None  # discard buffered, unapplied edits
    self.update_display()
def get(self):
    """Return the property value; with SKIP_EMPTY, rows that are empty
    or contain no visible content are filtered out."""
    if self.deactivated:
        return self.default_value
    if not self.SKIP_EMPTY:
        return self.value
    ret = []
    for row in self.value:
        # skip rows that are empty or all-falsy
        if not row or not any(row): continue
        for col, col_def in zip(row, self.col_defs):
            # NOTE(review): elsewhere (create_editor) col_defs entries are
            # (label, datatype) tuples, so 'col_def is self.STRING' looks
            # like it can never be true -- confirm the expected layout.
            if col_def is self.STRING:
                if col.strip():
                    # a string cell with visible content -> keep the row
                    ret.append(row)
                    break
            else:
                # any non-string cell counts as content -> keep the row
                ret.append(row)
                break
    return ret
def create_editor(self, panel, sizer):
    "Actually builds the grid to set the value of the property interactively"
    label = self._find_label()
    box_sizer = wx.StaticBoxSizer(wx.StaticBox(panel, -1, label), wx.VERTICAL)
    # the buttons ##################################################################################################
    extra_flag = wx.FIXED_MINSIZE
    if self.can_add or self.can_insert or self.can_remove:
        btn_sizer = wx.BoxSizer(wx.HORIZONTAL)
        if not self.immediate:
            # non-immediate mode buffers edits until Apply is pressed
            apply_btn = wx.Button(panel, wx.ID_ANY, _(" &Apply "), style=wx.BU_EXACTFIT)
            compat.SetToolTip(apply_btn, "Alt-A")
            btn_sizer.Add(apply_btn, 0, extra_flag | wx.RIGHT, 16)
        # the add/insert/remove buttons
        add_btn = insert_btn = remove_btn = None
        if self.can_add:
            add_btn = wx.Button(panel, wx.ID_ANY, _(" A&dd "), style=wx.BU_EXACTFIT)
            compat.SetToolTip(add_btn, "Ctrl-A")
            add_btn.Bind(wx.EVT_BUTTON, self.add_row)
        if self.can_insert:
            insert_btn = wx.Button(panel, wx.ID_ANY, _(" &Insert "), style=wx.BU_EXACTFIT)
            insert_btn.Bind(wx.EVT_BUTTON, self.insert_row)
            compat.SetToolTip(insert_btn, "Ctrl-I")
        if self.can_remove:
            remove_btn = wx.Button(panel, wx.ID_ANY, _(" &Remove "), style=wx.BU_EXACTFIT)
            remove_btn.Bind(wx.EVT_BUTTON, self.remove_row)
            compat.SetToolTip(remove_btn, "Ctrl-R")
        self.buttons = [add_btn, insert_btn, remove_btn]
        for btn in self.buttons:
            if btn: btn_sizer.Add( btn, 0, wx.LEFT | wx.RIGHT | extra_flag, 4 )
        if not self.immediate:
            self.buttons.insert(0, apply_btn)
        # Reset discards buffered edits and restores the stored value
        reset_btn = wx.Button(panel, wx.ID_ANY, _(" Rese&t "), style=wx.BU_EXACTFIT)
        compat.SetToolTip(reset_btn, "Alt-T or Ctrl-T")
        reset_btn.Bind(wx.EVT_BUTTON, self.reset)
        btn_sizer.AddStretchSpacer()
        btn_sizer.Add(reset_btn, 0, extra_flag | wx.LEFT, 16)
        self.buttons.append(reset_btn)
    else:
        self.buttons = []
    # the grid #####################################################################################################
    self.grid = wx.grid.Grid(panel, -1)
    self.grid.Name = self.name
    rowcount = len(self.value)
    # in immediate mode, one extra empty row serves for adding entries
    if self.can_add and self.immediate: rowcount += 1
    self.grid.CreateGrid( rowcount, len(self.col_defs) )
    self.grid.SetMargins(0, 0)
    for i, (label,datatype) in enumerate(self.col_defs):
        self.grid.SetColLabelValue(i, label)
        GridProperty.col_format[datatype](self.grid, i)
    # set row/col sizes
    self.grid.SetRowLabelSize(20 if self.with_index else 0)
    self.grid.SetColLabelSize(20)
    if self.col_sizes:
        self._set_col_sizes(self.col_sizes)
    # add the button sizer and the grid to the sizer ###############################################################
    if self.buttons:
        box_sizer.Add(btn_sizer, 0, wx.BOTTOM | wx.EXPAND, 2)
    box_sizer.Add(self.grid, 1, wx.EXPAND)
    # add our sizer to the main sizer XXX change if required
    sizer.Add(box_sizer, self._PROPORTION, wx.EXPAND)
    self.update_display(start_editing=True)
    self.grid.Bind(wx.grid.EVT_GRID_SELECT_CELL, self.on_select_cell)
    if self.buttons and not self.immediate:
        apply_btn.Bind(wx.EVT_BUTTON, self.apply)
    # Classic and Phoenix use different names for the cell-changed event
    if compat.IS_CLASSIC:
        self.grid.Bind(wx.grid.EVT_GRID_CMD_CELL_CHANGE, self.on_cell_changed)
    else:
        self.grid.Bind(wx.grid.EVT_GRID_CELL_CHANGED, self.on_cell_changed)
        self.grid.Bind(wx.grid.EVT_GRID_CELL_CHANGING, self.on_cell_changing) # for validation
    self.grid.Bind(wx.EVT_SET_FOCUS, self.on_focus)
    self._set_tooltip(self.grid.GetGridWindow(), *self.buttons)
    self.grid.Bind(wx.EVT_SIZE, self.on_size)
    # On wx 2.8 the EVT_CHAR_HOOK handler for the control does not get called, so on_char will be called from main
    #self.grid.Bind(wx.EVT_CHAR_HOOK, self.on_char)
    self._width_delta = None
def on_char(self, event):
    """Keyboard shortcut handler for the grid.

    Returns True when the key combination was handled here, False to let
    the event propagate to other handlers.
    """
    if isinstance(self.grid.FindFocus(), wx.TextCtrl):
        # a cell is being edited
        event.Skip()
        return True # avoid propagation
    self.on_focus()
    # modifier values: 1 == wx.MOD_ALT, 2 == wx.MOD_CONTROL
    key = (event.GetKeyCode(), event.GetModifiers())
    # handle F2 key
    if key==(wx.WXK_F2,0) and self.grid.CanEnableCellControl():
        #self.grid.MakeCellVisible(...)
        self.grid.EnableCellEditControl(enable=True)
        return True
    # handle Ctrl-I, Ctrl-A, Ctrl-R; Alt-A will be handled by the button itself
    if key in ((73,2),(73,1)) and self.can_insert:
        # Ctrl-I, Alt-I  (73 == ord('I'))
        self.insert_row(event)
    elif key in ((65,2),(68,1)) and self.can_add:
        # Ctrl-A, Alt-D  (65 == ord('A'), 68 == ord('D'))
        self.add_row(event)
    elif key==(65,1) and not self.immediate:
        # Alt-A
        self.apply(event)
    elif key==(82,2) and self.can_remove:
        # Ctrl-R  (82 == ord('R'))
        self.remove_row(event)
    elif key in ((84,2),(84,1)): # Ctrl-T, Alt-T  (84 == ord('T'))
        self.reset(event)
    elif key==(67,2): # Ctrl-C  (67 == ord('C'))
        if not self._copy(): event.Skip()
    elif key==(86,2): # Ctrl-V  (86 == ord('V'))
        if not self._paste(): event.Skip()
    elif key==(88,2): # Ctrl-X  (88 == ord('X'))
        if not self._cut(): event.Skip()
    else:
        #event.Skip()
        return False
    return True # handled
####################################################################################################################
# clipboard
def _get_selection(self, restrict=False):
    # Return (selected_rows, selected_cols) as sorted lists; selected
    # columns might be restricted to the editable ones.
    # Non-contiguous selections are not really handled correctly.
    rows = set()
    cols = set()
    grid = self.grid
    if grid.SelectedCells:  # individually selected cells (non-contiguous)
        for cell in grid.SelectedCells:
            rows.add(cell[0])
            cols.add(cell[1])
    if grid.SelectionBlockTopLeft:  # rectangular selection block(s)
        for (top, left), (bottom, right) in zip(grid.SelectionBlockTopLeft,
                                                grid.SelectionBlockBottomRight):
            cols.update( range(left, right+1) )
            rows.update( range(top, bottom+1) )
    if grid.SelectedCols:  # whole columns selected -> all rows
        cols.update(grid.SelectedCols)
        rows.update(range(grid.NumberRows))
    if grid.SelectedRows:  # whole rows selected -> all columns
        rows.update(grid.SelectedRows)
        cols.update(range(grid.NumberCols))
    if not rows:
        # no explicit selection: fall back to the current cursor cell
        if self.cur_row >= grid.NumberRows:
            rows.add(grid.NumberRows)
        else:
            rows.add(self.cur_row)
        cols.add(self.cur_col)
    # XXX check this:
    #if restrict and self.EDITABLE_COLS:
        #selected_cols = [col for col in selected_cols if col in self.EDITABLE_COLS]
    #else:
    return sorted(rows), sorted(cols)
def _to_clipboard(self, selection):
    # Serialize the selected cells as tab/newline separated text and
    # place it on the system clipboard.
    rows, cols = selection
    all_values = self.editing_values or self.value
    lines = []
    for r in rows:
        if r >= len(all_values):
            continue
        row = all_values[r]
        if row is None:
            # placeholder row: emit the right number of empty fields
            lines.append("\t".join([""] * len(cols)))
        else:
            lines.append("\t".join([str(row[c]) for c in cols]))
    if wx.TheClipboard.Open():
        wx.TheClipboard.SetData(wx.TextDataObject("\n".join(lines)))
        wx.TheClipboard.Close()
def _from_clipboard(self):
    # Fetch clipboard text and parse it into a list of row lists.
    # Returns None when the clipboard is unavailable, has no text, or
    # the parsed rows have differing lengths.
    if not wx.TheClipboard.Open():
        return None
    data = wx.TextDataObject()
    success = (wx.TheClipboard.IsSupported(data.GetFormat())
               and wx.TheClipboard.GetData(data))
    wx.TheClipboard.Close()
    if not success:
        return None
    parsed = [line.split("\t") for line in data.GetText().split("\n")]
    # all rows must have the same number of columns
    if len({len(row) for row in parsed}) == 1:
        return parsed
    return None
def _cut(self):
    """Cut the current selection: copy it to the clipboard, then delete
    whole rows (when complete rows are selected and removal is allowed)
    or blank the selected editable cells.

    Returns True on success so the Ctrl-X handler in on_char does not
    Skip() (i.e. propagate) the key event.
    """
    selection = self._get_selection()
    if not selection: return False  # defensive; _get_selection always returns a tuple
    self._to_clipboard(selection)
    selected_rows, selected_cols = selection
    values = self._ensure_editing_copy()
    if len(selected_cols)==len(self.col_defs) and self.can_remove:
        # delete complete rows, bottom-up so earlier indices stay valid
        for row in reversed(selected_rows):
            if row<len(values): del values[row]
    else:
        # otherwise just blank the editable cells
        editable_columns = self.EDITABLE_COLS or list( range(len(self.col_defs)) )
        for row in selected_rows:
            for col in selected_cols:
                if col in editable_columns:
                    values[row][col] = ""
    self.update_display()
    self._notify()
    return True  # fix: report success so the key event is not skipped
def _copy(self):
    """Copy the current selection to the clipboard.

    Returns True on success so the Ctrl-C handler in on_char does not
    Skip() (i.e. propagate) the key event.
    """
    selection = self._get_selection()
    if not selection: return False  # defensive; _get_selection always returns a tuple
    self._to_clipboard(selection)
    return True  # fix: report success so the key event is not skipped
def _paste(self):
    """Paste clipboard content into the grid.

    Supported cases: one clipboard row into one or more selected rows;
    row-for-row paste when the row counts match; a multi-row paste
    starting at a single selected row. For key/value grids
    (IS_KEY_VALUE with column 0 pasted) rows are matched by their
    first-column key instead.
    Returns True when the paste was carried out, None otherwise.
    """
    selected_rows, selected_cols = self._get_selection()
    value = self._from_clipboard()
    if not value: return
    clipboard_columns = len(value[0])
    paste_columns = self.EDITABLE_COLS or list( range(len(self.col_defs)) )
    if clipboard_columns == len(paste_columns):
        pass
    elif clipboard_columns==len(selected_cols):
        paste_columns = selected_cols
    else:
        wx.Bell()  # column count mismatch: cannot paste
        return
    # check and convert values; XXX use validation_res
    for row_value in value:
        for i,v in enumerate(row_value):
            col = paste_columns[i]
            try:
                if self.col_defs[col][1]==self.INT:
                    row_value[i] = int(v)
                elif self.col_defs[col][1]==self.FLOAT:
                    row_value[i] = float(v)
            except ValueError:
                wx.Bell()  # value does not fit the column's data type
                return
    values = self._ensure_editing_copy()
    # the cases:
    # single item to single or multiple cells
    # multiple to multiple -> dimensions must match
    # multiple to single -> starting from the selected line, must have enough lines or be extendable
    if not self.IS_KEY_VALUE or 0 not in paste_columns:
        if len(value)==1:
            # one clipboard row -> every selected row
            for row in selected_rows:
                if row>=len(values):
                    values.append(self.default_row[:])
                elif values[row] is None:
                    values[row] = self.default_row[:]
                for v,col in zip(value[0], paste_columns):
                    values[row][col] = v
        elif len(value)==len(selected_rows):
            # row-for-row paste
            for row,row_value in zip(selected_rows, value):
                if len(values)==row: values.append( None )
                if values[row] is None: values[row] = self.default_row[:]
                for v,col in zip(row_value, paste_columns):
                    values[row][col] = v
        elif len(selected_rows)==1 and (self.can_add or (min(selected_rows)+len(value) <= len(values))):
            # multi-row paste starting at the single selected row
            row = selected_rows.pop()
            for row_value in value:
                if len(values)==row: values.append( None )
                if values[row] is None: values[row] = self.default_row[:]
                for v,col in zip(row_value, paste_columns):
                    values[row][col] = v
                row += 1
        else:
            wx.Bell()
            return
    else:
        # first column is key
        keys = [row[0] for row in values]
        for row in value:
            # bug fix: measure the row being pasted, not the stale
            # 'row_value' left over from the validation loop above
            if len(row)>len(self.col_defs):
                row = row[:len(self.col_defs)]
            else:
                row += [''] * (len(self.col_defs) - len(row))
            key = row[0]
            if key in keys:
                values[keys.index(key)][1:] = row[1:]
            elif self.can_add:
                values.append( row )
            else:
                raise ValueError("not implemented") # not used so far
    self.update_display()
    self._notify()
    return True  # fix: report success so on_char does not Skip() the event
####################################################################################################################
def on_size(self, event):
    # Let the event propagate, then stretch the last column so the grid
    # fills the available width.
    event.Skip()
    if not self.grid: return
    if hasattr(self.grid, "ShowScrollbars"):
        #self.grid.ShowScrollbars(wx.SHOW_SB_DEFAULT,wx.SHOW_SB_NEVER) # keep horizontal scroll bar
        self.grid.ShowScrollbars(wx.SHOW_SB_NEVER,wx.SHOW_SB_NEVER) # no scroll bar
    if self._width_delta is None:
        # fixed width difference between parent and grid, plus padding
        self._width_delta = self.grid.GetParent().GetSize()[0] - self.grid.GetSize()[0] + 30
    last_col = len(self.col_defs) - 1
    self.grid.SetColSize(last_col, 10)
    fixed_width = sum(self.grid.GetColSize(n) for n in range(last_col))
    remaining = (self.grid.GetParent().GetSize()[0] - fixed_width
                 - self._width_delta - self.grid.GetRowLabelSize())
    # never shrink the last column below 100 pixels
    self.grid.SetColSize(last_col, max(remaining, 100))
def on_select_cell(self, event):
    # Remember the cursor cell; _get_selection falls back to it when no
    # explicit selection exists.
    self.cur_row, self.cur_col = event.GetRow(), event.GetCol()
    event.Skip()
    self.on_focus()
def update_display(self, start_editing=False):
if start_editing: self.editing = True
if not self.editing: return
# values is a list of lists with the values of the cells
value = self.editing_values if self.editing_values is not None else self.value
rows_new = len(value)
if self.can_add and self.immediate: rows_new += 1
# add or remove rows
rows = self.grid.GetNumberRows()
if rows < rows_new:
self.grid.AppendRows(rows_new - rows)
elif rows != rows_new:
self.grid.DeleteRows(rows_new, rows - rows_new)
# update content
self._changing_value = True
for i,row in enumerate(value):
for j, | |
<filename>test/test_tree.py
__copyright__ = "Copyright (C) 2012 <NAME>"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import sys
import pytest
import logging
import pyopencl as cl
from pyopencl.tools import ( # noqa
pytest_generate_tests_for_pyopencl as pytest_generate_tests)
from boxtree.tools import make_normal_particle_array
logger = logging.getLogger(__name__)
# {{{ bounding box test
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("dims", [2, 3])
@pytest.mark.parametrize("nparticles", [9, 4096, 10**5])
def test_bounding_box(ctx_factory, dtype, dims, nparticles):
    """Compare BoundingBoxFinder against a min/max computed on the host."""
    logging.basicConfig(level=logging.INFO)
    context = ctx_factory()
    queue = cl.CommandQueue(context)
    from boxtree.tools import AXIS_NAMES
    from boxtree.bounding_box import BoundingBoxFinder
    bbf = BoundingBoxFinder(context)
    axis_names = AXIS_NAMES[:dims]
    logger.info(f"{dtype} - {dims} {nparticles}")
    particles = make_normal_particle_array(queue, nparticles, dims, dtype)
    # host-side reference result
    host_min = [np.min(x.get()) for x in particles]
    host_max = [np.max(x.get()) for x in particles]
    bbox_struct, evt = bbf(particles, radii=None)
    bbox_struct = bbox_struct.get()
    # unpack the device result, one component per axis
    device_min = np.empty(dims, dtype)
    device_max = np.empty(dims, dtype)
    for i, ax in enumerate(axis_names):
        device_min[i] = bbox_struct["min_"+ax]
        device_max[i] = bbox_struct["max_"+ax]
    assert (host_min == device_min).all()
    assert (host_max == device_max).all()
# }}}
# {{{ test basic (no source/target distinction) tree build
def run_build_test(builder, queue, dims, dtype, nparticles, do_plot,
        max_particles_in_box=None, max_leaf_refine_weight=None,
        refine_weights=None, **kwargs):
    """Build a tree from random particles and verify its invariants:
    source ordering, per-box counts, particle containment in box extents
    and the leaf capacity / leaf refine-weight limit.

    Exactly one of max_particles_in_box or (refine_weights together with
    max_leaf_refine_weight) is expected; extra kwargs go to the builder.
    """
    dtype = np.dtype(dtype)
    # containment tolerance scales with the floating-point precision
    if dtype == np.float32:
        tol = 1e-4
    elif dtype == np.float64:
        tol = 1e-12
    else:
        raise RuntimeError("unsupported dtype: %s" % dtype)
    logger.info(75*"-")
    if max_particles_in_box is not None:
        logger.info("%dD %s - %d particles - max %d per box - %s" % (
            dims, dtype.type.__name__, nparticles, max_particles_in_box,
            " - ".join(f"{k}: {v}" for k, v in kwargs.items())))
    else:
        logger.info("%dD %s - %d particles - max leaf weight %d - %s" % (
            dims, dtype.type.__name__, nparticles, max_leaf_refine_weight,
            " - ".join(f"{k}: {v}" for k, v in kwargs.items())))
    logger.info(75*"-")
    particles = make_normal_particle_array(queue, nparticles, dims, dtype)
    if do_plot:
        import matplotlib.pyplot as pt
        pt.plot(particles[0].get(), particles[1].get(), "x")
    queue.finish()
    tree, _ = builder(queue, particles,
                      max_particles_in_box=max_particles_in_box,
                      refine_weights=refine_weights,
                      max_leaf_refine_weight=max_leaf_refine_weight,
                      debug=True, **kwargs)
    tree = tree.get(queue=queue)
    sorted_particles = np.array(list(tree.sources))
    unsorted_particles = np.array([pi.get() for pi in particles])
    # tree order must be the permutation described by user_source_ids
    assert (sorted_particles
            == unsorted_particles[:, tree.user_source_ids]).all()
    if refine_weights is not None:
        # bring the weights into tree (sorted) order for the leaf checks
        refine_weights_reordered = refine_weights.get()[tree.user_source_ids]
    all_good_so_far = True
    if do_plot:
        from boxtree.visualization import TreePlotter
        plotter = TreePlotter(tree)
        plotter.draw_tree(fill=False, edgecolor="black", zorder=10)
        plotter.set_bounding_box()
    from boxtree import box_flags_enum as bfe
    scaled_tol = tol*tree.root_extent
    for ibox in range(tree.nboxes):
        # Empty boxes exist in non-pruned trees--which themselves are undocumented.
        # These boxes will fail these tests.
        if not (tree.box_flags[ibox] & bfe.HAS_OWN_SRCNTGTS):
            continue
        extent_low, extent_high = tree.get_box_extent(ibox)
        # box extents must lie within the global bounding box
        assert (extent_low >= tree.bounding_box[0] - scaled_tol).all(), (
                ibox, extent_low, tree.bounding_box[0])
        assert (extent_high <= tree.bounding_box[1] + scaled_tol).all(), (
                ibox, extent_high, tree.bounding_box[1])
        start = tree.box_source_starts[ibox]
        box_children = tree.box_child_ids[:, ibox]
        existing_children = box_children[box_children != 0]
        # own particles plus children's cumulative counts == box cumulative count
        assert (tree.box_source_counts_nonchild[ibox]
                + np.sum(tree.box_source_counts_cumul[existing_children])
                == tree.box_source_counts_cumul[ibox])
        box_particles = sorted_particles[:,
            start:start+tree.box_source_counts_cumul[ibox]]
        # every particle of the box must lie inside its extent (within tolerance)
        good = (
            (box_particles < extent_high[:, np.newaxis] + scaled_tol)
            & (extent_low[:, np.newaxis] - scaled_tol <= box_particles))
        all_good_here = good.all()
        if do_plot and not all_good_here and all_good_so_far:
            # highlight only the first failing box
            pt.plot(
                box_particles[0, np.where(~good)[1]],
                box_particles[1, np.where(~good)[1]], "ro")
            plotter.draw_box(ibox, edgecolor="red")
        if not all_good_here:
            print("BAD BOX", ibox)
        if not (tree.box_flags[ibox] & bfe.HAS_CHILDREN):
            # Check that leaf particle density is as promised.
            nparticles_in_box = tree.box_source_counts_cumul[ibox]
            if max_particles_in_box is not None:
                if nparticles_in_box > max_particles_in_box:
                    print("too many particles ({} > {}); box {}".format(
                        nparticles_in_box, max_particles_in_box, ibox))
                    all_good_here = False
            else:
                assert refine_weights is not None
                box_weight = np.sum(
                    refine_weights_reordered[start:start+nparticles_in_box])
                if box_weight > max_leaf_refine_weight:
                    print("refine weight exceeded ({} > {}); box {}".format(
                        box_weight, max_leaf_refine_weight, ibox))
                    all_good_here = False
        all_good_so_far = all_good_so_far and all_good_here
    if do_plot:
        pt.gca().set_aspect("equal", "datalim")
        pt.show()
    assert all_good_so_far
def particle_tree_test_decorator(f):
    """Mark a test as OpenCL-based and parametrize it over dtype and dims."""
    # applied in the same order as before: opencl mark first, then the
    # dtype and dims parametrizations
    decorators = (
        pytest.mark.opencl,
        pytest.mark.parametrize("dtype", [np.float64, np.float32]),
        pytest.mark.parametrize("dims", [2, 3]),
    )
    for decorate in decorators:
        f = decorate(f)
    return f
@particle_tree_test_decorator
def test_single_box_particle_tree(ctx_factory, dtype, dims, do_plot=False):
    """Four particles with a capacity of 30: only the root box exists."""
    context = ctx_factory()
    command_queue = cl.CommandQueue(context)
    from boxtree import TreeBuilder
    run_build_test(TreeBuilder(context), command_queue, dims, dtype, 4,
            max_particles_in_box=30, do_plot=do_plot)


@particle_tree_test_decorator
def test_two_level_particle_tree(ctx_factory, dtype, dims, do_plot=False):
    """Fifty particles with a capacity of 30 force a shallow refinement."""
    context = ctx_factory()
    command_queue = cl.CommandQueue(context)
    from boxtree import TreeBuilder
    run_build_test(TreeBuilder(context), command_queue, dims, dtype, 50,
            max_particles_in_box=30, do_plot=do_plot)


@particle_tree_test_decorator
def test_unpruned_particle_tree(ctx_factory, dtype, dims, do_plot=False):
    """Build with skip_prune=True so empty boxes are kept."""
    context = ctx_factory()
    command_queue = cl.CommandQueue(context)
    from boxtree import TreeBuilder
    # test unpruned tree build
    run_build_test(TreeBuilder(context), command_queue, dims, dtype, 10**5,
            do_plot=do_plot, max_particles_in_box=30, skip_prune=True)


@particle_tree_test_decorator
def test_particle_tree_with_reallocations(ctx_factory, dtype, dims, do_plot=False):
    """A deliberately low nboxes_guess exercises the reallocation path."""
    context = ctx_factory()
    command_queue = cl.CommandQueue(context)
    from boxtree import TreeBuilder
    run_build_test(TreeBuilder(context), command_queue, dims, dtype, 10**5,
            max_particles_in_box=30, do_plot=do_plot, nboxes_guess=5)


@particle_tree_test_decorator
def test_particle_tree_with_many_empty_leaves(
        ctx_factory, dtype, dims, do_plot=False):
    """A tiny box capacity produces a deep tree with many empty leaves."""
    context = ctx_factory()
    command_queue = cl.CommandQueue(context)
    from boxtree import TreeBuilder
    run_build_test(TreeBuilder(context), command_queue, dims, dtype, 10**5,
            max_particles_in_box=5, do_plot=do_plot)


@particle_tree_test_decorator
def test_vanilla_particle_tree(ctx_factory, dtype, dims, do_plot=False):
    """Plain adaptive tree build with default settings."""
    context = ctx_factory()
    command_queue = cl.CommandQueue(context)
    from boxtree import TreeBuilder
    run_build_test(TreeBuilder(context), command_queue, dims, dtype, 10**5,
            max_particles_in_box=30, do_plot=do_plot)


@particle_tree_test_decorator
def test_explicit_refine_weights_particle_tree(ctx_factory, dtype, dims,
        do_plot=False):
    """Drive refinement by per-particle weights instead of a box capacity."""
    context = ctx_factory()
    command_queue = cl.CommandQueue(context)
    from boxtree import TreeBuilder
    nparticles = 10**5
    from pyopencl.clrandom import PhiloxGenerator
    rng = PhiloxGenerator(context, seed=10)
    refine_weights = rng.uniform(command_queue, nparticles, dtype=np.int32, a=1, b=10)
    run_build_test(TreeBuilder(context), command_queue, dims, dtype, nparticles,
            refine_weights=refine_weights, max_leaf_refine_weight=100,
            do_plot=do_plot)


@particle_tree_test_decorator
def test_non_adaptive_particle_tree(ctx_factory, dtype, dims, do_plot=False):
    """Tree build with kind="non-adaptive"."""
    context = ctx_factory()
    command_queue = cl.CommandQueue(context)
    from boxtree import TreeBuilder
    run_build_test(TreeBuilder(context), command_queue, dims, dtype, 10**4,
            max_particles_in_box=30, do_plot=do_plot, kind="non-adaptive")
# }}}
# {{{ source/target tree
@pytest.mark.opencl
@pytest.mark.parametrize("dims", [2, 3])
def test_source_target_tree(ctx_factory, dims, do_plot=False):
    """Build a tree with distinct sources and targets, then verify the
    orderings, per-box counts and particle containment for both."""
    logging.basicConfig(level=logging.INFO)
    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)
    nsources = 2 * 10**5
    ntargets = 3 * 10**5
    dtype = np.float64
    sources = make_normal_particle_array(queue, nsources, dims, dtype,
            seed=12)
    targets = make_normal_particle_array(queue, ntargets, dims, dtype,
            seed=19)
    if do_plot:
        import matplotlib.pyplot as pt
        pt.plot(sources[0].get(), sources[1].get(), "rx")
        pt.plot(targets[0].get(), targets[1].get(), "g+")
        pt.show()
    from boxtree import TreeBuilder
    tb = TreeBuilder(ctx)
    queue.finish()
    tree, _ = tb(queue, sources, targets=targets,
            max_particles_in_box=10, debug=True)
    tree = tree.get(queue=queue)
    sorted_sources = np.array(list(tree.sources))
    sorted_targets = np.array(list(tree.targets))
    unsorted_sources = np.array([pi.get() for pi in sources])
    unsorted_targets = np.array([pi.get() for pi in targets])
    # sources: tree order must be the permutation given by user_source_ids
    assert (sorted_sources
            == unsorted_sources[:, tree.user_source_ids]).all()
    # targets: invert sorted_target_ids to recover the user ordering
    user_target_ids = np.empty(tree.ntargets, dtype=np.intp)
    user_target_ids[tree.sorted_target_ids] = np.arange(tree.ntargets, dtype=np.intp)
    assert (sorted_targets
            == unsorted_targets[:, user_target_ids]).all()
    all_good_so_far = True
    if do_plot:
        from boxtree.visualization import TreePlotter
        plotter = TreePlotter(tree)
        plotter.draw_tree(fill=False, edgecolor="black", zorder=10)
        plotter.set_bounding_box()
    tol = 1e-15
    for ibox in range(tree.nboxes):
        extent_low, extent_high = tree.get_box_extent(ibox)
        # box extents must lie within the global bounding box
        assert (extent_low
                >= tree.bounding_box[0] - 1e-12*tree.root_extent).all(), ibox
        assert (extent_high
                <= tree.bounding_box[1] + 1e-12*tree.root_extent).all(), ibox
        src_start = tree.box_source_starts[ibox]
        tgt_start = tree.box_target_starts[ibox]
        box_children = tree.box_child_ids[:, ibox]
        existing_children = box_children[box_children != 0]
        # own particles plus children's cumulative counts must equal the
        # box's cumulative count, for sources and targets alike
        assert (tree.box_source_counts_nonchild[ibox]
                + np.sum(tree.box_source_counts_cumul[existing_children])
                == tree.box_source_counts_cumul[ibox])
        assert (tree.box_target_counts_nonchild[ibox]
                + np.sum(tree.box_target_counts_cumul[existing_children])
                == tree.box_target_counts_cumul[ibox])
        for what, particles in [
                ("sources", sorted_sources[:,
                    src_start:src_start+tree.box_source_counts_cumul[ibox]]),
                ("targets", sorted_targets[:,
                    tgt_start:tgt_start+tree.box_target_counts_cumul[ibox]]),
                ]:
            # every particle must lie inside its box (within tolerance)
            good = (
                (particles < extent_high[:, np.newaxis] + tol)
                & (extent_low[:, np.newaxis] - tol <= particles)
                ).all(axis=0)
            all_good_here = good.all()
            if do_plot and not all_good_here:
                pt.plot(
                    particles[0, np.where(~good)[0]],
                    particles[1, np.where(~good)[0]], "ro")
                plotter.draw_box(ibox, edgecolor="red")
                pt.show()
            if not all_good_here:
                print("BAD BOX %s %d" % (what, ibox))
            all_good_so_far = all_good_so_far and all_good_here
    assert all_good_so_far
    if do_plot:
        pt.gca().set_aspect("equal", "datalim")
        pt.show()
# }}}
# {{{ test sources/targets-with-extent tree
@pytest.mark.opencl
@pytest.mark.parametrize("dims", [2, 3])
@pytest.mark.parametrize("extent_norm", ["linf", "l2"])
def test_extent_tree(ctx_factory, dims, extent_norm, do_plot=False):
    # Build a tree in which both sources and targets carry extents (radii),
    # transfer it to the host, and check the resulting particle orderings.
    # NOTE(review): the function is truncated in this view -- the checks
    # following the sorted_sources/sorted_targets extraction are not shown.
    logging.basicConfig(level=logging.INFO)
    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)
    nsources = 100000
    ntargets = 200000
    dtype = np.float64
    npoint_sources_per_source = 16
    sources = make_normal_particle_array(queue, nsources, dims, dtype,
            seed=12)
    targets = make_normal_particle_array(queue, ntargets, dims, dtype,
            seed=19)
    # Only sources contribute refinement weight; targets count as zero.
    refine_weights = cl.array.zeros(queue, nsources+ntargets, np.int32)
    refine_weights[:nsources] = 1
    from pyopencl.clrandom import PhiloxGenerator
    rng = PhiloxGenerator(queue.context, seed=13)
    # Radii are drawn log-uniformly from [2**-10, 2**0].
    source_radii = 2**rng.uniform(queue, nsources, dtype=dtype,
            a=-10, b=0)
    target_radii = 2**rng.uniform(queue, ntargets, dtype=dtype,
            a=-10, b=0)
    from boxtree import TreeBuilder
    tb = TreeBuilder(ctx)
    queue.finish()
    dev_tree, _ = tb(queue, sources, targets=targets,
            source_radii=source_radii,
            target_radii=target_radii,
            extent_norm=extent_norm,
            refine_weights=refine_weights,
            max_leaf_refine_weight=20,
            #max_particles_in_box=10,
            # Set artificially small, to exercise the reallocation code.
            nboxes_guess=10,
            debug=True,
            stick_out_factor=0)
    logger.info("transfer tree, check orderings")
    tree = dev_tree.get(queue=queue)
    if do_plot:
        # Visual debugging aid: plot particles and box outlines (2D only).
        import matplotlib.pyplot as pt
        pt.plot(sources[0].get(), sources[1].get(), "rx")
        pt.plot(targets[0].get(), targets[1].get(), "g+")
        from boxtree.visualization import TreePlotter
        plotter = TreePlotter(tree)
        plotter.draw_tree(fill=False, edgecolor="black", zorder=10)
        plotter.draw_box_numbers()
        plotter.set_bounding_box()
        pt.gca().set_aspect("equal", "datalim")
        pt.show()
    sorted_sources = np.array(list(tree.sources))
    sorted_targets = np.array(list(tree.targets))
| |
PHP 123,456.79),
* `multicurrency.pound.PoundSterling` (£123,456.79 | £123,456.79 | GBP 123,456.79),
* `multicurrency.pound.PoundSterlingGB` (£123,456.79 | GB£123,456.79 | GBP 123,456.79),
* `multicurrency.pound.PoundSterlingGG` (£123,456.79 | GG£123,456.79 | GBP 123,456.79),
* `multicurrency.pound.PoundSterlingIM` (£123,456.79 | IM£123,456.79 | GBP 123,456.79),
* `multicurrency.pound.PoundSterlingIO` (£123,456.79 | IO£123,456.79 | GBP 123,456.79),
* `multicurrency.pula.Pula` (P 123,456.79 | P 123,456.79 | BWP 123,456.79),
* `multicurrency.rial.QatariRial` (ر.ق. ١٢٣٬٤٥٦٫٧٩ | ر.ق. ١٢٣٬٤٥٦٫٧٩ | QAR 123,456.79),
* `multicurrency.quetzal.Quetzal` (Q 123,456.79 | Q 123,456.79 | GTQ 123,456.79),
* `multicurrency.rand.Rand` (R 123 456.79 | R 123 456.79 | ZAR 123,456.79),
* `multicurrency.rand.RandLS` (R 123,456.79 | LSR 123,456.79 | ZAR 123,456.79),
* `multicurrency.rand.RandNA` (R 123 456.79 | NAR 123 456.79 | ZAR 123,456.79),
* `multicurrency.rand.RandZA` (R 123 456.79 | ZAR 123 456.79 | ZAR 123,456.79),
* `multicurrency.rial.RialOmani` (ر.ع. ١٢٣٬٤٥٦٫٧٨٩ | ر.ع. ١٢٣٬٤٥٦٫٧٨٩ | OMR 123,456.789),
* `multicurrency.riel.Riel` (123.456,79៛ | 123.456,79៛ | KHR 123,456.79),
* `multicurrency.rufiyaa.Rufiyaa` (ރ. 123,456.79 | ރ. 123,456.79 | MVR 123,456.79),
* `multicurrency.rupiah.Rupiah` (Rp 123.456,79 | Rp 123.456,79 | IDR 123,456.79),
* `multicurrency.ruble.RussianRuble` (123 456,79 ₽ | 123 456,79 ₽ | RUB 123,456.79),
* `multicurrency.ruble.RussianRubleGE` (123 456,79 ₽ | 123 456,79 GE₽ | RUB 123,456.79),
* `multicurrency.ruble.RussianRubleRU` (123 456,79 ₽ | 123 456,79 RU₽ | RUB 123,456.79),
* `multicurrency.franc.RwandaFranc` (₣ 123.457 | RW₣ 123.457 | RWF 123,457),
* `multicurrency.pound.SaintHelenaPound` (£123,456.79 | SH£123,456.79 | SHP 123,456.79),
* `multicurrency.riyal.SaudiRiyal` (ر.س. ١٢٣٬٤٥٦٫٧٩ | ر.س. ١٢٣٬٤٥٦٫٧٩ | SAR 123,456.79),
* `multicurrency.dinar.SerbianDinarSR` (123 456,79 дин. | 123 456,79 дин. | RSD 123,456.79),
* `multicurrency.dinar.SerbianDinarXK` (123.456,79 дин. | 123.456,79 дин. | RSD 123,456.79),
* `multicurrency.rupee.SeychellesRupee` (₨ 123,456.79 | ₨ 123,456.79 | SCR 123,456.79),
* `multicurrency.dollar.SingaporeDollar` ($123,456.79 | $123,456.79 | SGD 123,456.79),
* `multicurrency.dollar.SingaporeDollarBN` ($123,456.79 | BN$123,456.79 | SGD 123,456.79),
* `multicurrency.dollar.SingaporeDollarSG` ($123,456.79 | SG$123,456.79 | SGD 123,456.79),
* `multicurrency.dollar.SolomonIslandsDollar` ($123,456.79 | SB$123,456.79 | SBD 123,456.79),
* `multicurrency.som.Som` (123 456,79 Лв | 123 456,79 Лв | KGS 123,456.79),
* `multicurrency.shilling.SomaliShilling` (SSh 123,456.79 | SSh 123,456.79 | SOS 123,456.79),
* `multicurrency.somoni.Somoni` (ЅМ 123,456.79 | ЅМ 123,456.79 | TJS 123,456.79),
* `multicurrency.won.SouthKoreanWon` (₩123,457 | ₩123,457 | KRW 123,457),
* `multicurrency.rupee.SriLankaRupee` (රු. 123,456.79 | රු. 123,456.79 | LKR 123,456.79),
* `multicurrency.pound.SudanesePound` (١٢٣٬٤٥٦٫٧٩ ج.س | ١٢٣٬٤٥٦٫٧٩ ج.س | SDG 123,456.79),
* `multicurrency.dollar.SurinameDollar` ($ 123.456,79 | SR$ 123.456,79 | SRD 123,456.79),
* `multicurrency.krona.SwedishKrona` (123 456,79 kr | 123 456,79 kr | SEK 123,456.79),
* `multicurrency.franc.SwissFranc` (₣ 123'456.79 | ₣ 123'456.79 | CHF 123,456.79),
* `multicurrency.franc.SwissFrancCH` (₣ 123'456.79 | CH₣ 123'456.79 | CHF 123,456.79),
* `multicurrency.franc.SwissFrancLI` (₣ 123'456.79 | LI₣ 123'456.79 | CHF 123,456.79),
* `multicurrency.pound.SyrianPound` (١٢٣٬٤٥٦٫٧٩ ل.س | ١٢٣٬٤٥٦٫٧٩ ل.س | SYP 123,456.79),
* `multicurrency.dollar.TaiwanDollar` ($123,456.79 | TW$123,456.79 | TWD 123,456.79),
* `multicurrency.taka.Taka` (১২৩,৪৫৬.৭৯৳ | ১২৩,৪৫৬.৭৯৳ | BDT 123,456.79),
* `multicurrency.tala.Tala` (T 123,456.79 | T 123,456.79 | WST 123,456.79),
* `multicurrency.shilling.TanzanianShilling` (TSh 123,456.79 | TSh 123,456.79 | TZS 123,456.79),
* `multicurrency.tenge.Tenge` (123 456,79 〒 | 123 456,79 〒 | KZT 123,456.79),
* `multicurrency.dollar.TrinidadandTobagoDollar` ($123,456.79 | TT$123,456.79 | TTD 123,456.79),
* `multicurrency.tugrik.Tugrik` (₮ 123,456.79 | ₮ 123,456.79 | MNT 123,456.79),
* `multicurrency.dinar.TunisianDinar` (د.ت. 123.456,789 | د.ت. 123.456,789 | TND 123,456.789),
* `multicurrency.lira.TurkishLira` (₤123.456,79 | ₤123.456,79 | TRY 123,456.79),
* `multicurrency.lira.TurkishLiraCY` (₤123.456,79 | CY₤123.456,79 | TRY 123,456.79),
* `multicurrency.lira.TurkishLiraTR` (₤123.456,79 | TR₤123.456,79 | TRY 123,456.79),
* `multicurrency.dirham.UAEDirham` (د.إ. ١٢٣٬٤٥٦٫٧٩ | د.إ. ١٢٣٬٤٥٦٫٧٩ | AED 123,456.79),
* `multicurrency.dollar.USDollar` ($123,456.79 | US$123,456.79 | USD 123,456.79),
* `multicurrency.dollar.USDollarAS` ($123,456.79 | AS$123,456.79 | USD 123,456.79),
* `multicurrency.dollar.USDollarFM` ($123,456.79 | FM$123,456.79 | USD 123,456.79),
* `multicurrency.dollar.USDollarGU` ($123,456.79 | GU$123,456.79 | USD 123,456.79),
* `multicurrency.dollar.USDollarHT` ($123,456.79 | HT$123,456.79 | USD 123,456.79),
* `multicurrency.dollar.USDollarIO` ($123,456.79 | IO$123,456.79 | USD 123,456.79),
* `multicurrency.dollar.USDollarMH` ($123,456.79 | MH$123,456.79 | USD 123,456.79),
* `multicurrency.dollar.USDollarMP` ($123,456.79 | MP$123,456.79 | USD 123,456.79),
* `multicurrency.dollar.USDollarPA` ($123,456.79 | PA$123,456.79 | USD 123,456.79),
* `multicurrency.dollar.USDollarPC` ($123,456.79 | PC$123,456.79 | USD 123,456.79),
* `multicurrency.dollar.USDollarPR` ($123,456.79 | PR$123,456.79 | USD 123,456.79),
* `multicurrency.dollar.USDollarPW` ($123,456.79 | PW$123,456.79 | USD 123,456.79),
* `multicurrency.dollar.USDollarTC` ($123,456.79 | TC$123,456.79 | USD 123,456.79),
* `multicurrency.dollar.USDollarVG` ($123,456.79 | VG$123,456.79 | USD 123,456.79),
* `multicurrency.dollar.USDollarVI` ($123,456.79 | VI$123,456.79 | USD 123,456.79),
* `multicurrency.shilling.UgandaShilling` (USh 123,457 | USh 123,457 | UGX 123,457),
* `multicurrency.sum.UzbekistanSum` (123 456,79 сўм | 123 456,79 сўм | UZS 123,456.79),
* `multicurrency.vatu.Vatu` (Vt 123,457 | Vt 123,457 | VUV 123,457),
* `multicurrency.rial.YemeniRial` (١٢٣٬٤٥٦٫٧٩ ﷼ | ١٢٣٬٤٥٦٫٧٩ ﷼ | YER 123,456.79),
* `multicurrency.yen.Yen` (¥123,457 | ¥123,457 | JPY 123,457),
* `multicurrency.yuan.Yuan` (¥123,456.79 | ¥123,456.79 | CNY 123,456.79),
* `multicurrency.kwacha.ZambianKwacha` (ZK 123,456.79 | ZK 123,456.79 | ZMW 123,456.79),
* `multicurrency.dollar.ZimbabweDollar` ($ 123,456.79 | ZW$ 123,456.79 | ZWL 123,456.79)
List of supported cryptocurrencies (with the default, localized, and
international formats):
* `multicurrency.crypto.Bitcoin` (₿123,456.78900000 | ₿123,456.78900000 | XBT 123,456.78900000),
* `multicurrency.crypto.EOS` (ε123,456.7890 | ε123,456.7890 | EOS 123,456.7890),
* `multicurrency.crypto.Ethereum` (Ξ123,456.789000000000000000 | Ξ123,456.789000000000000000 | ETH 123,456.789000000000000000),
* `multicurrency.crypto.Monero` (ɱ123,456.789000000000 | ɱ123,456.789000000000 | XMR 123,456.789000000000),
* `multicurrency.crypto.Ripple` (✕123,456.789000 | ✕123,456.789000 | XRP 123,456.789000),
* `multicurrency.crypto.StellarLumens` (*123,456.7890000 | *123,456.7890000 | XLM 123,456.7890000),
* `multicurrency.crypto.Tezos` (ꜩ123,456.789000 | ꜩ123,456.789000 | XTZ 123,456.789000),
* `multicurrency.crypto.Zcash` (ⓩ123,456.78900000 | ⓩ123,456.78900000 | ZEC 123,456.78900000)
## Usage
Simple usage example:
>>> from multicurrency import Euro
>>> euro = Euro(1000)
>>> print(euro)
1.000,00 €
>>> print(euro + Euro(0.50))
1.000,50 €
Unsupported currencies can be represented by creating a generic
`multicurrency.currency.Currency` object with the desired settings.
>>> from multicurrency import Currency
>>> bitcoin = Currency(
... amount=1000,
... alpha_code='XBT',
... numeric_code='0',
... symbol='₿',
... symbol_ahead=True,
... symbol_separator='',
... decimal_places=8,
... decimal_sign='.',
... grouping_sign=',')
>>> print(bitcoin)
₿1,000.00000000
To help working with unsupported currencies the settings can be defined
in a dictionary and used when needed:
>>> from multicurrency import Currency
>>> settings = {
... 'alpha_code':'XBT',
... 'numeric_code':'0',
... 'symbol':'₿',
... 'symbol_ahead':True,
... 'symbol_separator':'',
... 'decimal_places':8,
... 'decimal_sign':'.',
... 'grouping_sign':','}
>>> bitcoin = Currency(1000, **settings)
>>> print(bitcoin)
₿1,000.00000000
Currencies can also be represented with the ISO 4217 three-letter code
instead of the `symbol`.
>>> from multicurrency import Euro
>>> euro = Euro(1000, international=True)
>>> print(euro)
EUR 1,000.00
## Localization
The multicurrency library allows you to obtain a localized version of the
currency representation:
>>> from multicurrency import TaiwanDollar, USDollar
>>> tw_dollar = TaiwanDollar('27.65')
>>> us_dollar = USDollar('1')
>>> print(us_dollar.lstr(), '=', tw_dollar.lstr())
US$1.00 = TW$27.65
## Precision
The multicurrency library has a user alterable precision (defaulting to
28 places) which can be as large as needed for a given problem:
>>> from multicurrency import CurrencyContext, Euro
>>> for precision in [1, 2, 3, 4, 5, 6]:
... CurrencyContext.prec = precision
... result = Euro(1) / 7
... print(result.pstr(precision))
0,1 €
0,14 €
0,143 €
0,1429 €
0,14286 €
0,142857 €
It also has a user alterable rounding method (defaulting to
ROUND_HALF_EVEN) which can be changed as needed:
>>> from multicurrency import CurrencyContext, Euro
>>> CurrencyContext.prec = 4
>>> for rounding in [
... 'ROUND_CEILING',
... 'ROUND_DOWN',
... 'ROUND_FLOOR',
... 'ROUND_HALF_DOWN',
... 'ROUND_HALF_EVEN',
... 'ROUND_HALF_UP',
... 'ROUND_UP',
... 'ROUND_05UP']:
... CurrencyContext.rounding = rounding
... result = Euro(1) / 7
... print(f'{rounding:16}', result.pstr(4))
ROUND_CEILING 0,1429 €
ROUND_DOWN 0,1428 €
ROUND_FLOOR 0,1428 €
ROUND_HALF_DOWN 0,1429 €
ROUND_HALF_EVEN 0,1429 €
ROUND_HALF_UP 0,1429 €
ROUND_UP 0,1429 €
ROUND_05UP 0,1428 €
Default values can be restored with:
>>> from multicurrency import (
... CurrencyContext,
... DEFAULT_PRECISION,
... DEFAULT_ROUNDING)
>>> CurrencyContext.prec = DEFAULT_PRECISION
>>> CurrencyContext.rounding = DEFAULT_ROUNDING
>>> print(CurrencyContext.prec, CurrencyContext.rounding)
28 ROUND_HALF_EVEN
Supported rounding methods are described on the
`multicurrency.currency.CurrencyContext` class.
## Formatting
The `multicurrency.currency.Currency` class allows you to create and
customize your own value formatting behaviors using the same
implementation as the built-in `format()` method.
The specification for the formatting feature is as follows:
[dp][ds][gs][gp][spec]
The meaning of the various alignment options is as follows:
| Option | Type | Meaning |
|:-------|:-------|:--------------------------------------------------------------------------------------|
| [dp] | int+ | The number of decimal places (integer number with one or more digits). |
| [ds] | str{1} | The decimal sign (single non-digit character). |
| [gs] | str{1} | The grouping sign (single non-digit character). |
| [gp] | int+ | The number of digits to group the number by (integer number with one or more digits). |
| [spec] | str | The formatting spec (a string with the order of currency parts). |
All fields are optional, although for the first four fields, setting any
one of them requires all the fields to its left to be set as well.
The available string currency parts for `[spec]` are:
| Part | Meaning |
|:-----|:----------------------------------------------------------------------------------------------------------------------------|
| %a | The currency's amount as seen in the default representation of the currency (the numeral system of the currency's country). |
| %A | The currency's amount in (western) arabic numerals. |
| %c | The currency's alpha code (as seen on the international representation of the currency). |
| %s | The currency's symbol. |
| %_ | The currency's symbol separator. |
Basic examples of how to use the `multicurrency.currency.Currency`
formatting feature:
Using the built-in `format()` method
>>> from multicurrency import Euro
>>> euro = Euro(1000000*(1/7))
>>> format(euro, '4%a')
'142.857,1429'
Using the `'new' string` formatting method
>>> from multicurrency import Euro
>>> euro = Euro(1000000*(1/7))
>>> '{:4%a}'.format(euro)
'142.857,1429'
Using the `f-string` method
>>> from multicurrency import Euro
>>> euro = Euro(1000000*(1/7))
>>> f'{euro:4%a}'
'142.857,1429'
Some more examples of the `multicurrency.currency.Currency` formatting
feature usage (using the `f-string` method):
>>> from multicurrency import Euro
>>> euro = Euro(1000000*(1/7))
>>> print(euro)
142.857,14 €
>>> print(f'{euro}')
142.857,14 €
>>> print(f'{euro:_}')
142.857_14 €
>>> print(f'{euro:.,}')
142,857.14 €
>>> print(f'{euro:3.,}')
142,857.143 €
>>> print(f'{euro:3.,2}')
14,28,57.143 €
>>> print(f'{euro:_2}')
14.28.57_14 €
>>> print(f'{euro:.,2}')
14,28,57.14 €
>>> print(f'{euro:3%a}')
142.857,143
>>> print(f'{euro:3_%a}')
142.857_143
>>> print(f'{euro:3#_%a}')
142_857#143
>>> print(f'{euro:3.,2%a}')
14,28,57.143
>>> print(f'{euro:3.,4%a}')
14,2857.143
>>> print(f'{euro:.,4%a}')
14,2857.14
>>> | |
<gh_stars>10-100
# -*- coding: utf-8 -*-
# pylint: disable-msg=E0202, E0102, E1101, E1103, E1001
"""
Created on Fri Jan 24 09:46:28 2014
This module contains the definition of the interpretation class, which is the
basic unit of the search process which tries to solve interpretation problems.
@author: <NAME>
"""
from .observable import Observable, EventObservable, between, overlap, end_cmp_key
from .interval import Interval as Iv
from .constraint_network import verify
import kardioml.segmentation.teijeiro.knowledge.abstraction_patterns as ap
import kardioml.segmentation.teijeiro.knowledge.constants as C
import kardioml.segmentation.teijeiro.acquisition.obs_buffer as obsbuf
import sortedcontainers
import weakref
import copy
import numpy as np
from collections import deque, namedtuple as nt
##################################################
## Utility functions to detect merge situations ##
##################################################
def _pat_mergeable(p1, p2):
"""
Compare two *AbstractionPattern* instances for equality regarding an
interpretation merging operation. Evidence and hypothesis comparison is
assumed to be positive, so only the automata and the initial and final
states are compared.
"""
if p1 is None or p2 is None:
return p1 is p2
return p1.automata is p2.automata and p1.istate == p2.istate and p1.fstate == p2.fstate
def _focus_mergeable(f1, f2):
    """
    Compare two focuses of attention for equality regarding an interpretation
    merging operation. The length of the lists within the two focuses are
    assumed to be equal, but this has to be tested separately.
    """
    # Walk both stacks from the top down, stopping at the first mismatch.
    for (o1, p1), (o2, p2) in zip(reversed(f1._lst), reversed(f2._lst)):
        if not (o1 == o2 and _pat_mergeable(p1, p2)):
            return False
    return True
class PastMetrics(nt('PastMetrics', 'time, abst, abstime, nhyp')):
    """
    Named tuple storing the summary information needed to evaluate an
    interpretation up to a specific point in time, so that older
    observations can be discarded.
    """

    __slots__ = ()

    def diff(self, other):
        """
        Returns the component-wise difference with another PastMetrics
        instance, as a numpy array of three values. The *time* attribute
        is excluded from the difference.
        """
        deltas = (self.abst - other.abst,
                  self.abstime - other.abstime,
                  self.nhyp - other.nhyp)
        return np.array(deltas)

    def patch(self, patch):
        """
        Builds a new PastMetrics object by applying a difference array
        obtained through the *diff* method. *time* is carried over
        unchanged.

        Parameters
        ----------
        patch:
            Array, list or tuple with exactly three numerical values.
        """
        patched = np.array(self[1:] + patch)
        return PastMetrics(self.time, *patched)
class Focus(object):
    """
    This class represents the focus of attention of an interpretation, and it
    encapsulates all data and functionality related with its management.

    Internally it is a stack of (observation_or_finding, pattern) tuples;
    the tuple at the top of the stack is the one currently being worked on.
    """

    __slots__ = '_lst'

    def __init__(self, parent_focus=None):
        """
        Initializes a new empty focus of attention, or a shallow copy of an
        existing focus.

        Instance Properties
        -------------------
        _lst:
            Stack containing a number of tuples (observation_or_finding,
            pattern). If 'observation_or_finding' is a finding, then 'pattern'
            is the abstraction pattern generating such finding. If is an
            observation, then 'pattern' is the pattern for which the
            observation is its hypothesis, or None if it is an initial
            observation.
        """
        if parent_focus is None:
            self._lst = []
        else:
            # Shallow copy: the (obs, pattern) tuples are shared with the
            # parent, but the stack itself is independent.
            self._lst = parent_focus._lst[:]

    def __len__(self):
        return len(self._lst)

    def __contains__(self, key):
        # Membership is tested by identity, not equality, since observations
        # are unique objects within an interpretation.
        return any(key is v for v, _ in self._lst)

    def __nonzero__(self):
        # Python 2 truth-value hook: a focus is truthy iff it is non-empty.
        return bool(self._lst)

    def push(self, obs, pattern):
        """
        Inserts a new observation or finding in the focus of attention.
        """
        self._lst.append((obs, pattern))

    def pop(self, n=1):
        """Removes 'n' elements from the focus of attention (1 by default)"""
        # BUGFIX: was 'del self._lst[-n]', which removed only the single
        # element at index -n (so for n > 1 the top of the stack survived).
        # Slice deletion removes the top 'n' elements, matching the
        # documented behavior.
        del self._lst[-n:]

    @property
    def top(self):
        """Obtains the element at the top of the focus of attention"""
        return self._lst[-1]

    @top.setter
    def top(self, value):
        """Modifies the element at the top of the focus of attention"""
        self._lst[-1] = value

    @property
    def patterns(self):
        """
        Obtains an iterator over the patterns supporting the observations or
        findings in the focus of attention, starting at the top of the stack.
        """
        return (p for _, p in reversed(self._lst))

    @property
    def nhyp(self):
        """Returns the number of abstraction hypotheses in this focus"""
        return sum(1 for o, p in self._lst if p is not None and o is p.hypothesis)

    @property
    def earliest_time(self):
        """
        Returns the minimum starting time of observations or findings in
        this focus of attention.
        """
        return min(o.earlystart for o, _ in self._lst)

    def get_delayed_finding(self, observation):
        """
        Obtains the finding that will be matched with an observation once
        the observation is fully observed, or None if the observation will
        not be matched with a finding.
        """
        # Walk the stack top-down; the candidate finding, if any, sits
        # directly below the observation entry.
        for i in range(len(self._lst) - 1, 0, -1):
            if self._lst[i][0] is observation:
                f, p = self._lst[i - 1]
                if p is not None and f is p.finding:
                    return f
                break
        return None

    def match(self, finding, obs):
        """
        Performs a matching operation between the finding at the top of the
        focus with a given observation, checking the time and value consistency
        of the matching. After consistency is checked, the finding is removed
        from the focus by means of a pop() operation.
        """
        f, pat = self._lst[-1]
        assert finding is f
        verify(
            obs not in pat.evidence[pat.get_evidence_type(f)[0]],
            'Observation {0} is already in the evidence of {1} pattern',
            (obs, pat),
        )
        # Work on a copy of the pattern so sibling interpretations sharing
        # the original pattern object are not affected by the match.
        patcp = copy.copy(pat)
        patcp.match(f, obs)
        # The hypothesis generating the finding is updated
        self._lst[-2] = (patcp.hypothesis, patcp)
        # And the matched finding removed from the focus
        del self._lst[-1]
class Interpretation(object):
"""
This class represents the interpretation entity, which is a consistent
group of abstraction hypotheses combined by the knowledge expressed in
abstraction patterns. It is the basic entity in our search process, and
the result of an interpretation process.
"""
__slots__ = (
'name',
'_parent',
'child',
'observations',
'unintelligible',
'singletons',
'abstracted',
'nabd',
'focus',
'past_metrics',
'predinfo',
'__weakref__',
)
counter = 0
def __init__(self, parent=None):
    """
    Creates a new empty interpretation, initializing its attributes as a
    shallow copy or a direct assigment of the attributes of the parent. If
    parent is None, the attributes will be empty.

    Instance Properties
    -------------------
    name:
        Unique identificator of the interpretation.
    parent:
        Interpretation from which this one is derived, or None if this is
        a root interpretation.
    child:
        List of interpretations derived from this one.
    past_metrics:
        Summary of old information used for heuristics calculation.
    observations:
        Sortedlist containing all the observations in the interpretation,
        ordered by their start time. NOTE: This property is directly
        assigned from parent interpretation by default.
    singletons:
        Set with all Singleton hypotheses that are present in this
        interpretation. NOTE: This property is directly assigned
        from parent interpretation by default.
    abstracted:
        SortedList containing all the observations that are abstracted by
        some abstraction pattern in this interpretation. NOTE: This
        property is directly assigned from parent interpretation by default
    unintelligible:
        SortedList containing all the observations that cannot be
        abstracted by any abstraction pattern. NOTE: This property is
        directly assigned from parent interpretation by default.
    nabd:
        Number of hypotheses in the interpretation that can be abstracted
        by a higher-level hypothesis. This value is used for the evaluation
        of the interpretation.
    focus:
        Stack containing the focus of attention of the interpretation. Each
        element in this stack is an observation or a non-matched finding
        of a pattern.
    predinfo:
        Dictionary to store predecessor information for consecutive
        observations. Each entry is a 2-tuple (observation, type) with
        the predecessor observation and the type declared by the pattern
        for the consecutivity relation. NOTE: This property is directly
        assigned from parent interpretation by default.
    """
    # Names are assigned from a class-level counter, so every
    # interpretation created in the process gets a unique identifier.
    self.name = str(Interpretation.counter)
    if parent is None:
        self._parent = None
        self.child = []
        self.observations = sortedcontainers.SortedList(key=end_cmp_key)
        self.singletons = set()
        self.abstracted = sortedcontainers.SortedList(key=end_cmp_key)
        self.unintelligible = sortedcontainers.SortedList(key=end_cmp_key)
        self.nabd = 0
        self.past_metrics = PastMetrics(0, 0, 0, 0)
        self.focus = Focus()
        self.predinfo = {}
    else:
        # A weak reference avoids keeping the whole ancestor chain alive;
        # _on_parent_deleted clears it when the parent is collected.
        self._parent = weakref.ref(parent, self._on_parent_deleted)
        self.child = []
        # NOTE(review): 'self.parent' is presumably a property resolving
        # the weakref; its definition is outside this view -- confirm.
        self.parent.child.append(self)
        # Container attributes are shared (not copied) with the parent.
        self.observations = parent.observations
        self.singletons = parent.singletons
        self.abstracted = parent.abstracted
        self.unintelligible = parent.unintelligible
        self.nabd = parent.nabd
        self.past_metrics = parent.past_metrics
        # The focus is the only structure that gets its own shallow copy.
        self.focus = Focus(parent.focus)
        self.predinfo = parent.predinfo
    Interpretation.counter += 1
def __str__(self):
    """String representation of the interpretation: its unique name."""
    return self.name
def __repr__(self):
    """Interpretations are identified by their unique name in debug output."""
    return self.name
def _on_parent_deleted(self, _):
    """
    Weakref callback invoked when the parent interpretation is garbage
    collected; it drops the now-dangling reference.
    """
    self._parent = None
def _get_types(self, obs):
    """
    Obtains a tuple with the types that are used respect to an observation,
    both as hypothesis and as evidence of different patterns.
    """
    # NOTE(review): 'pat_map' is not listed in __slots__ and is not
    # assigned in __init__ within this view -- this method looks stale;
    # confirm it is still reachable before relying on it.
    types = {type(obs)}.union({p.get_evidence_type(obs)[0] for p in self.pat_map[obs][1]})
    # NOTE(review): 'get_delayed_finding' is defined on Focus, not on
    # Interpretation, in this view -- presumably delegated; verify.
    dmatch = self.get_delayed_finding(obs)
    if dmatch is not None:
        # Include the types associated with the delayed finding as well.
        types = types.union(
            {type(dmatch)}, {p.get_evidence_type(dmatch)[0] for p in self.pat_map[dmatch][1]}
        )
    return tuple(types)
def _get_proper_obs(self, clazz=Observable, start=0, end=np.inf, filt=lambda obs: True, reverse=False):
"""
Obtains a list of observations matching the search criteria, ordered
by the earliest time of the observation.
Parameters
----------
clazz:
Only | |
#!/usr/bin/env python3
import argparse
import datetime
import gzip
import multiprocessing
import os
import subprocess
import sys
import tarfile
import urllib.request
import about
import check
import shared
import tax
def parse_arguments():
    """
    Parses the command line arguments for 'CAT prepare'.

    Returns
    -------
    (args, date):
        The parsed argparse namespace and the current date as a string
        (also used to name the default output folders).

    Exits the program with an error message if unrecognized arguments
    (other than the leading 'prepare' subcommand token) are supplied.
    """
    date = str(datetime.datetime.now().date())
    parser = argparse.ArgumentParser(prog='CAT prepare',
                                     description='Download and construct '
                                                 'CAT/BAT database files.',
                                     usage='CAT prepare (--fresh | '
                                           '--existing) [options] [-h / '
                                           '--help]',
                                     add_help=False)
    # Exactly one of --fresh / --existing must be chosen.
    required_choice = parser.add_argument_group('Required choice')
    group = required_choice.add_mutually_exclusive_group(required=True)
    group.add_argument('--fresh',
                       dest='fresh',
                       action='store_true',
                       help='Start with a fresh database.')
    group.add_argument('--existing',
                       dest='fresh',
                       action='store_false',
                       help='Start with an existing database. CAT prepare '
                            'will search the supplied database and taxonomy '
                            'folders and only construct files that do not '
                            'exist yet.')
    optional = parser.add_argument_group('Optional arguments')
    optional.add_argument('-d',
                          '--database_folder',
                          dest='database_folder',
                          metavar='',
                          required=False,
                          type=str,
                          default='{0}_CAT_database'.format(date),
                          help='Name of folder to which database files will '
                               'be written (default: {date}_CAT_database).')
    optional.add_argument('-t',
                          '--taxonomy_folder',
                          dest='taxonomy_folder',
                          metavar='',
                          required=False,
                          type=str,
                          default='{0}_taxonomy'.format(date),
                          help='Name of folder to which taxonomy files will '
                               'be downloaded (default: {date}_taxonomy).')
    optional.add_argument('--path_to_diamond',
                          dest='path_to_diamond',
                          metavar='',
                          required=False,
                          type=str,
                          default='diamond',
                          help='Path to DIAMOND binaries. Please supply if CAT'
                               ' prepare can not find DIAMOND.')
    optional.add_argument('-z',
                          '--compress',
                          dest='compress',
                          required=False,
                          action='store_true',
                          help='Compress output files.')
    optional.add_argument('-q',
                          '--quiet',
                          dest='quiet',
                          required=False,
                          action='store_true',
                          help='Suppress verbosity.')
    optional.add_argument('--no_log',
                          dest='no_log',
                          required=False,
                          action='store_true',
                          help='Suppress log file.')
    optional.add_argument('-h',
                          '--help',
                          action='help',
                          help='Show this help message and exit.')
    specific = parser.add_argument_group('DIAMOND specific optional arguments')
    specific.add_argument('-n',
                          '--nproc',
                          dest='nproc',
                          metavar='',
                          required=False,
                          type=int,
                          default=multiprocessing.cpu_count(),
                          help='Number of cores to deploy by DIAMOND makedb '
                               '(default: maximum).')
    (args, extra_args) = parser.parse_known_args()
    # 'prepare' appears as the first extra argument when CAT dispatches to
    # this submodule; drop it before checking for leftovers.
    extra_args = [arg for (i, arg) in enumerate(extra_args) if
                  (i, arg) != (0, 'prepare')]
    if len(extra_args) > 0:
        # BUGFIX: error message grammar ('too much' -> 'too many').
        sys.exit('error: too many arguments supplied:\n{0}'
                 ''.format('\n'.join(extra_args)))
    return (args, date)
def download_taxonomy_files(taxonomy_folder, date, log_file, quiet):
    # Downloads the NCBI taxdump archive into *taxonomy_folder* (named with
    # *date* as prefix) and extracts it there. Exits the program on failure.
    url = 'ftp://ftp.ncbi.nlm.nih.gov/pub/taxonomy/taxdump.tar.gz'
    tmp_taxonomy_file = '{0}/{1}.taxdump.tar.gz'.format(taxonomy_folder, date)
    message = ('Downloading and extracting taxonomy files from {0} to {1}.'
               ''.format(url, taxonomy_folder))
    shared.give_user_feedback(message, log_file, quiet)
    try:
        urllib.request.urlretrieve(url, tmp_taxonomy_file)
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt;
        # consider 'except Exception'.
        message = 'ERROR: download of taxonomy files failed.'
        shared.give_user_feedback(message, log_file, quiet, error=True)
        sys.exit(1)
    try:
        with tarfile.open(tmp_taxonomy_file) as tar:
            # NOTE(review): extractall without a member filter trusts the
            # archive contents; fine for NCBI, but worth hardening.
            tar.extractall(taxonomy_folder)
    except:
        message = ('ERROR: something went wrong while extracting the taxonomy '
                   'files.')
        shared.give_user_feedback(message, log_file, quiet, error=True)
        sys.exit(1)
    message = 'Download complete!'
    shared.give_user_feedback(message, log_file, quiet)
def download_prot_accession2taxid_file(prot_accession2taxid_file,
                                       date,
                                       log_file,
                                       quiet):
    # Downloads NCBI's prot.accession2taxid.gz to the given path and returns
    # that path. Exits the program on failure.
    # NOTE(review): the *date* parameter is unused in this function.
    url = ('ftp://ftp.ncbi.nlm.nih.gov/pub/taxonomy/accession2taxid/'
           'prot.accession2taxid.gz')
    message = ('Downloading mapping file from {0} to {1}.'
               ''.format(url, prot_accession2taxid_file))
    shared.give_user_feedback(message, log_file, quiet)
    try:
        urllib.request.urlretrieve(url, prot_accession2taxid_file)
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt.
        message = 'ERROR: download of prot.accession2taxid.gz failed.'
        shared.give_user_feedback(message, log_file, quiet, error=True)
        sys.exit(1)
    message = 'Download complete!'
    shared.give_user_feedback(message, log_file, quiet)
    return prot_accession2taxid_file
def download_nr(nr_file, log_file, quiet):
    # Downloads the gzipped NCBI nr protein database to *nr_file*.
    # Exits the program on failure.
    url = 'ftp://ftp.ncbi.nlm.nih.gov/blast/db/FASTA/nr.gz'
    message = ('Downloading nr database from {0} to {1}.'
               ''.format(url, nr_file))
    shared.give_user_feedback(message, log_file, quiet)
    try:
        urllib.request.urlretrieve(url, nr_file)
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt.
        message = 'ERROR: download of nr database failed.'
        shared.give_user_feedback(message, log_file, quiet, error=True)
        sys.exit(1)
    message = 'Download complete!'
    shared.give_user_feedback(message, log_file, quiet)
def make_diamond_database(path_to_diamond,
                          nr_file,
                          diamond_database_prefix,
                          nproc,
                          log_file,
                          quiet):
    # Runs 'diamond makedb' on *nr_file* with *nproc* threads, producing
    # {diamond_database_prefix}.dmnd. Exits the program on failure.
    message = ('Constructing DIAMOND database {0}.dmnd from {1} '
               'using {2} cores. Please be patient...'
               ''.format(diamond_database_prefix, nr_file, nproc))
    shared.give_user_feedback(message, log_file, quiet)
    command = [path_to_diamond, 'makedb',
               '--in', nr_file,
               '-d', diamond_database_prefix,
               '-p', str(nproc),
               '--quiet']
    try:
        subprocess.check_call(command)
    except:
        # NOTE(review): bare except hides whether DIAMOND was missing
        # (FileNotFoundError) or returned non-zero (CalledProcessError).
        message = 'ERROR: DIAMOND database could not be created.'
        shared.give_user_feedback(message, log_file, quiet, error=True)
        sys.exit(1)
    message = 'DIAMOND database constructed!'
    shared.give_user_feedback(message, log_file, quiet)
def import_prot_accession2taxid(prot_accession2taxid_file, log_file, quiet):
    """
    Parses a gzipped prot.accession2taxid file into a dict mapping the
    second tab-separated field to the third one (accession.version to
    taxid in NCBI's format -- verify against the file header). The whole
    mapping is held in memory.
    """
    message = ('Loading {0} into memory. Please be patient...'
               ''.format(prot_accession2taxid_file))
    shared.give_user_feedback(message, log_file, quiet)
    mapping = {}
    with gzip.open(prot_accession2taxid_file, 'rt') as f1:
        for raw_line in f1:
            fields = raw_line.split('\t')
            mapping[fields[1]] = fields[2]
    return mapping
def make_fastaid2LCAtaxid_file(taxonomy_folder,
                               fastaid2LCAtaxid_file,
                               nr_file,
                               prot_accession2taxid_file,
                               log_file,
                               quiet):
    # For every fasta header in the (gzipped) nr file, computes the lowest
    # common ancestor (LCA) taxid over all accession numbers in the header
    # and writes 'fastaid<TAB>LCAtaxid' lines to *fastaid2LCAtaxid_file*.
    prot_accession2taxid = import_prot_accession2taxid(prot_accession2taxid_file,
                                                       log_file,
                                                       quiet)
    nodes_dmp = '{0}/nodes.dmp'.format(taxonomy_folder)
    # taxid2rank is unused here; only the parent mapping is needed.
    (taxid2parent, taxid2rank) = tax.import_nodes(nodes_dmp, log_file, quiet)
    message = ('Finding LCA of all protein accession numbers in fasta headers '
               'of {0}. Please be patient...'.format(nr_file))
    shared.give_user_feedback(message, log_file, quiet)
    corrected = 0
    total = 0
    with gzip.open(nr_file, 'rt') as f1, shared.open_maybe_gzip(fastaid2LCAtaxid_file, 'wt') as outf1:
        for line in f1:
            if not line.startswith('>'):
                continue
            # nr merges identical sequences: headers are '\x01'-separated
            # sub-headers, each starting with an accession number.
            line = line.lstrip('>').split('\x01')
            accession_numbers = [i.split(' ')[0] for i in line]
            fastaid = accession_numbers[0]
            list_of_lineages = []
            for accession_number in accession_numbers:
                try:
                    taxid = prot_accession2taxid[accession_number]
                    lineage = tax.find_lineage(taxid, taxid2parent)
                    list_of_lineages.append(lineage)
                except:
                    # This accounts for missing accession numbers in
                    # prot.accession2taxid and missing nodes in nodes.dmp.
                    continue
            total += 1
            if len(list_of_lineages) == 0:
                # This accounts for entries that only contain accession numbers
                # that are missing in prot.accession2taxid or whose taxid is
                # missing in nodes.dmp. Note that these entries are thus not
                # present in the output file.
                continue
            LCAtaxid = tax.find_LCA(list_of_lineages)
            outf1.write('{0}\t{1}\n'.format(fastaid, LCAtaxid))
            try:
                if LCAtaxid != prot_accession2taxid[fastaid]:
                    corrected += 1
            except:
                # If the fastaid cannot be found in prot.accession2taxid, but
                # a taxid is given to the fastaid based on secondary accession
                # numbers, it is counted as a correction as well.
                corrected += 1
    # NOTE(review): if the nr file contains no headers, total == 0 and the
    # percentage below raises ZeroDivisionError.
    message = ('Done! File {0} is created. '
               '{1} of {2} headers ({3:.1f}%) corrected. Please wait '
               'patiently for Python to collect garbage.'
               ''.format(fastaid2LCAtaxid_file,
                         corrected,
                         total,
                         corrected / total * 100))
    shared.give_user_feedback(message, log_file, quiet)
def find_offspring(taxonomy_folder, fastaid2LCAtaxid_file, log_file, quiet):
    # Builds a dict mapping each taxid appearing in any lineage of the
    # fastaid2LCAtaxid file to the set of its direct offspring taxids.
    nodes_dmp = '{0}/nodes.dmp'.format(taxonomy_folder)
    # taxid2rank is unused here; only the parent mapping is needed.
    (taxid2parent, taxid2rank) = tax.import_nodes(nodes_dmp, log_file, quiet)
    message = 'Searching nr database for taxids with multiple offspring.'
    shared.give_user_feedback(message, log_file, quiet)
    taxid2offspring = {}
    with shared.open_maybe_gzip(fastaid2LCAtaxid_file, 'rt') as f1:
        for line in f1:
            line = line.rstrip().split('\t')
            taxid = line[1]
            # Lineage runs from the taxid itself up to the root; each
            # ancestor records the previous entry as its offspring.
            lineage = tax.find_lineage(taxid, taxid2parent)
            for (i, taxid) in enumerate(lineage):
                # The first taxid in the lineage does not have a daughter node.
                if i == 0:
                    continue
                if taxid not in taxid2offspring:
                    taxid2offspring[taxid] = set()
                offspring = lineage[i - 1]
                taxid2offspring[taxid].add(offspring)
    return taxid2offspring
def write_taxids_with_multiple_offspring_file(taxids_with_multiple_offspring_file,
                                              taxid2offspring,
                                              log_file,
                                              quiet):
    """Write one taxid per line for every taxid with two or more offspring."""
    shared.give_user_feedback(
        'Writing {0}.'.format(taxids_with_multiple_offspring_file),
        log_file, quiet)

    with shared.open_maybe_gzip(taxids_with_multiple_offspring_file,
                                'wt') as handle:
        for (taxid, offspring) in taxid2offspring.items():
            # Only taxids where the tree actually branches are of interest.
            if len(offspring) < 2:
                continue
            handle.write('{0}\n'.format(taxid))
def prepare(step_list,
            taxonomy_folder,
            database_folder,
            date,
            prot_accession2taxid_file,
            nr_file,
            path_to_diamond,
            diamond_database_prefix,
            nproc,
            fastaid2LCAtaxid_file,
            taxids_with_multiple_offspring_file,
            log_file,
            quiet):
    """Run the requested CAT prepare pipeline stages, in fixed order.

    Each stage name present in step_list triggers the corresponding
    download / construction step; absent names are skipped, so a partially
    prepared database can be resumed without redoing finished work.
    """
    if 'download_taxonomy_files' in step_list:
        download_taxonomy_files(taxonomy_folder, date, log_file, quiet)

    if 'download_prot_accession2taxid_file' in step_list:
        download_prot_accession2taxid_file(prot_accession2taxid_file,
                                           date,
                                           log_file,quiet)

    if 'download_nr' in step_list:
        download_nr(nr_file, log_file, quiet)

    if 'make_diamond_database' in step_list:
        make_diamond_database(path_to_diamond,
                              nr_file,
                              diamond_database_prefix,
                              nproc,
                              log_file,
                              quiet)

    if 'make_fastaid2LCAtaxid_file' in step_list:
        make_fastaid2LCAtaxid_file(taxonomy_folder,
                                   fastaid2LCAtaxid_file,
                                   nr_file,
                                   prot_accession2taxid_file,
                                   log_file,
                                   quiet)

    if 'make_taxids_with_multiple_offspring_file' in step_list:
        # Two-phase: scan the fastaid2LCAtaxid file, then write the result.
        taxid2offspring = find_offspring(taxonomy_folder,
                                         fastaid2LCAtaxid_file,
                                         log_file,
                                         quiet)
        write_taxids_with_multiple_offspring_file(taxids_with_multiple_offspring_file,
                                                  taxid2offspring,
                                                  log_file,
                                                  quiet)

    message = ('\n-----------------\n\n'
               '[{0}] CAT prepare is done!'.format(datetime.datetime.now()))
    shared.give_user_feedback(message, log_file, quiet, show_time=False)

    if nr_file is not None:
        # nr is only needed to build the DIAMOND database; safe to delete now.
        message = 'You may remove {0} now.'.format(nr_file)
        shared.give_user_feedback(message, log_file, quiet, show_time=False)

    message = ('\nSupply the following arguments to CAT or BAT if you want to '
               'use the constructed database:\n'
               '-d / --database_folder {0}\n'
               '-t / --taxonomy_folder {1}'
               ''.format(database_folder,
                         taxonomy_folder))
    shared.give_user_feedback(message, log_file, quiet, show_time=False)
def run_fresh(args, date):
(database_folder,
taxonomy_folder,
path_to_diamond,
compress,
quiet,
no_log,
nproc) = check.convert_arguments(args)
if no_log:
log_file = None
else:
log_file = '{0}.CAT_prepare.fresh.log'.format(date)
with open(log_file, 'w') as outf1:
pass
message = '# CAT v{0}.'.format(about.__version__)
shared.give_user_feedback(message, log_file, quiet, show_time=False)
message = ('\n'
'CAT prepare is running, constructing a fresh database.\n'
'Rawr!\n\n'
'WARNING: preparing the database files may take a couple of '
'hours.\n\n'
'Supplied command: {0}\n\n'
'Taxonomy folder: {1}/\n'
'Database folder: {2}/\n'
'Log file: {3}\n\n'
'-----------------\n'.format(' '.join(sys.argv),
taxonomy_folder,
database_folder,
log_file))
shared.give_user_feedback(message, log_file, quiet, show_time=False)
# Check diamond path.
error = check.check_diamond_binaries(path_to_diamond, log_file, quiet)
if error:
sys.exit(1)
# Check taxonomy folder.
taxonomy_folder_inspect = check.inspect_taxonomy_folder(taxonomy_folder)
if taxonomy_folder_inspect != [None]:
if len([file for file in taxonomy_folder_inspect if
file is not None]) > 0:
message = ('ERROR: taxonomy folder {0} exists already and '
'contains taxonomy files. Please supply a novel or '
'empty folder if you want to start fresh, or run '
'CAT prepare --existing.'
''.format(taxonomy_folder))
shared.give_user_feedback(message, log_file, quiet, error=True)
sys.exit(1)
message = ('Taxonomy folder exists already. Taxonomy files will be '
'downloaded to it.')
shared.give_user_feedback(message, log_file, quiet)
database_folder_inspect = check.inspect_database_folder(database_folder)
# Check database folder.
if database_folder_inspect != [None]:
if len([file_ for file_ in database_folder_inspect if
file_ is not None]) > 0:
message = ('ERROR: database folder {0} exists already and '
'contains database files. Please supply a novel or '
'empty folder if you want to start fresh.'
''.format(database_folder))
shared.give_user_feedback(message, log_file, quiet, error=True)
sys.exit(1)
message = ('Database folder exists already. Database file will be '
'downloaded to it / constructed in it.')
shared.give_user_feedback(message, log_file, quiet)
# Check memory.
min_mem = 100
(total_memory, error) = check.check_memory(min_mem)
if | |
# Copyright 2021 Alibaba, Inc. and its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import struct
import grpc
import unittest
import time
import random
from pyproximabe import *
from global_conf import GlobalConf
from collection_creator import CollectionCreator
import client_helper
OperationType = WriteRequest.OperationType
class TestIndexAgentBase(unittest.TestCase):
    """Shared fixture and write/query request builders for index-agent tests.

    NOTE(review): this base class never assigns ``self.with_repo`` itself;
    a subclass must set it before ``setUp`` runs (see TestIndexAgentDatabase,
    which sets it in its own setUp before calling super().setUp()).
    """

    def setUp(self):
        # Fresh client against a clean server, then one collection created
        # with a single 16-dim FP32 index column ("column1").
        self.global_conf = GlobalConf()
        self.creator = CollectionCreator()
        self.client = client_helper.get_client(self.global_conf)
        self.collection_name = "collection1"
        self.collection_name2 = "collection2"
        self.repository_name = "test_repo"
        self.clean_env()
        self.index_columns = ["column1"]
        self.index_dimensions = [16]
        self.schema = self.create_schema(self.collection_name, self.index_columns,
                                         self.index_dimensions,
                                         with_repo=self.with_repo)
        status = self.client.create_collection(self.schema)
        self.assertTrue(status.ok())
        self.connection = self.creator.get_connection()

    def tearDown(self):
        # Drop everything so the next test starts from an empty server.
        self.clean_env()

    def reconnect(self):
        """Replace the current client with a freshly connected one."""
        self.client = client_helper.get_client(self.global_conf)

    def clean_env(self):
        """Drop every collection currently present on the server."""
        status, collections = self.client.list_collections()
        self.assertTrue(status.ok())
        for collection in collections:
            status = self.client.drop_collection(collection.collection_config.collection_name)
            self.assertTrue(status.ok())

    def create_schema(self, collection_name, column_name, dims,
                      forward_columns=["col_a", "col_b"],
                      repository_name="test_repo",
                      with_repo=True):
        """Build a collection schema with the given index columns/dims.

        NOTE(review): mutable default argument (forward_columns); harmless
        here because it is never mutated, but fragile.
        """
        return self.creator.create_schema(collection_name,
                                          repository_table="test_collection",
                                          repository_name=repository_name,
                                          forward_columns=forward_columns,
                                          index_columns=column_name,
                                          index_dimensions=dims,
                                          db_name="test_db",
                                          with_repo=with_repo)

    def create_schema1(self, collection_name, index_columns=None,
                       dimensions=None, forward_columns=None,
                       with_repo=True):
        """Variant of create_schema with all column settings optional."""
        return self.creator.create_schema(collection_name,
                                          repository_table="test_collection",
                                          repository_name="test_repo",
                                          forward_columns=forward_columns,
                                          index_columns=index_columns,
                                          index_dimensions=dimensions,
                                          db_name="test_db",
                                          with_repo=with_repo)

    def create_all_index_data_types_schema(self, collection_name, dim,
                                           with_repo=True):
        """Schema with one index column per supported vector data type.

        All six columns share the same logical dimension ``dim``.
        """
        index_data_types = [DataType.VECTOR_FP32,
                            DataType.VECTOR_FP16,
                            DataType.VECTOR_INT8,
                            DataType.VECTOR_INT4,
                            DataType.VECTOR_BINARY32,
                            DataType.VECTOR_BINARY64]
        index_columns = []
        index_dimensions = []
        for i in range(0, len(index_data_types)):
            index_columns.append('column_' + str(i))
            index_dimensions.append(dim)
        return self.creator.create_schema(collection_name,
                                          repository_table="test_collection",
                                          repository_name="test_repo",
                                          forward_columns=["col_a", "col_b"],
                                          index_columns=index_columns,
                                          index_data_types=index_data_types,
                                          index_dimensions=index_dimensions,
                                          db_name="test_db",
                                          with_repo=with_repo)

    def create_all_forward_data_types_schema(self, collection_name, dim,
                                             with_repo=True):
        """Schema with one FP32 index column and nine forward columns.

        The forward column types themselves are left to the creator's
        defaults; only the names forward_0..forward_8 are fixed here.
        """
        index_data_types = [DataType.VECTOR_FP32]
        index_columns = ['column_0']
        index_dimensions = [dim]
        forward_columns = []
        forward_cnt = 9
        for i in range(0, forward_cnt):
            forward_columns.append('forward_' + str(i))
        return self.creator.create_schema(collection_name,
                                          repository_table="test_collection",
                                          repository_name="test_repo",
                                          forward_columns=forward_columns,
                                          index_columns=index_columns,
                                          index_data_types=index_data_types,
                                          index_dimensions=index_dimensions,
                                          db_name="test_db",
                                          with_repo=with_repo)

    def create_single_request(self, magic_number, operation_type,
                              forwards = [1.234, 'abc'], lsn=10, is_bytes=False, is_vector=False):
        """Build a one-row write request for collection1 (primary key 1).

        The feature is sent as a string literal by default, as packed
        little-endian float32 bytes when is_bytes, or as a plain Python
        list when is_vector (is_vector wins over is_bytes).
        """
        index_tuple_metas = [['column1', DataType.VECTOR_FP32, self.index_dimensions[0]]]
        index_tuple_types = ['string']
        forward_tuple_names = ['col_a', 'col_b']
        forward_tuple_types = [DataType.FLOAT,
                               DataType.STRING]
        if not is_bytes:
            feature = "[1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2]"
        else:
            vector = [1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2]
            values = []
            for value in vector:
                # Pack each component as a 4-byte float32.
                values.append(struct.pack('f', value))
            feature = b''.join(values)
        if is_vector:
            feature = [1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2]
        rows = [[1, operation_type, lsn, feature,
                 forwards[0], forwards[1]]
               ]
        return self.creator.create_dataset_request(self.collection_name,
                                                   magic_number,
                                                   index_tuple_metas = index_tuple_metas,
                                                   index_tuple_types = index_tuple_types,
                                                   forward_tuple_names = forward_tuple_names,
                                                   forward_tuple_types = forward_tuple_types,
                                                   rows = rows,
                                                   with_repo=self.with_repo)

    def create_batch_request(self, magic_number, count, operation_type,
                             forwards=[0.234, 'abc'],
                             lsn = 10,
                             index_value_base = 0):
        """Build a write request with ``count`` rows (keys 1..count).

        Row i gets lsn+i, a vector whose first component is
        i + index_value_base, and forward values (forwards[0]+i, forwards[1]).
        """
        index_tuple_metas = [['column1', DataType.VECTOR_FP32, self.index_dimensions[0]]]
        index_tuple_types = ['string']
        rows = []
        forward_tuple_names=['col_a', 'col_b']
        forward_tuple_types=[DataType.FLOAT,
                             DataType.STRING]
        for i in range(1, count + 1):
            rows.append([i, operation_type, lsn + i,
                         '[' + str(i + index_value_base) + ",1,1,1,1,1,1,1,2,2,2,2,2,2,2,2]",
                         forwards[0] + i, forwards[1]])
        return self.creator.create_dataset_request(self.collection_name,
                                                   magic_number,
                                                   index_tuple_metas = index_tuple_metas,
                                                   index_tuple_types = index_tuple_types,
                                                   forward_tuple_names = forward_tuple_names,
                                                   forward_tuple_types = forward_tuple_types,
                                                   rows = rows,
                                                   with_repo=self.with_repo)

    def create_one_forward_request(self, collection,
                                   magic_number, count, operation_type,
                                   forwards=[0.234],
                                   lsn = 10,
                                   index_value_base = 0):
        """Like create_batch_request but with a single forward column (col_a)
        and an explicit target collection."""
        index_tuple_metas = [['column1', DataType.VECTOR_FP32, self.index_dimensions[0]]]
        index_tuple_types = ['string']
        rows = []
        forward_tuple_names=['col_a']
        forward_tuple_types=[DataType.FLOAT]
        for i in range(1, count + 1):
            rows.append([i, operation_type, lsn + i,
                         '[' + str(i + index_value_base) + ",1,1,1,1,1,1,1,2,2,2,2,2,2,2,2]",
                         forwards[0] + i])
        return self.creator.create_dataset_request(collection,
                                                   magic_number,
                                                   index_tuple_metas = index_tuple_metas,
                                                   index_tuple_types = index_tuple_types,
                                                   forward_tuple_names = forward_tuple_names,
                                                   forward_tuple_types = forward_tuple_types,
                                                   rows = rows,
                                                   with_repo=self.with_repo)

    def create_multi_index_request(self, collection, dim,
                                   magic_number, count, operation_type,
                                   lsn = 10,
                                   index_value_base = 0):
        """Write request with two index columns (index2, index1); 'index1'
        also appears as a forward column carrying the vector string.

        NOTE(review): index_tuple_names below is assigned but never used.
        """
        index_tuple_metas = [['index2', DataType.VECTOR_FP32, dim],
                             ['index1', DataType.VECTOR_FP32, dim]]
        index_tuple_names = ['index2', 'index1']
        index_tuple_types = ['string', 'string']
        rows = []
        forward_tuple_names=['col_a', 'index1']
        forward_tuple_types=[DataType.FLOAT, DataType.STRING]
        for i in range(1, count + 1):
            rows.append([i, operation_type, lsn + i,
                         '[' + str(i + index_value_base) + ",1,1,1,1,1,1,1,2,2,2,2,2,2,2,2]",
                         '[' + str(i + index_value_base) + ",1,1,1,1,1,1,1,2,2,2,2,2,2,2,2]",
                         0.234 + i,
                         '[' + str(i + index_value_base) + ",1,1,1,1,1,1,1,2,2,2,2,2,2,2,2]"])
        return self.creator.create_dataset_request(collection,
                                                   magic_number,
                                                   index_tuple_metas = index_tuple_metas,
                                                   index_tuple_types = index_tuple_types,
                                                   forward_tuple_names = forward_tuple_names,
                                                   forward_tuple_types = forward_tuple_types,
                                                   rows = rows,
                                                   with_repo=self.with_repo)

    def create_all_operations_request(self,
                                      magic_number,
                                      count,
                                      operation_types,
                                      forwards=[0.234, 'abc'],
                                      lsn = 10,
                                      index_value_base = 0):
        """For each key 1..count, emit one row per entry in operation_types,
        with a strictly increasing lsn per row.

        NOTE(review): the ``lsn`` parameter is immediately overwritten with 1
        below, so the argument value is never used — confirm intended.
        """
        index_tuple_metas = [['column1', DataType.VECTOR_FP32, self.index_dimensions[0]]]
        index_tuple_types = ['string']
        forward_tuple_names = ['col_a', 'col_b']
        forward_tuple_types = [DataType.FLOAT, DataType.STRING]
        rows = []
        lsn = 1
        for i in range(1, count + 1):
            idx = 1
            for operation_type in operation_types:
                rows.append([i, operation_type, lsn,
                             '[' + str(i + index_value_base) + ",1,1,1,1,1,1,1,2,2,2,2,2,2,2,2]",
                             forwards[0] + idx, forwards[1]])
                idx += 1
                lsn += 1
        return self.creator.create_dataset_request(self.collection_name,
                                                   magic_number,
                                                   index_tuple_metas = index_tuple_metas,
                                                   index_tuple_types = index_tuple_types,
                                                   forward_tuple_names = forward_tuple_names,
                                                   forward_tuple_types = forward_tuple_types,
                                                   rows = rows,
                                                   with_repo=self.with_repo)

    def create_all_index_data_types_insert_request(self, magic_number, dim, count):
        """INSERT rows into collection2 covering all six vector index types.

        The vector string length varies per type: j == 5 (BINARY64) gets one
        component, j == 4 (BINARY32) two components, all others ``dim``
        components — presumably matching each type's packed representation
        of ``dim`` logical dimensions (TODO confirm against server schema).
        """
        index_tuple_metas = []
        index_tuple_types = []
        index_data_types = [DataType.VECTOR_FP32,
                            DataType.VECTOR_FP16,
                            DataType.VECTOR_INT8,
                            DataType.VECTOR_INT4,
                            DataType.VECTOR_BINARY32,
                            DataType.VECTOR_BINARY64]
        total_types = 6
        for i in range(0, total_types):
            index_tuple_metas.append(['column_' + str(i), index_data_types[i], dim])
        forward_tuple_names = ['col_a', 'col_b']
        forward_tuple_types = [DataType.FLOAT, DataType.STRING]
        rows = []
        for i in range(1, count + 1):
            row = [i, OperationType.INSERT, 9 + i]
            for j in range(0, total_types):
                vec = str(i)
                if j == 5:
                    vec = str(i)
                elif j == 4:
                    vec = str(i) + ',' + str(i)
                else:
                    for k in range(1, dim):
                        vec += ',' + str(i)
                row.append('[' + vec + ']')
            row.append(0.234 + i)
            row.append('abc')
            rows.append(row)
        return self.creator.create_dataset_request(self.collection_name2,
                                                   magic_number,
                                                   index_tuple_metas = index_tuple_metas,
                                                   index_tuple_types = None,
                                                   forward_tuple_names = forward_tuple_names,
                                                   forward_tuple_types = forward_tuple_types,
                                                   rows = rows,
                                                   with_repo=self.with_repo)

    def create_all_forward_data_types_insert_request(self, magic_number, dim, count):
        """INSERT rows into collection2 covering all nine forward data types.

        For row i the forward values are: bytes(str(i)), str(i) repeated i
        times, a parity boolean, then i scaled by 1/10/100/1000 as ints and
        by 1.0/10.0 as floats.
        """
        index_tuple_metas = [['column_0', DataType.VECTOR_FP32, dim]]
        index_tuple_types = ['string']
        total_types = 9
        forward_tuple_names = []
        forward_tuple_types = [DataType.BINARY,
                               DataType.STRING,
                               DataType.BOOL,
                               DataType.INT32,
                               DataType.INT64,
                               DataType.UINT32,
                               DataType.UINT64,
                               DataType.FLOAT,
                               DataType.DOUBLE]
        for i in range(0, total_types):
            forward_tuple_names.append('forward_' + str(i))
        rows = []
        for i in range(1, count + 1):
            row = [i, OperationType.INSERT, 9 + i]
            vec = str(i)
            for k in range(1, dim):
                vec += ',' + str(i)
            row.append('[' + vec + ']')
            row.append(str(i).encode('UTF-8'))
            row.append(str(i) * i)
            if i % 2 == 1:
                row.append(True)
            else:
                row.append(False)
            row.append(i)
            row.append(i * 10)
            row.append(i * 100)
            row.append(i * 1000)
            row.append(i * 1.0)
            row.append(i * 10.0)
            rows.append(row)
        return self.creator.create_dataset_request(self.collection_name2,
                                                   magic_number,
                                                   index_tuple_metas = index_tuple_metas,
                                                   index_tuple_types = index_tuple_types,
                                                   forward_tuple_names = forward_tuple_names,
                                                   forward_tuple_types = forward_tuple_types,
                                                   rows = rows,
                                                   with_repo=self.with_repo)

    def create_request(self, collection_name,
                       magic_number, count, operation_type=None,
                       operation_types=None,
                       forward_tuple_names=['col_a', 'col_b'],
                       forward_tuple_types=None,
                       forwards=[0.234, 'abc'],
                       index_tuple_metas = None,
                       index_tuple_types = ['string'],
                       lsn = 10, index_value_base = 0,
                       vector_exception=False,
                       key_repeated=False,
                       empty_request=False):
        """General-purpose request builder used by the negative-path tests.

        Knobs: vector_exception appends extra components to make the vector
        dimension invalid; key_repeated forces every row's key to 1;
        empty_request builds the rows but sends none of them. If a single
        operation_type is given it is expanded to one entry per row.
        Note: unlike the other builders, this one does not pass with_repo.
        """
        rows = []
        if not index_tuple_metas:
            index_tuple_metas = [['column1', DataType.VECTOR_FP32, self.index_dimensions[0]]]
        if not forward_tuple_types:
            forward_tuple_types=[DataType.FLOAT, DataType.STRING]
        index_num = len(index_tuple_metas)
        index_types = []
        if index_tuple_types:
            index_types = index_tuple_types
        if not operation_types:
            operation_types = []
            for i in range(1, count + 1):
                operation_types.append(operation_type)
        for i in range(1, count + 1):
            row = []
            if not key_repeated:
                row = [i + index_value_base, operation_types[i - 1], lsn + i]
            else:
                row = [1, operation_types[i - 1], lsn + i]
            for j in range(0, index_num):
                vector = str(i + index_value_base) + ",1,1,1,1,1,1,1,2,2,2,2,2,2,2,2"
                if vector_exception:
                    # Oversized vector to provoke a dimension mismatch error.
                    vector += "1,1,1"
                row.append('[' + vector + ']')
            row.append(forwards[0] + i)
            row.append(forwards[1])
            if not empty_request:
                rows.append(row)
        return self.creator.create_dataset_request(collection_name,
                                                   magic_number,
                                                   index_tuple_metas = index_tuple_metas,
                                                   index_tuple_types = index_types,
                                                   forward_tuple_names = forward_tuple_names,
                                                   forward_tuple_types = forward_tuple_types,
                                                   rows = rows)

    def simple_query(self, topk=10):
        """Single-batch FP32 knn query against collection1/column1."""
        features = [[1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,3]]
        return self.client.query(self.collection_name,
                                 'column1',
                                 features,
                                 data_type=DataType.VECTOR_FP32,
                                 dimension=16,
                                 batch_count=1,
                                 topk=topk)

    def query(self, index_column, topk, dim, feature_type):
        """Query collection2 with an all-ones feature of the given type.

        For packed types the component count is reduced: INT4 packs 2 values
        per byte, BINARY32/BINARY64 pack 32/64 bits per word.
        NOTE(review): ``/=`` leaves dim as a float, which is then passed as
        ``dimension=dim`` — confirm the client accepts a float here.
        """
        features = []
        if feature_type == DataType.VECTOR_INT4:
            dim /= 2
        elif feature_type == DataType.VECTOR_BINARY32:
            dim /= 32
        elif feature_type == DataType.VECTOR_BINARY64:
            dim /= 64
        for i in range(0, int(dim)):
            features.append(1)
        # for f in features:
        #     if feature_type == common_pb2.FeatureType.FT_FP32:
        #         fea_bytes += struct.pack('f', f)
        #     elif feature_type == common_pb2.FeatureType.FT_FP16:
        #         fea_bytes += struct.pack('h', 0)
        #     elif feature_type == common_pb2.FeatureType.FT_INT8:
        #         fea_bytes += struct.pack('b', f)
        #     elif feature_type == common_pb2.FeatureType.FT_INT4:
        #         fea_bytes += struct.pack('b', 17)
        #     elif feature_type == common_pb2.FeatureType.FT_BINARY32:
        #         fea_bytes += struct.pack('I', f)
        #     elif feature_type == common_pb2.FeatureType.FT_BINARY64:
        #         fea_bytes += struct.pack('L', f)
        return self.client.query(self.collection_name2,
                                 index_column,
                                 features,
                                 data_type=feature_type,
                                 dimension=dim,
                                 batch_count=1,
                                 topk=topk)

    def get_magic_number(self, collection_name):
        """Return the server-assigned magic number for a collection."""
        status, collection = self.client.describe_collection(collection_name)
        self.assertTrue(status.ok())
        return collection.magic_number
class TestIndexAgentDatabase(TestIndexAgentBase):
def setUp(self):
self.with_repo = True
super().setUp()
def test_single_insert(self):
magic_number = self.get_magic_number(self.collection_name)
req = self.create_single_request(magic_number, OperationType.INSERT)
logging.info("request: %s", req)
response = self.client.write(req)
logging.info("process result: %s", response)
self.assertTrue(response.ok())
time.sleep(1)
status, response = self.simple_query()
self.assertTrue(status.ok())
logging.info("query result: %s", response)
results = response.results
self.assertEqual(len(results), 1)
documents = results[0]
self.assertEqual(len(documents), 1)
self.assertEqual(documents[0].primary_key, 1)
self.assertEqual(documents[0].score, 1.0)
self.assertEqual(len(documents[0].forward_column_values), 2)
self.assertAlmostEqual(documents[0].forward_column_values['col_a'], 1.234, delta=0.000001)
self.assertEqual(documents[0].forward_column_values['col_b'], 'abc')
def test_single_insert_with_bytes(self):
magic_number = self.get_magic_number(self.collection_name)
req = self.create_single_request(magic_number,
OperationType.INSERT, is_bytes=True)
logging.info("request: %s", | |
localctx = SBHasmParser.StepContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_step)
try:
self.enterOuterAlt(localctx, 1)
self.state = 101
self.match(SBHasmParser.STEP)
self.state = 104
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [SBHasmParser.DIRECTION]:
self.state = 102
self.directions()
pass
elif token in [SBHasmParser.MEM]:
self.state = 103
self.mem()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DirectionsContext(ParserRuleContext):
    # ANTLR-generated context for the 'directions' rule:
    # directions : direction (COMMA directions)?

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def direction(self):
        return self.getTypedRuleContext(SBHasmParser.DirectionContext,0)

    def COMMA(self):
        return self.getToken(SBHasmParser.COMMA, 0)

    def directions(self):
        # Right-recursive tail of the comma-separated list.
        return self.getTypedRuleContext(SBHasmParser.DirectionsContext,0)

    def getRuleIndex(self):
        return SBHasmParser.RULE_directions

    def enterRule(self, listener:ParseTreeListener):
        # Only dispatch if the listener implements the hook.
        if hasattr( listener, "enterDirections" ):
            listener.enterDirections(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitDirections" ):
            listener.exitDirections(self)
def directions(self):
    """ANTLR-generated parse method: directions : direction (COMMA directions)?"""
    localctx = SBHasmParser.DirectionsContext(self, self._ctx, self.state)
    self.enterRule(localctx, 10, self.RULE_directions)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 106
        self.direction()
        self.state = 109
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        if _la==SBHasmParser.COMMA:
            # Optional comma-separated tail, handled via right recursion.
            self.state = 107
            self.match(SBHasmParser.COMMA)
            self.state = 108
            self.directions()

    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LabelContext(ParserRuleContext):
    # ANTLR-generated context for the 'label' rule: label : LABEL

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def LABEL(self):
        return self.getToken(SBHasmParser.LABEL, 0)

    def getRuleIndex(self):
        return SBHasmParser.RULE_label

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLabel" ):
            listener.enterLabel(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLabel" ):
            listener.exitLabel(self)
def label(self):
    """ANTLR-generated parse method: label : LABEL"""
    localctx = SBHasmParser.LabelContext(self, self._ctx, self.state)
    self.enterRule(localctx, 12, self.RULE_label)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 111
        self.match(SBHasmParser.LABEL)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class CondContext(ParserRuleContext):
    # ANTLR-generated context for the 'cond' rule:
    # cond : IF expressions COLON EOL line+ (sonst EOL line+)? ENDIF

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def IF(self):
        return self.getToken(SBHasmParser.IF, 0)

    def expressions(self):
        return self.getTypedRuleContext(SBHasmParser.ExpressionsContext,0)

    def COLON(self):
        return self.getToken(SBHasmParser.COLON, 0)

    def EOL(self, i:int=None):
        # With no index, return every EOL token; otherwise the i-th one.
        if i is None:
            return self.getTokens(SBHasmParser.EOL)
        else:
            return self.getToken(SBHasmParser.EOL, i)

    def ENDIF(self):
        return self.getToken(SBHasmParser.ENDIF, 0)

    def line(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(SBHasmParser.LineContext)
        else:
            return self.getTypedRuleContext(SBHasmParser.LineContext,i)

    def sonst(self):
        # Optional else-branch ("sonst" is German for "else").
        return self.getTypedRuleContext(SBHasmParser.SonstContext,0)

    def getRuleIndex(self):
        return SBHasmParser.RULE_cond

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterCond" ):
            listener.enterCond(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitCond" ):
            listener.exitCond(self)
def cond(self):
    """ANTLR-generated parse method:
    cond : IF expressions COLON EOL line+ (sonst EOL line+)? ENDIF
    """
    localctx = SBHasmParser.CondContext(self, self._ctx, self.state)
    self.enterRule(localctx, 14, self.RULE_cond)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 113
        self.match(SBHasmParser.IF)
        self.state = 114
        self.expressions()
        self.state = 115
        self.match(SBHasmParser.COLON)
        self.state = 116
        self.match(SBHasmParser.EOL)
        self.state = 118
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        while True:
            # One or more body lines (the '+' in line+); the bitmask below is
            # the ANTLR-computed FIRST set of tokens that can start a line.
            self.state = 117
            self.line()
            self.state = 120
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SBHasmParser.COMMENT) | (1 << SBHasmParser.JUMP) | (1 << SBHasmParser.STEP) | (1 << SBHasmParser.PICKUP) | (1 << SBHasmParser.IF) | (1 << SBHasmParser.DROP) | (1 << SBHasmParser.WRITE) | (1 << SBHasmParser.TAKE) | (1 << SBHasmParser.GIVE) | (1 << SBHasmParser.END) | (1 << SBHasmParser.MEM) | (1 << SBHasmParser.LABEL) | (1 << SBHasmParser.EOL) | (1 << SBHasmParser.LISTEN) | (1 << SBHasmParser.TELL) | (1 << SBHasmParser.GAMECOMMENT) | (1 << SBHasmParser.GAMECOMMENTDEF))) != 0)):
                break

        self.state = 129
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        if _la==SBHasmParser.ELSE:
            # Optional else-branch: sonst EOL line+
            self.state = 122
            self.sonst()
            self.state = 123
            self.match(SBHasmParser.EOL)
            self.state = 125
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while True:
                self.state = 124
                self.line()
                self.state = 127
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << SBHasmParser.COMMENT) | (1 << SBHasmParser.JUMP) | (1 << SBHasmParser.STEP) | (1 << SBHasmParser.PICKUP) | (1 << SBHasmParser.IF) | (1 << SBHasmParser.DROP) | (1 << SBHasmParser.WRITE) | (1 << SBHasmParser.TAKE) | (1 << SBHasmParser.GIVE) | (1 << SBHasmParser.END) | (1 << SBHasmParser.MEM) | (1 << SBHasmParser.LABEL) | (1 << SBHasmParser.EOL) | (1 << SBHasmParser.LISTEN) | (1 << SBHasmParser.TELL) | (1 << SBHasmParser.GAMECOMMENT) | (1 << SBHasmParser.GAMECOMMENTDEF))) != 0)):
                    break

        self.state = 131
        self.match(SBHasmParser.ENDIF)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ExpressionsContext(ParserRuleContext):
    # ANTLR-generated context for the 'expressions' rule:
    # expressions : expression ((AND | OR) EOL expression)*

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def expression(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(SBHasmParser.ExpressionContext)
        else:
            return self.getTypedRuleContext(SBHasmParser.ExpressionContext,i)

    def EOL(self, i:int=None):
        if i is None:
            return self.getTokens(SBHasmParser.EOL)
        else:
            return self.getToken(SBHasmParser.EOL, i)

    def AND(self, i:int=None):
        if i is None:
            return self.getTokens(SBHasmParser.AND)
        else:
            return self.getToken(SBHasmParser.AND, i)

    def OR(self, i:int=None):
        if i is None:
            return self.getTokens(SBHasmParser.OR)
        else:
            return self.getToken(SBHasmParser.OR, i)

    def getRuleIndex(self):
        return SBHasmParser.RULE_expressions

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterExpressions" ):
            listener.enterExpressions(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitExpressions" ):
            listener.exitExpressions(self)
def expressions(self):
    """ANTLR-generated parse method:
    expressions : expression ((AND | OR) EOL expression)*
    """
    localctx = SBHasmParser.ExpressionsContext(self, self._ctx, self.state)
    self.enterRule(localctx, 16, self.RULE_expressions)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 133
        self.expression()
        self.state = 139
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        while _la==SBHasmParser.AND or _la==SBHasmParser.OR:
            # Consume the AND/OR connective, then EOL, then the next operand.
            self.state = 134
            _la = self._input.LA(1)
            if not(_la==SBHasmParser.AND or _la==SBHasmParser.OR):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
            self.state = 135
            self.match(SBHasmParser.EOL)
            self.state = 136
            self.expression()
            self.state = 141
            self._errHandler.sync(self)
            _la = self._input.LA(1)

    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ExpressionContext(ParserRuleContext):
    # ANTLR-generated context for the 'expression' rule:
    # expression : (direction | items | number) COMPARE (direction | items | number)

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def COMPARE(self):
        return self.getToken(SBHasmParser.COMPARE, 0)

    def direction(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(SBHasmParser.DirectionContext)
        else:
            return self.getTypedRuleContext(SBHasmParser.DirectionContext,i)

    def items(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(SBHasmParser.ItemsContext)
        else:
            return self.getTypedRuleContext(SBHasmParser.ItemsContext,i)

    def number(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(SBHasmParser.NumberContext)
        else:
            return self.getTypedRuleContext(SBHasmParser.NumberContext,i)

    def getRuleIndex(self):
        return SBHasmParser.RULE_expression

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterExpression" ):
            listener.enterExpression(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitExpression" ):
            listener.exitExpression(self)
def expression(self):
    """ANTLR-generated parse method:
    expression : (direction | items | number) COMPARE (direction | items | number)
    """
    localctx = SBHasmParser.ExpressionContext(self, self._ctx, self.state)
    self.enterRule(localctx, 18, self.RULE_expression)
    try:
        self.enterOuterAlt(localctx, 1)
        # Left-hand operand: direction, items, or number.
        self.state = 145
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SBHasmParser.DIRECTION]:
            self.state = 142
            self.direction()
            pass
        elif token in [SBHasmParser.MYITEM, SBHasmParser.NOTHING, SBHasmParser.ITEM, SBHasmParser.MEM]:
            self.state = 143
            self.items()
            pass
        elif token in [SBHasmParser.NUMBER]:
            self.state = 144
            self.number()
            pass
        else:
            raise NoViableAltException(self)

        self.state = 147
        self.match(SBHasmParser.COMPARE)
        # Right-hand operand: same alternatives as the left-hand side.
        self.state = 151
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SBHasmParser.DIRECTION]:
            self.state = 148
            self.direction()
            pass
        elif token in [SBHasmParser.MYITEM, SBHasmParser.NOTHING, SBHasmParser.ITEM, SBHasmParser.MEM]:
            self.state = 149
            self.items()
            pass
        elif token in [SBHasmParser.NUMBER]:
            self.state = 150
            self.number()
            pass
        else:
            raise NoViableAltException(self)

    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class CommentContext(ParserRuleContext):
    # ANTLR-generated context for the 'comment' rule: comment : COMMENT

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def COMMENT(self):
        return self.getToken(SBHasmParser.COMMENT, 0)

    def getRuleIndex(self):
        return SBHasmParser.RULE_comment

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterComment" ):
            listener.enterComment(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitComment" ):
            listener.exitComment(self)
def comment(self):
    """ANTLR-generated parse method: comment : COMMENT"""
    localctx = SBHasmParser.CommentContext(self, self._ctx, self.state)
    self.enterRule(localctx, 20, self.RULE_comment)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 153
        self.match(SBHasmParser.COMMENT)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ItemsContext(ParserRuleContext):
    # ANTLR-generated context for the 'items' rule:
    # items : item | mem | MYITEM | NOTHING

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def item(self):
        return self.getTypedRuleContext(SBHasmParser.ItemContext,0)

    def mem(self):
        return self.getTypedRuleContext(SBHasmParser.MemContext,0)

    def MYITEM(self):
        return self.getToken(SBHasmParser.MYITEM, 0)

    def NOTHING(self):
        return self.getToken(SBHasmParser.NOTHING, 0)

    def getRuleIndex(self):
        return SBHasmParser.RULE_items

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterItems" ):
            listener.enterItems(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitItems" ):
            listener.exitItems(self)
def items(self):
    """ANTLR-generated parse method: items : item | mem | MYITEM | NOTHING"""
    localctx = SBHasmParser.ItemsContext(self, self._ctx, self.state)
    self.enterRule(localctx, 22, self.RULE_items)
    try:
        self.enterOuterAlt(localctx, 1)
        # Select the alternative by one token of lookahead.
        self.state = 159
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SBHasmParser.ITEM]:
            self.state = 155
            self.item()
            pass
        elif token in [SBHasmParser.MEM]:
            self.state = 156
            self.mem()
            pass
        elif token in [SBHasmParser.MYITEM]:
            self.state = 157
            self.match(SBHasmParser.MYITEM)
            pass
        elif token in [SBHasmParser.NOTHING]:
            self.state = 158
            self.match(SBHasmParser.NOTHING)
            pass
        else:
            raise NoViableAltException(self)

    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ItemContext(ParserRuleContext):
    # ANTLR-generated context for the 'item' rule: item : ITEM

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def ITEM(self):
        return self.getToken(SBHasmParser.ITEM, 0)

    def getRuleIndex(self):
        return SBHasmParser.RULE_item

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterItem" ):
            listener.enterItem(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitItem" ):
            listener.exitItem(self)
def item(self):
    # Parse the `item` rule: matches a single ITEM token.
    # ANTLR-generated; `self.state` values index the generated ATN.
    localctx = SBHasmParser.ItemContext(self, self._ctx, self.state)
    self.enterRule(localctx, 24, self.RULE_item)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 161
        self.match(SBHasmParser.ITEM)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class WriteContext(ParserRuleContext):
    # ANTLR-generated parse-tree context for the `write` grammar rule.

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def WRITE(self):
        # Terminal node for the WRITE keyword token.
        return self.getToken(SBHasmParser.WRITE, 0)

    def number(self):
        # Child `number` sub-rule context, if present.
        return self.getTypedRuleContext(SBHasmParser.NumberContext,0)

    def direction(self):
        # Child `direction` sub-rule context, if present.
        return self.getTypedRuleContext(SBHasmParser.DirectionContext,0)

    def mem(self):
        # Child `mem` sub-rule context, if present.
        return self.getTypedRuleContext(SBHasmParser.MemContext,0)

    def getRuleIndex(self):
        return SBHasmParser.RULE_write

    def enterRule(self, listener:ParseTreeListener):
        # Listener hooks are optional; only invoked when implemented.
        if hasattr( listener, "enterWrite" ):
            listener.enterWrite(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitWrite" ):
            listener.exitWrite(self)
def write(self):
localctx = SBHasmParser.WriteContext(self, self._ctx, self.state)
self.enterRule(localctx, 26, self.RULE_write)
try:
self.enterOuterAlt(localctx, 1)
self.state = 163
self.match(SBHasmParser.WRITE)
self.state = | |
# <gh_stars>1-10  (dataset-scrape artifact, not Python; commented out so the file parses)
import pytest
import numpy as np
from opentrons.deck_calibration import endpoints
from opentrons.config import robot_configs
from opentrons import types
# Note that several tests in this file have target/expected values that do not
# accurately reflect robot operation, because of differences between return
# values from the driver during simulating vs. non-simulating modes. In
# particular, during simulating mode the driver's `position` method returns
# the xyz position of the tip of the pipette, but during non-simulating mode
# it returns a position that corresponds roughly to the gantry (e.g.: where the
# Smoothie board sees the position of itself--after a fashion). Simulating mode
# should be replaced with something that accurately reflects actual robot
# operation, and then these tests should be revised to match expected reality.
# ------------ Function tests (unit) ----------------------
async def test_add_and_remove_tip(dc_session, instruments):
    """attach_tip/detach_tip should track tip length on the session and the
    `has_tip` flag on the hardware, tolerating malformed and repeated calls.
    """
    hardware = dc_session.adapter
    hardware.reset()
    hardware.cache_instruments({
        types.Mount.LEFT: 'p10_single'})
    pip = hardware.attached_instruments[types.Mount.LEFT]
    dc_session.current_mount = types.Mount.LEFT
    mount = dc_session.current_mount
    dc_session.pipettes = {mount: pip}

    # Check malformed packet: no 'tipLength' key -> failure, state untouched
    res0 = await endpoints.attach_tip({})
    assert res0.success is False
    assert dc_session.tip_length is None
    assert hardware.attached_instruments[mount]['has_tip'] is False

    # Check correct attach command
    tip_length = 50
    res1 = await endpoints.attach_tip({'tipLength': tip_length})
    assert res1.success is True
    assert dc_session.tip_length == tip_length
    assert hardware.attached_instruments[mount]['has_tip'] is True

    # Check command with tip already attached: new length replaces the old
    res2 = await endpoints.attach_tip({'tipLength': tip_length + 5})
    assert res2.success is True
    assert dc_session.tip_length == tip_length + 5
    assert hardware.attached_instruments[mount]['has_tip'] is True

    # Check correct detach command
    res3 = await endpoints.detach_tip({})
    assert res3.success is True
    assert dc_session.tip_length is None
    assert hardware.attached_instruments[mount]['has_tip'] is False

    # Check command with no tip: detaching again still reports success
    res4 = await endpoints.detach_tip({})
    assert res4.success is True
    assert dc_session.tip_length is None
    assert hardware.attached_instruments[mount]['has_tip'] is False
async def test_save_xy(dc_session, instruments):
    """save_xy should record the current gantry x/y position under the
    requested calibration point key.

    Consistency fix: the `mount` local (== types.Mount.LEFT) is now used
    everywhere instead of repeating the enum literal.
    """
    hardware = dc_session.adapter
    mount = types.Mount.LEFT
    hardware.reset()
    hardware.cache_instruments({
        mount: 'p10_single'})
    pip = hardware.attached_instruments[mount]
    dc_session.pipettes = {mount: pip}
    dc_session.current_mount = mount
    dc_session.tip_length = 25
    dc_session.pipettes.get(mount)['has_tip'] = True
    dc_session.pipettes.get(mount)['tip_length'] = dc_session.tip_length
    hardware.add_tip(mount, dc_session.tip_length)
    hardware.home()

    # Move somewhere distinctive so the saved point is meaningful.
    x = 100
    y = 101
    hardware.move_to(mount, types.Point(x=x, y=y))

    point = '1'
    data = {
        'point': point
    }
    await endpoints.save_xy(data)

    actual = dc_session.points[point]
    coordinates = hardware.gantry_position(mount)
    expected = (
        coordinates.x,
        coordinates.y)
    assert actual == expected
async def test_save_z(dc_session, monkeypatch, instruments):
    """save_z should capture the current z height as the session z_value.

    Consistency fix: the `mount` local (== types.Mount.LEFT) is now used
    everywhere instead of repeating the enum literal.
    """
    dc_session.adapter.reset()
    hardware = dc_session.adapter
    model = 'p10_single_v1'

    # Z values were bleeding in from other tests, mock robot configs
    # to encapsulate this test
    fake_config = robot_configs.load()
    monkeypatch.setattr(hardware, 'config', fake_config)

    mount = types.Mount.LEFT
    hardware.reset()
    hardware.cache_instruments({
        mount: 'p10_single'})
    pip = hardware.attached_instruments[mount]
    dc_session.pipettes = {mount: pip}
    dc_session.current_mount = mount
    dc_session.current_model = model
    dc_session.tip_length = 25
    dc_session.pipettes.get(mount)['has_tip'] = True
    dc_session.pipettes.get(mount)['tip_length'] = dc_session.tip_length

    z_target = 80.0
    hardware.home()
    # Unsure whether to use move_to or move_rel
    hardware.move_to(
        mount, types.Point(x=0, y=0, z=z_target))
    await endpoints.save_z({})

    new_z = dc_session.z_value
    expected_z = z_target
    assert new_z == expected_z
async def test_save_calibration_file(dc_session, monkeypatch):
    """save_transform should compute the gantry calibration from the saved
    points, apply it in memory, and persist it exactly once."""
    hardware = dc_session.adapter
    hardware.reset()
    # Offset every expected point by +0.3 in y so the solved transform has
    # a known, non-trivial translation.
    expected_pos = endpoints.expected_points()
    dc_session.points = {
        k: (v[0], v[1] + 0.3)
        for k, v in expected_pos.items()}
    dc_session.z_value = 0.2

    # Capture persistence calls instead of touching the filesystem.
    persisted_data = []

    def dummy_save(config, filename=None, tag=None):
        nonlocal persisted_data
        persisted_data.append((config, filename, tag))

    monkeypatch.setattr(robot_configs, 'save_deck_calibration', dummy_save)

    await endpoints.save_transform({})

    in_memory = hardware.config.gantry_calibration
    assert len(persisted_data) == 1  # back up now happens at beginning of sess
    assert persisted_data[0][0].gantry_calibration == in_memory

    # Pure translation: identity rotation, y shifted by 0.3, z by 0.2.
    expected = [[1.0, 0.0, 0.0, 0.0],
                [0.0, 1.0, 0.0, 0.3],
                [0.0, 0.0, 1.0, 0.2],
                [0.0, 0.0, 0.0, 1.0]]
    assert np.allclose(in_memory, expected)
async def test_transform_calculation(dc_session, monkeypatch):
    # This transform represents a 5 degree rotation, with a shift in x, y, & z.
    # Values for the points and expected transform come from a hand-crafted
    # transformation matrix and the points that would generate that matrix.
    hardware = dc_session.adapter

    # Named constants keep the matrix literal below visually aligned.
    cos_5deg_p = 0.9962
    sin_5deg_p = 0.0872
    sin_5deg_n = -sin_5deg_p
    const_zero = 0.0
    const_one_ = 1.0
    delta_x___ = 0.3
    delta_y___ = 0.4
    delta_z___ = 0.5
    expected_transform = [
        [cos_5deg_p, sin_5deg_p, const_zero, delta_x___],
        [sin_5deg_n, cos_5deg_p, const_zero, delta_y___],
        [const_zero, const_zero, const_one_, delta_z___],
        [const_zero, const_zero, const_zero, const_one_]]

    dc_session.z_value = 0.5
    # Points measured under the hand-crafted transform above.
    dc_session.points = {
        '1': [13.16824337, 8.30855312],
        '2': [380.50507635, -23.82925545],
        '3': [34.87002331, 256.36103295]
    }

    await endpoints.save_transform({})

    assert np.allclose(hardware.config.gantry_calibration, expected_transform)
# ------------ Session and token tests ----------------------
@pytest.mark.parametrize('left,right,correct', [
    ('p300_multi_v1', 'p10_single_v1', 'p10_single_v1'),
    ('p300_single_v1', 'p10_single_v1', 'p10_single_v1'),
    ('p10_multi_v1', 'p300_multi_v1', 'p300_multi_v1'),
    (None, 'p10_single_v1', 'p10_single_v1'),
    ('p300_multi_v1', None, 'p300_multi_v1'),
    ('p10_single_v1', 'p300_multi_v1', 'p10_single_v1')])
async def test_create_session(hardware, monkeypatch, left, right, correct):
    """
    Tests that the call to initiate a session manager for factory
    calibration returns a good token, along with the correct preferred pipette
    """
    dummy_token = '<PASSWORD>'

    def uuid_mock():
        return dummy_token

    monkeypatch.setattr(endpoints, '_get_uuid', uuid_mock)

    # each tuple in this list is (left-mount, right-mount, correct-choice)
    # NOTE(review): reaches into the simulated backend to install pipettes
    # directly, bypassing the public API.
    hardware.managed_obj._backend._attached_instruments = {
        types.Mount.LEFT: {'model': left, 'id': None},
        types.Mount.RIGHT: {'model': right, 'id': None}
    }
    await hardware.cache_instruments()
    resp = await endpoints.create_session(False, hardware)

    # Clear module-level session state so later tests start clean.
    endpoints.session_wrapper.session = None

    assert resp.token == dummy_token
    assert resp.pipette.get('model') == correct
async def test_create_session_fail(monkeypatch, hardware):
    """
    Tests that the call to initiate a session manager for factory
    calibration fails with forbidden error
    """
    from opentrons.legacy_api.robot import Robot
    dummy_token = 'Test <PASSWORD>'

    def uuid_mock():
        return dummy_token

    monkeypatch.setattr(endpoints, '_get_uuid', uuid_mock)

    # With no pipette model on either mount, session creation must refuse.
    def dummy_get_pipettes(self):
        return {
            'left': {
                'mount_axis': 'z',
                'plunger_axis': 'b',
                'model': None
            },
            'right': {
                'mount_axis': 'a',
                'plunger_axis': 'c',
                'model': None
            }
        }

    monkeypatch.setattr(Robot, 'get_attached_pipettes', dummy_get_pipettes)

    with pytest.raises(endpoints.SessionForbidden,
                       match="Error, pipette not recognized"):
        await endpoints.create_session(force=False, hardware=hardware)

    # No session should have been left behind on failure.
    assert endpoints.session_wrapper.session is None
async def test_release(hardware, monkeypatch, dc_session):
    """
    Tests that the call to initiate a session manager for factory
    calibration returns an error if a session is in progress and that calling
    release will enable starting a new session
    """
    # A second create while dc_session is active must be refused.
    with pytest.raises(endpoints.SessionInProgress,
                       match="Error, session in progress"):
        await endpoints.create_session(False, hardware)

    # Release
    release_result = await endpoints.dispatch(dc_session.id, "release", None)
    assert release_result.success is True
    assert endpoints.session_wrapper.session is None

    # Set up pipettes
    await hardware.cache_instruments({
        types.Mount.RIGHT: 'p300_multi'
    })

    # Create a new session
    create_result = await endpoints.create_session(False, hardware)
    assert create_result is not None
async def test_forcing_new_session(hardware, monkeypatch, dc_session):
    """
    Tests that the call to initiate a session manager for factory
    calibration returns an error if a session is in progress, and can be
    overridden.
    """
    test_model = 'p300_multi_v1'
    dummy_token = '<PASSWORD>'

    def uuid_mock():
        return dummy_token

    async def mock_release(data):
        return data

    monkeypatch.setattr(endpoints, '_get_uuid', uuid_mock)

    # Without force, a second create while dc_session is active is refused.
    with pytest.raises(endpoints.SessionInProgress,
                       match="Error, session in progress"):
        await endpoints.create_session(False, hardware)

    monkeypatch.setattr(endpoints, 'release', mock_release)

    # Force creation of new session
    resp = await endpoints.create_session(True, hardware)
    assert resp.token == dummy_token
    assert resp.pipette == {
        'mount': 'right',
        'model': test_model
    }
async def test_incorrect_token(dc_session):
    """
    Dispatching with a token that does not match the token of the active
    session must be rejected with a SessionForbidden error.
    """
    bad_request = {
        'mount': 'left',
        'model': 'p10_single_v1'
    }
    with pytest.raises(endpoints.SessionForbidden,
                       match="Invalid token: FAKE TOKEN"):
        await endpoints.dispatch(token='<PASSWORD>',
                                 command='init pipette',
                                 command_data=bad_request)
async def test_invalid_command(dc_session):
    """
    An unrecognized command name passed to dispatch must raise a
    SessionForbidden error, even with a valid session token.
    """
    payload = {
        'mount': 'left',
        'model': 'p10_single_v1'
    }
    with pytest.raises(endpoints.SessionForbidden,
                       match="Command \"do something wrong\""):
        await endpoints.dispatch(token=dc_session.id,
                                 command='do something wrong',
                                 command_data=payload)
# ------------ Router tests (integration) ----------------------
# TODO(mc, 2018-05-02): this does not adequately test z to smoothie axis logic
async def test_set_and_jog_integration(hardware, monkeypatch):
    """
    Test that the jog function works.
    Note that in order for the jog function to work, the following must
    be done:
    1. Create a session manager
    Then jog requests will work as expected.
    """
    test_model = 'p300_multi'
    # Why does this need to be awaited for a synch adapter
    await hardware.cache_instruments(
        {types.Mount.RIGHT: test_model})
    dummy_token = 'Test <PASSWORD>'

    def uuid_mock():
        return dummy_token

    monkeypatch.setattr(endpoints, '_get_uuid', uuid_mock)

    token_res = await endpoints.create_session(False, hardware)
    assert token_res.token == dummy_token
    token = token_res.token

    axis = 'z'
    direction = 1
    step = 3
    # left pipette z carriage motor is smoothie axis "Z", right is "A"

    sess = endpoints.session_wrapper.session
    sess.adapter.home()
    # Record the pre-jog position so the expected result can be derived.
    prior_x, prior_y, prior_z = endpoints.position(
        sess.current_mount, sess.adapter, sess.cp)

    resp = await endpoints.dispatch(
        token=token,
        command='jog',
        command_data={
            'axis': axis,
            'direction': direction,
            'step': step
        })

    assert resp.success is True
    msg = resp.message
    # The response message embeds the new (x, y, z); only z should move.
    assert '{}'.format((prior_x, prior_y, prior_z + step)) in msg

    # Clear module-level session state so later tests start clean.
    endpoints.session_wrapper.session = None
@pytest.mark.parametrize(
    argnames="command_data",
    argvalues=[
        {},
        {"point": "Z"},
        {"point": "att"},
        {"point": None}
    ])
async def test_move_no_point(command_data, dc_session):
    """A 'move' request with a missing or unknown point name must fail
    and report the set of valid point names."""
    result = await endpoints.dispatch(token=dc_session.id,
                                      command='move',
                                      command_data=command_data)
    assert result.success is False
    expected_msg = ('"point" must be one of "1", "2", "3",'
                    ' "safeZ", "attachTip"')
    assert result.message == expected_msg
async def test_move_basic(dc_session):
    """Moving to the attach-tip position succeeds and reports the target
    coordinates in the response message."""
    dc_session.current_mount = endpoints.Mount.RIGHT
    request = {
        "point": "attachTip"
    }
    response = await endpoints.dispatch(token=dc_session.id,
                                        command='move',
                                        command_data=request)
    assert response.success is True
    assert response.message == "Moved to (200, 90, 130)"
async def test_move_basic_typed(dc_session):
dc_session.current_mount = endpoints.Mount.RIGHT
resp = await endpoints.dispatch(
token=dc_session.id,
| |
if r < 0:
if self.console:
print("Found bad r", r, r_dry, sp)
raised = True
if np.abs(ss-S0) > 1e-4:
if self.console:
print("Found S discrepancy", ss, S0, r_dry)
raised = True
if raised:
raise ParcelModelError("Couldn't calculate initial aerosol population wet sizes.")
out['r0s'] = r0s
# c) compute equilibrium droplet water content
water_vol = lambda r0, r_dry, Ni: (4.*np.pi/3.)*rho_w*Ni*(r0**3 - r_dry**3)
wc0 = np.sum([water_vol(r0, r_dry, Ni) for r0, r_dry, Ni in zip(r0s, r_drys, Nis)])
wc0 /= rho_air(T0, P0, 0.)
# d) compute initial ice water content
wi0 = 0.0
# e) concatenate into initial conditions arrays
y0 = [z0, P0, T0, wv0, wc0, wi0, S0]
if self.console:
print("PARCEL INITIAL CONDITIONS")
print((" " + "{:>9} "*6).format("P (hPa)", "T (K)", "wv (g/kg)",
"wc (g/kg)", "wi (g/kg)", "S"))
print(" " + "{:9.1f} {:9.2f} {:9.1e} {:9.1e} {:9.1e} {:9.3f}".format(
P0/100., T0, wv0*1e3, wc0*1e3, wi0*1e3, S0))
y0.extend(r0s)
y0 = np.array(y0)
out['y0'] = y0
self.y0 = y0
# Store the model configuration
self._r0s = r0s
self._r_drys = r_drys
self._kappas = kappas
self._Nis = Nis
self._nr = len(r_drys)
self._model_set = True
if self.console:
print("Initial conditions set successfully.")
def run(self, t_end,
        output_dt=1., solver_dt=None,
        max_steps=1000, solver="odeint", output_fmt="dataframes",
        terminate=False, terminate_depth=100., **solver_args):
    """ Run the parcel model simulation.
    Once the model has been instantiated, a simulation can immediately be
    performed by invoking this method. The numerical details underlying the
    simulation and the times over which to integrate can be flexibly set
    here.
    **Time** -- The user must specify two timesteps: `output_dt`, which is the
    timestep between output snapshots of the state of the parcel model, and
    `solver_dt`, which is the the interval of time before the ODE integrator
    is paused and re-started. It's usually okay to use a very large `solver_dt`,
    as `output_dt` can be interpolated from the simulation. In some cases though
    a small `solver_dt` could be useful to force the solver to use smaller
    internal timesteps.
    **Numerical Solver** -- By default, the model will use the `odeint` wrapper
    of LSODA shipped by default with scipy. Some fine-tuning of the solver tolerances
    is afforded here through the `max_steps`. For other solvers, a set of optional
    arguments `solver_args` can be passed.
    **Solution Output** -- Several different output formats are available by default.
    Additionally, the output arrays are saved with the `ParcelModel` instance so they
    can be used later.
    Parameters
    ----------
    t_end : float
        Total time over interval over which the model should be integrated
    output_dt : float
        Timestep intervals to report model output.
    solver_dt : float
        Timestep interval for calling solver integration routine.
    max_steps : int
        Maximum number of steps allowed by solver to satisfy error tolerances
        per timestep.
    solver : {'odeint', 'lsoda', 'lsode', 'vode', cvode'}
        Choose which numerical solver to use:
        * `'odeint'`: LSODA implementation from ODEPACK via
          SciPy's integrate module
        * `'lsoda'`: LSODA implementation from ODEPACK via odespy
        * `'lsode'`: LSODE implementation from ODEPACK via odespy
        * `'vode'` : VODE implementation from ODEPACK via odespy
        * `'cvode'` : CVODE implementation from Sundials via Assimulo
        * `'lsodar'` : LSODAR implementation from Sundials via Assimulo
    output_fmt : str, one of {'dataframes', 'arrays', 'smax'}
        Choose format of solution output.
    terminate : boolean
        End simulation at or shortly after a maximum supersaturation has been achieved
    terminate_depth : float, optional (default=100.)
        Additional depth (in meters) to integrate after termination criterion
        reached.
    Returns
    -------
    DataFrames, array, or float
        Depending on what was passed to the *output* argument, different
        types of data might be returned:
        - `dataframes': (default) will process the output into
           two pandas DataFrames - the first one containing profiles
           of the meteorological quantities tracked in the model,
           and the second a dictionary of DataFrames with one for
           each AerosolSpecies, tracking the growth in each bin
           for those species.
        - 'arrays': will return the raw output from the solver
           used internally by the parcel model - the state vector
           `y` and the evaluated timesteps converted into height
           coordinates.
        - 'smax': will only return the maximum supersaturation
           value achieved in the simulation.
    Raises
    ------
    ParcelModelError
        The parcel model failed to complete successfully or failed to initialize.
    See Also
    --------
    der : right-hand side derivative evaluated during model integration.
    """
    from . integrator import Integrator

    if output_fmt not in ["dataframes", "arrays", "smax"]:
        # BUG FIX: previously interpolated the module `output` instead of
        # the `output_fmt` argument, producing a garbled error message.
        raise ParcelModelError(
            "Invalid value ('%s') specified for output format." % output_fmt)

    if solver_dt is None:
        solver_dt = 10.*output_dt

    # Lazily initialize the model state if the caller skipped setup.
    if not self._model_set:
        self._setup_run()

    y0 = self.y0
    r_drys = self._r_drys
    kappas = self._kappas
    Nis = self._Nis
    nr = self._nr

    # Setup/run integrator: prefer the compiled Cython derivative, fall
    # back to the pure-Python implementation.
    try:
        from .parcel_aux import der as der_fcn
    except ImportError:
        print("Could not load Cython derivative; using Python version.")
        from .parcel import der as der_fcn

    # Is the updraft speed a function of time?
    v_is_func = hasattr(self.V, '__call__')
    if v_is_func:  # Re-wrap the function to correctly figure out V
        orig_der_fcn = der_fcn

        def der_fcn(y, t, *args):
            # BUG FIX: `*args` arrives as a tuple, which does not support
            # item assignment; copy to a list before substituting V(t).
            args = list(args)
            args[3] = self.V(t)
            return orig_der_fcn(y, t, *args)

    # Will the simulation terminate early?
    if not terminate:
        terminate_depth = 0.
    else:
        if terminate_depth <= 0.:
            raise ParcelModelError("`terminate_depth` must be greater than 0!")

    if self.console:
        print()
        print("Integration control")
        print("----------------------------")
        print("        output dt: ", output_dt)
        print("    max solver dt: ", solver_dt)
        print(" solver int steps: ", int(solver_dt/output_dt))
        print("      termination: %r (%5dm)" % (terminate, terminate_depth))

    args = [nr, r_drys, Nis, self.V, kappas, self.accom]
    integrator_type = Integrator.solver(solver)
    integrator = integrator_type(der_fcn, output_dt, solver_dt, y0, args,
                                 terminate=terminate, terminate_depth=terminate_depth,
                                 console=self.console,
                                 **solver_args)
    success = False
    try:
        # Pack args as tuple for solvers
        args = tuple(args)
        if self.console:
            print("\nBEGIN INTEGRATION ->\n")
        x, t, success = integrator.integrate(t_end)
    except ValueError as e:
        raise ParcelModelError("Something failed during model integration: %r" % e)
    # BUG FIX: the success check used to live in a `finally` clause, whose
    # re-raise masked the more informative ParcelModelError above.
    if not success:
        raise ParcelModelError("Something failed during model integration.")

    # Success if reached this point!
    if self.console:
        print("\nEND INTEGRATION <-\n")

    self.x = x
    self.heights = self.x[:, c.STATE_VAR_MAP['z']]
    self.time = t

    if output_fmt == "dataframes":
        return output.parcel_to_dataframes(self)
    elif output_fmt == "arrays":
        return self.x, self.heights
    elif output_fmt == "smax":
        S = self.x[:, c.STATE_VAR_MAP['S']]
        return S.max()
def write_summary(self, parcel_data, aerosol_data, out_filename):
    """ Write a quick and dirty summary of given parcel model output to a
    text file at `out_filename`.

    The summary includes the initial conditions, the aerosol populations,
    the maximum supersaturation reached, and the equilibrium activated
    fraction of each species at S_max.
    """
    from .activation import lognormal_activation

    # Check if parent dir of out_filename exists, and if not, create it.
    # BUG FIX: guard against a bare filename (dirname == ''), for which
    # os.makedirs('') would raise.
    out_dir = os.path.dirname(out_filename)
    if out_dir and not os.path.exists(out_dir):
        os.makedirs(out_dir)

    # Open a file to write to
    with open(out_filename, 'w') as out_file:
        # Write header
        out_file.write("PARCEL MODEL\n")
        out_file.write("--------------------------------------\n")

        # Write initial conditions
        out_file.write("V = %f\n" % self.V)
        out_file.write("P0 = %f\n" % self.P0)
        out_file.write("T0 = %f\n" % self.T0)
        out_file.write("S0 = %f\n" % self.S0)
        out_file.write("--------------------------------------\n")

        # Write aerosol details
        for aerosol in self.aerosols:
            out_file.write(aerosol.species+" = "+aerosol.summary_str()+"\n")
        out_file.write("--------------------------------------\n")

        # Write simulation summary results
        # 1) Maximum supersaturation in parcel
        S_max = parcel_data['S'].max()
        S_max_idx = np.argmax(parcel_data.S)
        out_file.write("S_max = %f\n" % S_max)

        # 2) Activated fraction of each species
        # BUG FIX: `.ix` was removed from pandas; argmax yields a
        # positional index, so `.iloc` is the correct accessor.
        T_at_S_max = parcel_data['T'].iloc[S_max_idx]
        total_number = 0.0
        total_activated = 0.0
        for aerosol in self.aerosols:
            act_frac = lognormal_activation(S_max, aerosol.mu*1e-6, aerosol.sigma,
                                            aerosol.N, aerosol.kappa, T=T_at_S_max)
            act_num = act_frac*aerosol.N
            out_file.write("%s - eq_act_frac = %f (%3.2f/%3.2f)\n" %
                           (aerosol.species, act_frac, act_num, aerosol.N))
            total_number += aerosol.N
            total_activated += act_num
        total_act_frac = total_activated/total_number
        out_file.write("Total activated fraction = %f (%3.2f/%3.2f)\n" %
                       (total_act_frac, total_activated, total_number))
def save(self, filename=None, format="nc", other_dfs=None):
    # Persist the model's output via the `output` helper module
    # (netCDF by default); `other_dfs` are extra DataFrames to include.
    output.write_parcel_output(filename=filename, format=format, parcel=self,
                               other_dfs=other_dfs)
@staticmethod
def write_csv(parcel_data, aerosol_data, output_dir=None):
"""Write output to CSV files.
Utilize pandas fast output procedures to write the model run output to a
set of CSV files. Written as a static method so that any prior saved
parcel model output can be saved to disk in batch, even after its
associated model has been destroyed.
**Args:
* *parcel_data* -- Pandas DataFrame of the parcel thermodynamic profile
* *aerosol_data* -- dictionary of pandas DataFrames with the aerosol radii \
at each model step
* *output_dir* -- String to location where output should be saved; if not \
provided then the model will save to the current path.
"""
if not output_dir:
output_dir = os.getcwd()
# Write parcel data
parcel_data.to_csv(os.path.join(output_dir, "parcel.csv"))
# Write aerosol data
for | |
# long time
sage: G.shortest_path_lengths(0, by_weight=True)
{0: 0, 1: 1, 2: 2, 3: 3, 4: 2}
Using a weight function::
sage: D = DiGraph([(0,1,{'weight':1}),(1,2,{'weight':3}),(0,2,{'weight':5})])
sage: weight_function = lambda e:e[2]['weight']
sage: D.shortest_path_lengths(1, algorithm='Dijkstra_NetworkX', by_weight=False)
{1: 0, 2: 1}
sage: D.shortest_path_lengths(0, weight_function=weight_function)
{0: 0, 1: 1, 2: 4}
sage: D.shortest_path_lengths(1, weight_function=weight_function)
{1: 0, 2: 3}
Negative weights::
sage: D = DiGraph([(0,1,{'weight':-1}),(1,2,{'weight':3}),(0,2,{'weight':5})])
sage: D.shortest_path_lengths(0, weight_function=weight_function)
{0: 0, 1: -1, 2: 2}
Negative cycles::
sage: D = DiGraph([(0,1,{'weight':-5}),(1,2,{'weight':3}),(2,0,{'weight':1})])
sage: D.shortest_path_lengths(0, weight_function=weight_function)
Traceback (most recent call last):
...
ValueError: the graph contains a negative cycle
Checking that distances are equal regardless of the algorithm used::
sage: g = graphs.Grid2dGraph(5,5)
sage: d1 = g.shortest_path_lengths((0,0), algorithm="BFS")
sage: d2 = g.shortest_path_lengths((0,0), algorithm="Dijkstra_NetworkX")
sage: d3 = g.shortest_path_lengths((0,0), algorithm="Dijkstra_Boost")
sage: d4 = g.shortest_path_lengths((0,0), algorithm="Bellman-Ford_Boost")
sage: d1 == d2 == d3 == d4
True
"""
if weight_function is not None:
by_weight = True
elif by_weight:
def weight_function(e):
return 1 if e[2] is None else e[2]
else:
def weight_function(e):
return 1
if algorithm is None and not by_weight:
algorithm = 'BFS'
if by_weight and check_weight:
self._check_weight_function(weight_function)
if algorithm == 'BFS':
if by_weight:
raise ValueError("the 'BFS' algorithm does not work on weighted graphs")
return self._backend.shortest_path_all_vertices(u, cutoff=None, distance_flag=True)
elif algorithm == 'Dijkstra_NetworkX':
import networkx
# If this is not present, an error might be raised by NetworkX
if self.num_verts() == 1 and next(self.vertex_iterator()) == u:
return {u: [u]}
if by_weight:
if self.is_directed():
G = networkx.DiGraph([(e[0], e[1], {'weight': weight_function(e)}) for e in self.edge_iterator()])
else:
G = networkx.Graph([(e[0], e[1], {'weight': weight_function(e)}) for e in self.edge_iterator()])
else:
# Needed to remove labels.
if self.is_directed():
G = networkx.DiGraph(list(self.edges(labels=False, sort=False)))
else:
G = networkx.Graph(list(self.edges(labels=False, sort=False)))
G.add_nodes_from(self)
return networkx.single_source_dijkstra_path_length(G, u)
elif algorithm in ['Dijkstra_Boost', 'Bellman-Ford_Boost', None]:
from sage.graphs.base.boost_graph import shortest_paths
return shortest_paths(self, u, weight_function, algorithm)[0]
else:
raise ValueError('unknown algorithm "{}"'.format(algorithm))
def shortest_path_all_pairs(self, by_weight=False, algorithm=None,
weight_function=None, check_weight=True):
r"""
Return a shortest path between each pair of vertices.
INPUT:
- ``by_weight`` -- boolean (default: ``False``); if ``True``, the edges
in the graph are weighted, otherwise all edges have weight 1
- ``algorithm`` -- string (default: ``None``); one of the following
algorithms:
- ``'BFS'``: the computation is done through a BFS centered on each
vertex successively. Works only if ``by_weight==False``.
- ``'Floyd-Warshall-Cython'``: the Cython implementation of the
Floyd-Warshall algorithm. Works only if ``by_weight==False``.
- ``'Floyd-Warshall-Python'``: the Python implementation of the
Floyd-Warshall algorithm. Works also with weighted graphs, even with
negative weights (but no negative cycle is allowed).
- ``'Floyd-Warshall_Boost'``: the Boost implementation of the
Floyd-Warshall algorithm. Works also with weighted graphs, even with
negative weights (but no negative cycle is allowed).
- ``'Dijkstra_NetworkX'``: the Dijkstra algorithm, implemented in
NetworkX. It works with weighted graphs, but no negative weight is
allowed.
- ``'Dijkstra_Boost'``: the Dijkstra algorithm, implemented in Boost
(works only with positive weights).
- ``'Johnson_Boost'``: the Johnson algorithm, implemented in Boost
(works also with negative weights, if there is no negative cycle).
- ``None`` (default): Sage chooses the best algorithm: ``'BFS'`` if
``by_weight`` is ``False``, ``'Dijkstra_Boost'`` if all weights are
positive, ``'Floyd-Warshall_Boost'`` otherwise.
- ``weight_function`` -- function (default: ``None``); a function that
takes as input an edge ``(u, v, l)`` and outputs its weight. If not
``None``, ``by_weight`` is automatically set to ``True``. If ``None``
and ``by_weight`` is ``True``, we use the edge label ``l``, if ``l``
is not ``None``, else ``1`` as a weight.
- ``check_weight`` -- boolean (default: ``True``); if ``True``, we check
that the weight_function outputs a number for each edge
OUTPUT:
A tuple ``(dist, pred)``. They are both dicts of dicts. The first
indicates the length ``dist[u][v]`` of the shortest weighted path from
`u` to `v`. The second is a compact representation of all the paths - it
indicates the predecessor ``pred[u][v]`` of `v` in the shortest path
from `u` to `v`.
.. NOTE::
Only reachable vertices are present in the dictionaries.
.. NOTE::
There is a Cython version of this method that is usually much faster
for large graphs, as most of the time is actually spent building the
final double dictionary. Everything on the subject is to be found in
the :mod:`~sage.graphs.distances_all_pairs` module.
EXAMPLES:
Some standard examples (see :meth:`~GenericGraph.shortest_paths` for
more examples on how to use the input variables)::
sage: G = Graph( { 0: {1: 1}, 1: {2: 1}, 2: {3: 1}, 3: {4: 2}, 4: {0: 2} }, sparse=True)
sage: G.plot(edge_labels=True).show() # long time
sage: dist, pred = G.shortest_path_all_pairs(by_weight = True)
sage: dist
{0: {0: 0, 1: 1, 2: 2, 3: 3, 4: 2}, 1: {0: 1, 1: 0, 2: 1, 3: 2, 4: 3}, 2: {0: 2, 1: 1, 2: 0, 3: 1, 4: 3}, 3: {0: 3, 1: 2, 2: 1, 3: 0, 4: 2}, 4: {0: 2, 1: 3, 2: 3, 3: 2, 4: 0}}
sage: pred
{0: {0: None, 1: 0, 2: 1, 3: 2, 4: 0}, 1: {0: 1, 1: None, 2: 1, 3: 2, 4: 0}, 2: {0: 1, 1: 2, 2: None, 3: 2, 4: 3}, 3: {0: 1, 1: 2, 2: 3, 3: None, 4: 3}, 4: {0: 4, 1: 0, 2: 3, 3: 4, 4: None}}
sage: pred[0]
{0: None, 1: 0, 2: 1, 3: 2, 4: 0}
sage: G = Graph( { 0: {1: {'weight':1}}, 1: {2: {'weight':1}}, 2: {3: {'weight':1}}, 3: {4: {'weight':2}}, 4: {0: {'weight':2}} }, sparse=True)
sage: dist, pred = G.shortest_path_all_pairs(weight_function = lambda e:e[2]['weight'])
sage: dist
{0: {0: 0, 1: 1, 2: 2, 3: 3, 4: 2}, 1: {0: 1, 1: 0, 2: 1, 3: 2, 4: 3}, 2: {0: 2, 1: 1, 2: 0, 3: 1, 4: 3}, 3: {0: 3, 1: 2, 2: 1, 3: 0, 4: 2}, 4: {0: 2, 1: 3, 2: 3, 3: 2, 4: 0}}
sage: pred
{0: {0: None, 1: 0, 2: 1, 3: 2, 4: 0}, 1: {0: 1, 1: None, 2: 1, 3: 2, 4: 0}, 2: {0: 1, 1: 2, 2: None, 3: 2, 4: 3}, 3: {0: 1, 1: 2, 2: 3, 3: None, 4: 3}, 4: {0: 4, 1: 0, 2: 3, 3: 4, 4: None}}
So for example the shortest weighted path from `0` to `3` is obtained as
follows. The predecessor of `3` is ``pred[0][3] == 2``, the predecessor
of `2` is ``pred[0][2] == 1``, and the predecessor of `1` is
``pred[0][1] == 0``.
::
sage: G = Graph( { 0: {1:None}, 1: {2:None}, 2: {3: 1}, 3: {4: 2}, 4: {0: 2} }, sparse=True )
sage: G.shortest_path_all_pairs()
({0: {0: 0, 1: 1, 2: 2, 3: 2, 4: 1},
1: {0: 1, 1: 0, 2: 1, 3: 2, 4: 2},
2: {0: 2, 1: 1, 2: 0, 3: 1, 4: 2},
3: {0: 2, 1: 2, 2: 1, 3: 0, 4: 1},
4: {0: 1, 1: 2, 2: 2, 3: 1, 4: 0}},
{0: {0: None, 1: 0, 2: 1, 3: 4, 4: 0},
1: {0: 1, 1: None, 2: 1, 3: 2, 4: 0},
2: {0: 1, 1: 2, 2: None, 3: 2, 4: 3},
3: {0: 4, 1: 2, 2: 3, 3: None, 4: 3},
4: {0: 4, 1: 0, 2: 3, 3: 4, 4: None}})
sage: G.shortest_path_all_pairs(weight_function=lambda e:(e[2] if e[2] is not None else 1))
({0: {0: 0, 1: 1, 2: 2, 3: 3, 4: 2},
1: {0: 1, 1: 0, 2: 1, 3: 2, 4: 3},
2: {0: 2, 1: 1, 2: 0, 3: 1, 4: 3},
3: {0: 3, 1: 2, 2: 1, 3: 0, 4: 2},
4: {0: 2, 1: 3, 2: 3, 3: 2, 4: 0}},
{0: {0: None, 1: 0, 2: 1, 3: 2, 4: 0},
1: {0: 1, 1: None, 2: 1, 3: 2, 4: 0},
2: {0: 1, 1: 2, 2: None, 3: 2, 4: 3},
3: {0: 1, 1: 2, 2: 3, 3: None, 4: 3},
4: {0: 4, 1: 0, 2: 3, 3: 4, 4: None}})
| |
# <reponame>losek1/Sounder5 <gh_stars>1-10  (dataset-scrape artifact, not Python; commented out so the file parses)
try:
from tkinter import Tk, ttk, StringVar, BooleanVar, DoubleVar, Canvas, Event, IntVar, PhotoImage
from tkinter.filedialog import askdirectory, askopenfilename, asksaveasfile
from tkinter.messagebox import askyesno
from os.path import isfile, join, isdir, basename, abspath, join, splitext, dirname, exists
from os import startfile, listdir, walk, getpid
from json import load, dump
from json.decoder import JSONDecodeError
from logging import basicConfig, error
from traceback import format_exc
from PIL import Image, ImageTk
from io import BytesIO
from random import choices, shuffle
from string import ascii_uppercase, digits
from Components.SystemTheme import get_theme
# from Components.Debugger import Debugger
from Components.SongMenu import SongMenu
from Components.DirWatcher import DirWatcher
from requests import get
from threading import Thread
from mutagen.mp3 import MP3
from mutagen.flac import FLAC
from mutagen.oggvorbis import OggVorbis
from difflib import SequenceMatcher
from re import findall
from pygame import mixer
# from time import sleep
from win10toast import ToastNotifier
from psutil import Process
from typing import Union
import ctypes
from time import sleep
except ImportError as err:
exit(err)
class Sounder(Tk):
    def __init__(self) -> None:
        """Create the main Sounder window and bootstrap every subsystem.

        The call order below is significant: settings are loaded before the
        layout/theme (both read them), icons are loaded before the panels
        that reference them, and the window stays withdrawn until styling is
        applied so the user never sees an unstyled frame.
        """
        super().__init__()
        # init logging errors
        self.init_logging()
        # hide window
        self.withdraw()
        # configure window
        self.minsize(800, 500)
        self.title('Sounder')
        # route the window-close button through our own cleanup handler
        self.protocol('WM_DELETE_WINDOW', self.exit_app)
        # self.bind('<F12>', lambda _: Debugger(self))
        # self.attributes('-alpha', 0.9)
        # init notifications
        # NOTE(review): done on a daemon thread, presumably because
        # ToastNotifier construction is slow -- confirm it is thread-safe.
        Thread(target=self.init_notifications, daemon=True).start()
        # init settings
        self.init_settings()
        self.apply_settings()
        # init layout
        self.init_layout()
        # load icons
        self.load_icons()
        # init theme
        self.apply_theme()
        # init screen
        self.deiconify()
        self.init_important_panels()
        # self.init_important_panels()
        self.update_idletasks()
        # init ui
        self.init_ui()
        # init player
        self.init_player()
        # show main panel; the short delay lets pending geometry events settle
        self.after(50, lambda: self.player_panel.lift())
    def init_important_panels(self) -> None:
        """Build the two always-available full-window overlay panels.

        * error panel -- raised by log() when an unhandled exception occurs;
          offers Exit / Ignore / Open Logs actions.
        * init panel -- logo splash shown while the rest of the UI is built;
          panels created later are placed above it.
        """
        try:
            # error panel
            self.error_panel: ttk.Frame = ttk.Frame(self)
            error_content: ttk.Frame = ttk.Frame(self.error_panel)
            ttk.Label(error_content, image=self.icons['error'], text='Something went wrong', compound='top', style='second.TLabel').pack(side='top')
            # log() overwrites this label text with the last traceback line
            self.error_label: ttk.Label = ttk.Label(error_content, text='We are unable to display the error message!', style='third.TLabel')
            self.error_label.pack(side='top')
            ttk.Button(error_content, text='Exit', style='third.TButton', command=self.exit_app).pack(side='top', pady=(50, 0), padx=10)
            # Ignore just lowers the overlay so the app keeps running
            ttk.Button(error_content, text='Ignore', style='third.TButton', command=lambda: self.error_panel.lower()).pack(side='top', pady=(10, 0), padx=10)
            ttk.Button(error_content, text='Open Logs', style='third.TButton', command=self.open_logs).pack(side='top', pady=(10, 0), padx=10)
            error_content.place(relx=.5, rely=.5, anchor='center')
            ttk.Label(self.error_panel, text=f'version: {self.version[0]} [build: {self.version[1]}]', style='third.TLabel').pack(side='bottom', anchor='w', padx=10, pady=5)
            self.error_panel.place(x=0, y=0, relwidth=1, relheight=1)
            # init panel
            init_panel: ttk.Frame = ttk.Frame(self, style='second.TFrame')
            ttk.Label(init_panel, image=self.icons['logo']).place(relx=.5, rely=.5, anchor='center')
            init_panel.place(x=0, y=0, relwidth=1, relheight=1)
        except Exception as err_obj:
            self.log(err_obj)
    def init_logging(self) -> None:
        """Configure file-based logging and keep a psutil handle to this process.

        level=40 is logging.ERROR, so only error() and above reach the dump
        file.
        """
        # logging error messages
        basicConfig(filename=fr'Resources\\Dumps\\sounder_dump.txt', level=40)
        # NOTE(review): self.process is not used in the code visible here --
        # presumably kept for resource monitoring elsewhere; confirm.
        self.process = Process(getpid())
    def log(self, err_obj) -> None:
        """Report an unexpected exception to the user and the dump file.

        Rings the bell, writes the full traceback via logging, shows the
        final traceback line in the error overlay, and stops playback.

        :param err_obj: the caught exception object.
        """
        # DING!!!!!!
        self.bell()
        # log error to file
        error(err_obj, exc_info=True)
        # last line before the trailing newline == the exception message
        self.error_label['text'] = format_exc().split("\n")[-2]
        self.error_panel.lift()
        # stop playback
        try:
            mixer.music.stop()
        except Exception as _:
            # best effort: the mixer may not be initialised yet
            pass
    def init_notifications(self) -> None:
        """Create the Windows toast notifier (run on a worker thread by
        __init__ since construction can block)."""
        self.toaster = ToastNotifier()
def init_settings(self) -> None:
try:
# variables
default_settings: dict = {'search_compensation': 0.7, 'delete_missing': False, 'follow': 1, 'crossfade': 100, 'shuffle': False, 'start_playback': False, 'playlist': 'Library', 'repeat': 'None', 'buffer': 'Normal', 'last_song': '', 'volume': 0.5, 'sort_by': 'A-Z', 'scan_subfolders': False, 'geometry': '800x500', 'wheel_acceleration': 1.0, 'updates': True, 'folders': [], 'use_system_theme': True, 'theme': 'Light', 'page': 'Library', 'playlists': {'Favorites': {'Name': 'Favorites', 'Songs': []}}}
self.settings: dict = {}
self.version: tuple = ('0.7.7', '190721')
# load settings
if isfile(r'Resources\\Settings\\Settings.json'):
with open(r'Resources\\Settings\\Settings.json', 'r') as data:
try:
self.settings = load(data)
except JSONDecodeError as err_obj:
self.settings = default_settings
else:
self.settings = default_settings
# open sounder configurator
from Components.Setup import SSetup
SSetup(self, self.settings).mainloop()
# verify settings
for key in default_settings:
self.settings[key] = self.settings.get(key, default_settings[key])
# verify playlist
if not 'Favorites' in self.settings['playlists']: self.settings['playlists']['Favorites'] = {'Name': 'Favorites', 'Songs': []}
except Exception as err_obj:
self.log(err_obj)
def apply_settings(self) -> None:
# check theme
if self.settings['use_system_theme']:
self.settings['theme'] = get_theme()
# check for updates
if self.settings['updates']:
self.after(5000, self.update_thread)
# bind escape to root window
self.bind('<Escape>', lambda _: self.focus_set())
# bind scroll to content
self.bind('<MouseWheel>', self.on_wheel)
# apply geometry
self.geometry(self.settings['geometry'])
def save_settings(self) -> None:
# save last page
active_panel: str = self.menu_option.get()
if active_panel != 'Updates':
self.settings['page'] = active_panel
# save player state ...
# save app geometry
self.settings['geometry'] = f'{self.geometry()}'
# save active playlists
self.settings['playlist'] = self.playlist
try:
with open(r'Resources\\Settings\\Settings.json', 'w') as data:
dump(self.settings, data)
except Exception as err_obj:
self.log(err_obj)
def restore_default(self) -> None:
if askyesno('Restore default configuration', 'Are you sure you want to restore the default configuration?', icon='warning'):
self.settings = {}
self.exit_app()
    def init_layout(self) -> None:
        """Install the 'clam' ttk theme and simplify widget element layouts.

        Each layout() call strips the default border/focus elements so the
        widget renders as flat padding around its label/content only; the
        colours are applied later by apply_theme().
        """
        # init theme object
        self.layout = ttk.Style()
        # set theme to clam
        self.layout.theme_use('clam')
        # button
        self.layout.layout('TButton', [('Button.padding', {'sticky': 'nswe', 'children': [('Button.label', {'sticky': 'nswe'})]})])
        # radiobutton
        self.layout.layout('TRadiobutton', [('Radiobutton.padding', {'sticky': 'nswe', 'children': [('Radiobutton.label', {'sticky': 'nswe'})]})])
        # scrollbar
        self.layout.layout('Vertical.TScrollbar', [('Vertical.Scrollbar.trough', {'children': [('Vertical.Scrollbar.thumb', {'expand': '1', 'sticky': 'nswe'})], 'sticky': 'ns'})])
        # entry
        self.layout.layout('TEntry', [('Entry.padding', {'sticky': 'nswe', 'children': [('Entry.textarea', {'sticky': 'nswe'})]})])
def apply_theme(self) -> None:
theme: dict = {'Dark': ['#111', '#212121', '#333', '#fff'], 'Light': ['#eee', '#fff', '#aaa', '#000']}
# window
self.configure(background=theme[self.settings['theme']][1])
# frame
self.layout.configure('TFrame', background=theme[self.settings['theme']][1])
self.layout.configure('second.TFrame', background=theme[self.settings['theme']][0])
# label
self.layout.configure('TLabel', background=theme[self.settings['theme']][0], relief='flat', font=('catamaran 13 bold'), foreground=theme[self.settings['theme']][3])
self.layout.configure('second.TLabel', background=theme[self.settings['theme']][1], font=('catamaran 20 bold'))
self.layout.configure('third.TLabel', background=theme[self.settings['theme']][1])
self.layout.configure('fourth.TLabel', background=theme[self.settings['theme']][1], font=('catamaran 16 bold'))
self.layout.configure('fifth.TLabel', background=theme[self.settings['theme']][0], font=('catamaran 10 bold'))
self.layout.configure('sixth.TLabel', background=theme[self.settings['theme']][0], font=('catamaran 8 bold'))
self.layout.configure('seventh.TLabel', background=theme[self.settings['theme']][0], font=('catamaran 16 bold'))
# radiobutton
self.layout.configure('TRadiobutton', background=theme[self.settings['theme']][0], relief='flat', font=('catamaran 13 bold'), foreground=theme[self.settings['theme']][3], anchor='w', padding=5, width=12)
self.layout.map('TRadiobutton', background=[('pressed', '!disabled', theme[self.settings['theme']][1]), ('active', theme[self.settings['theme']][1]), ('selected', theme[self.settings['theme']][1])])
self.layout.configure('second.TRadiobutton', anchor='center', padding=5, width=6)
self.layout.configure('third.TRadiobutton', anchor='center', padding=5, width=8)
self.layout.configure('fourth.TRadiobutton', anchor='center')
self.layout.configure('fifth.TRadiobutton', font=('catamaran 12 bold'), background=theme[self.settings['theme']][1], foreground=theme[self.settings['theme']][3], anchor='center', padding=4, width=6)
self.layout.map('fifth.TRadiobutton', background=[('pressed', '!disabled', theme[self.settings['theme']][0]), ('active', theme[self.settings['theme']][0]), ('selected', theme[self.settings['theme']][0])])
# button
self.layout.configure('TButton', background=theme[self.settings['theme']][0], relief='flat', font=('catamaran 13 bold'), foreground=theme[self.settings['theme']][3], anchor='w')
self.layout.map('TButton', background=[('pressed', '!disabled', theme[self.settings['theme']][1]), ('active', theme[self.settings['theme']][1]), ('selected', theme[self.settings['theme']][1])])
self.layout.configure('second.TButton', background=theme[self.settings['theme']][1], anchor='center')
self.layout.map('second.TButton', background=[('pressed', '!disabled', theme[self.settings['theme']][0]), ('active', theme[self.settings['theme']][0]), ('selected', theme[self.settings['theme']][0])])
self.layout.configure('third.TButton', background=theme[self.settings['theme']][0], anchor='center')
self.layout.map('third.TButton', background=[('pressed', '!disabled', theme[self.settings['theme']][1]), ('active', theme[self.settings['theme']][1]), ('selected', theme[self.settings['theme']][1])])
self.layout.configure('fourth.TButton', anchor='center', background=theme[self.settings['theme']][1], width=20)
self.layout.map('fourth.TButton', background=[('pressed', '!disabled', theme[self.settings['theme']][0]), ('active', theme[self.settings['theme']][0]), ('selected', theme[self.settings['theme']][0])])
# scrollbar
self.layout.configure('Vertical.TScrollbar', gripcount=0, relief='flat', background=theme[self.settings['theme']][1], darkcolor=theme[self.settings['theme']][1], lightcolor=theme[self.settings['theme']][1], troughcolor=theme[self.settings['theme']][1], bordercolor=theme[self.settings['theme']][1])
self.layout.map('Vertical.TScrollbar', background=[('pressed', '!disabled', theme[self.settings['theme']][0]), ('disabled', theme[self.settings['theme']][1]), ('active', theme[self.settings['theme']][0]), ('!active', theme[self.settings['theme']][0])])
# scale
self.layout.map('Horizontal.TScale', background=[('pressed', '!disabled', theme[self.settings['theme']][2]), ('active', theme[self.settings['theme']][2])])
# self.layout.configure('Horizontal.TScale', troughcolor='#151515', background='#333', relief="flat", gripcount=0, darkcolor="#151515", lightcolor="#151515", bordercolor='#151515')
self.layout.configure('Horizontal.TScale', troughcolor=theme[self.settings['theme']][0], background=theme[self.settings['theme']][1], relief='flat', gripcount=0, darkcolor=theme[self.settings['theme']][0], lightcolor=theme[self.settings['theme']][0], bordercolor=theme[self.settings['theme']][0])
# entry
self.layout.configure('TEntry', background=theme[self.settings['theme']][1], foreground=theme[self.settings['theme']][3], fieldbackground=theme[self.settings['theme']][0], selectforeground=theme[self.settings['theme']][3], selectbackground=theme[self.settings['theme']][2])
self.layout.map('TEntry', foreground=[('active', '!disabled', 'disabled', theme[self.settings['theme']][3])])
self.layout.configure('second.TEntry', background=theme[self.settings['theme']][0])
# progressbar
self.layout.configure("Horizontal.TProgressbar", background=theme[self.settings['theme']][1], lightcolor=theme[self.settings['theme']][0], darkcolor=theme[self.settings['theme']][0], bordercolor=theme[self.settings['theme']][0], troughcolor=theme[self.settings['theme']][0], thickness=2)
def load_icons(self) -> None:
self.icons: dict = {
'error': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\error.png'),
'library': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\library.png'),
'folder': (PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\folder.png'), PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\music_folder.png')),
'settings': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\settings.png'),
'plus': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\plus.png'),
'heart': (PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\heart_empty.png'), PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\heart_filled.png')),
'delete': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\delete.png'),
'playlist': (PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\playlist.png'), PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\lounge.png')),
'play_pause': (PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\play.png'), PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\pause.png')),
'next': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\next.png'),
'previous': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\previous.png'),
'repeat': (PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\repeat.png'), PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\repeat_one.png')),
'shuffle': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\shuffle.png'),
'edit': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\edit.png'),
'menu': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\menu.png'),
'date': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\date.png'),
'note': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\note.png'),
'arrow': (PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\left.png'), PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\right.png')),
'checkmark': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\checkmark.png'),
'restore': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\restore.png'),
'brush': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\brush.png'),
'info': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\info.png'),
'window': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\window.png'),
'user': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\user.png'),
'icons8': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\icons8.png'),
'code': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\code.png'),
'download': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\download.png'),
'wheel': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\wheel.png'),
'search': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\search.png'),
'filter': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\filter.png'),
'speaker': (PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\muted.png'), PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\low_volume.png'), PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\med_volume.png'), PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\max_volume.png')),
'buffer': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\buffer.png'),
'select': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\select.png'),
'power': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\power.png'),
'time': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\time.png'),
'sort': (PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\no_sort.png'), PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\normal_sort.png'), PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\reversed_sort.png')),
'puzzled': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\puzzled.png'),
'package': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\package.png'),
'shield': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\shield.png'),
'trash': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\trash.png'),
'logo': PhotoImage(file=fr'Resources\\Icons\\{self.settings["theme"]}\\logo.png'),
}
self.iconbitmap(fr'Resources\\Icons\\{self.settings["theme"]}\\icon.ico')
def init_ui(self) -> None:
# ui variables
self.menu_option: StringVar = StringVar(value=self.settings['page'])
self.menu_playlist: StringVar = StringVar(value=self.settings['page'])
self.muted: bool = True if self.settings['volume'] == 0.0 else False
self.last_panel: str = ''
self.folder_panels: dict = {}
self.song_panels: dict = {}
self.settings_panels: tuple = ()
self.update_panels: list = []
# theme
self.theme: StringVar = StringVar()
if self.settings['use_system_theme']:
self.theme.set('System')
else:
self.theme.set(self.settings['theme'])
# update
self.updates: BooleanVar = BooleanVar(value=self.settings['updates'])
# wheel acceleration
self.wheel_acceleration: DoubleVar = DoubleVar(value=self.settings['wheel_acceleration'])
# search compensation
self.search_compensation: DoubleVar = DoubleVar(value=self.settings['search_compensation'])
# scan subfolders
self.scan_subfolders: BooleanVar = BooleanVar(value=self.settings['scan_subfolders'])
# sort by
self.sort_by: StringVar = StringVar(value=self.settings['sort_by'])
# buffer mode
self.buffer: StringVar = StringVar(value=self.settings['buffer'])
# playback
self.start_playback: BooleanVar = BooleanVar(value=self.settings['start_playback'])
# crossfade
self.crossfade: DoubleVar = DoubleVar(value=self.settings['crossfade'])
# follow
self.follow: IntVar = IntVar(value=self.settings['follow'])
# missing
self.delete_missing: BooleanVar = BooleanVar(value=self.settings['delete_missing'])
# player panel
self.player_panel: ttk.Frame = ttk.Frame(self)
# top panel
player_top_panel: ttk.Frame = ttk.Frame(self.player_panel)
# menu panel
self.menu_panel: ttk.Frame = ttk.Frame(player_top_panel, style='second.TFrame')
ttk.Radiobutton(self.menu_panel, image=self.icons['library'], text='Library', compound='left', value='Library', variable=self.menu_option, command=self.show_panel).pack(side='top', fill='x', padx=10, pady=(10, 0))
ttk.Radiobutton(self.menu_panel, image=self.icons['folder'][1], text='Folders', compound='left', value='Folders', variable=self.menu_option, command=self.show_panel).pack(side='top', fill='x', padx=10, pady=10)
ttk.Radiobutton(self.menu_panel, image=self.icons['settings'], text='Settings', compound='left', value='Settings', variable=self.menu_option, command=self.show_panel).pack(side='bottom', fill='x', padx=10, pady=(0, 10))
ttk.Label(self.menu_panel, text='Playlists').pack(side='top', fill='x', padx=10, pady=(0, 10))
ttk.Button(self.menu_panel, image=self.icons['plus'], text='Add playlist', compound='left', command=self.add_playlist).pack(side='top', fill='x', padx=10, pady=(0, 10))
ttk.Radiobutton(self.menu_panel, image=self.icons['heart'][1], text='Favorites', compound='left', value='Favorites', variable=self.menu_playlist, command=self.show_playlist).pack(side='top', fill='x', padx=10)
# add playlist from settings
for playlist in self.settings['playlists']:
if playlist == 'Favorites': continue
ttk.Radiobutton(self.menu_panel, image=self.icons['playlist'][0], text=self.settings['playlists'][playlist]['Name'], compound='left', value=playlist, style='menu.TRadiobutton', variable=self.menu_playlist, command=self.show_playlist).pack(side='top', fill='x', padx=10, pady=(10, 0))
self.menu_panel.pack(side='left', fill='both')
# player scrollbar
player_content_scroll = ttk.Scrollbar(player_top_panel, orient='vertical')
player_content_scroll.pack(side='right', fill='y')
# options panel
player_options_panel: ttk.Frame = ttk.Frame(player_top_panel)
# update options panel
self.update_options: ttk.Frame = ttk.Frame(player_options_panel)
ttk.Label(self.update_options, image=self.icons['download'], text='Updates', style='fourth.TLabel', compound='left').pack(side='left', anchor='center', padx=(10, 0))
self.update_options.place(x=0, | |
#!/usr/bin/env python
################################################################################
# _ ____ ___ _ _ _ #
# / \ / ___|_ _| | | (_)_ __ | |_ #
# / _ \| | | | | | | | '_ \| __| #
# / ___ \ |___ | | | |___| | | | | |_ #
# /_/ \_\____|___| |_____|_|_| |_|\__| #
# #
################################################################################
# #
# Copyright (c) 2015 Cisco Systems #
# All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# License for the specific language governing permissions and limitations #
# under the License. #
# #
################################################################################
"""
acilint - A static configuration analysis tool for examining ACI Fabric
configuration for potential problems and unused configuration.
"""
import sys
from acitoolkit.acitoolkit import Tenant, AppProfile, Context, EPG, BridgeDomain
from acitoolkit.acitoolkit import OutsideL3, OutsideEPG, OutsideNetwork
from acitoolkit.acitoolkit import Contract, ContractSubject, InputTerminal
from acitoolkit.acitoolkit import OutputTerminal, Filter, FilterEntry
from acitoolkit.acitoolkit import Credentials, Session
from acitoolkit.acifakeapic import FakeSession
import argparse
import ipaddress
class Checker(object):
"""
Checker class contains a series of lint checks that are executed against the
provided configuration.
"""
    def __init__(self, session, output, fh=None):
        """Snapshot the full tenant configuration from the APIC.

        :param session: Session (or FakeSession) used to query the APIC.
        :param output: 'console' or 'html' -- selects the format used by
                       output_handler().
        :param fh: file handle for HTML output; unused for console output.
        """
        print('Getting configuration from APIC....')
        self.tenants = Tenant.get_deep(session)
        self.output = output
        self.file = fh
        print('Processing configuration....')
    def output_handler(self, msg):
        """
        Print the supplied string in a format appropriate to the output medium.

        Messages are expected to look like 'Severity NNN: description'
        (e.g. 'Warning 010: ...'); the HTML branch parses the severity,
        rule number and description out of that shape.

        :param msg: The message to be printed.
        """
        if self.output == 'console':
            print(msg)
        elif self.output == 'html':
            # table row colour keyed by severity
            color_map = {'Error': '#FF8C00',
                         'Critical': '#FF0000',
                         'Warning': '#FFFF00'}
            sev = msg.split(':')[0].split(' ')[0]    # e.g. 'Warning'
            rule = msg.split(':')[0].split(' ')[1]   # e.g. '010'
            descr = msg.split(': ')[1]
            self.file.write("""
            <tr>
            <td bgcolor="{0}">{1}</td>
            <td bgcolor="{0}">{2}</td>
            <td bgcolor="{0}">{3}</td>
            </tr>
            """.format(color_map[sev], sev, rule, descr))
@staticmethod
def ensure_tagged(objects, tags):
"""
Checks that a set of objects are tagged with at least one tag
from the set of tags.
"""
for obj in objects:
tagged = False
for tag in tags:
if obj.has_tag(tag):
tagged = True
if not tagged:
return False
return True
def warning_001(self):
"""
W001: Tenant has no app profile
"""
for tenant in self.tenants:
if len(tenant.get_children(AppProfile)) == 0:
self.output_handler("Warning 001: Tenant '%s' has no Application "
"Profile." % tenant.name)
def warning_002(self):
"""
W002: Tenant has no context
"""
for tenant in self.tenants:
if len(tenant.get_children(Context)) == 0:
self.output_handler("Warning 002: Tenant '%s' has no Context." % tenant.name)
def warning_003(self):
"""
W003: AppProfile has no EPGs
"""
for tenant in self.tenants:
for app in tenant.get_children(AppProfile):
if len(app.get_children(EPG)) == 0:
self.output_handler("Warning 003: AppProfile '%s' in Tenant '%s'"
"has no EPGs." % (app.name, tenant.name))
def warning_004(self):
"""
W004: Context has no BridgeDomain
"""
for tenant in self.tenants:
contexts = []
for context in tenant.get_children(Context):
contexts.append(context.name)
for bd in tenant.get_children(BridgeDomain):
if bd.has_context():
context = bd.get_context().name
if context in contexts:
contexts.remove(context)
for context in contexts:
self.output_handler("Warning 004: Context '%s' in Tenant '%s' has no "
"BridgeDomains." % (context, tenant.name))
def warning_005(self):
"""
W005: BridgeDomain has no EPGs assigned
"""
for tenant in self.tenants:
bds = []
for bd in tenant.get_children(BridgeDomain):
bds.append(bd.name)
for app in tenant.get_children(AppProfile):
for epg in app.get_children(EPG):
if epg.has_bd():
bd = epg.get_bd().name
if bd in bds:
bds.remove(bd)
for bd in bds:
self.output_handler("Warning 005: BridgeDomain '%s' in Tenant '%s'"
" has no EPGs." % (bd, tenant.name))
def warning_006(self):
"""
W006: Contract is not provided at all.
"""
for tenant in self.tenants:
contracts = []
for contract in tenant.get_children(Contract):
contracts.append(contract.name)
for app in tenant.get_children(AppProfile):
for epg in app.get_children(EPG):
provided = epg.get_all_provided()
for contract in provided:
if contract.name in contracts:
contracts.remove(contract.name)
for contract in contracts:
self.output_handler("Warning 006: Contract '%s' in Tenant '%s' is not"
" provided at all." % (contract, tenant.name))
def warning_007(self):
"""
W007: Contract is not consumed at all.
"""
for tenant in self.tenants:
contracts = []
for contract in tenant.get_children(Contract):
contracts.append(contract.name)
for app in tenant.get_children(AppProfile):
for epg in app.get_children(EPG):
consumed = epg.get_all_consumed()
for contract in consumed:
if contract.name in contracts:
contracts.remove(contract.name)
for contract in contracts:
self.output_handler("Warning 007: Contract '%s' in Tenant '%s' is not"
" consumed at all." % (contract, tenant.name))
def warning_008(self):
"""
W008: EPG providing contracts but in a Context with no enforcement.
"""
for tenant in self.tenants:
for app in tenant.get_children(AppProfile):
for epg in app.get_children(EPG):
if len(epg.get_all_provided()):
if epg.has_bd():
bd = epg.get_bd()
if bd.has_context():
context = bd.get_context()
if context.get_allow_all():
self.output_handler("Warning 008: EPG '%s' providing "
"contracts in Tenant '%s', App"
"Profile '%s' but Context '%s' "
"is not enforcing." % (epg.name,
tenant.name,
app.name,
context.name))
    def warning_010(self):
        """
        W010: EPG providing contract but consuming EPG is in a different
        context.

        Two passes: first build provide_db (tenant name -> contract name ->
        list of context names in which some EPG provides that contract),
        then check every consumed contract against it.
        """
        provide_db = {}
        # pass 1: record where each contract is provided
        for tenant in self.tenants:
            for app in tenant.get_children(AppProfile):
                for epg in app.get_children(EPG):
                    if epg.has_bd():
                        bd = epg.get_bd()
                        if bd.has_context():
                            context = bd.get_context()
                            provided = epg.get_all_provided()
                            for contract in provided:
                                if tenant.name not in provide_db:
                                    provide_db[tenant.name] = {}
                                if contract.name not in provide_db[tenant.name]:
                                    provide_db[tenant.name][contract.name] = []
                                if context.name not in provide_db[tenant.name][contract.name]:
                                    provide_db[tenant.name][contract.name].append(context.name)
        # pass 2: every consumption must have a provider in the same
        # tenant and the same context
        for tenant in self.tenants:
            if tenant.name not in provide_db:
                self.output_handler("Warning 010: No contract provided within"
                                    " this tenant '%s'" % tenant.name)
                continue  # don't repeat this message for each option below.
            epgs = []
            for app in tenant.get_children(AppProfile):
                for epg in app.get_children(EPG):
                    epgs.append(epg)
            for epg in epgs:
                if epg.has_bd():
                    bd = epg.get_bd()
                    if bd.has_context():
                        context = bd.get_context()
                        consumed = epg.get_all_consumed()
                        for contract in consumed:
                            if contract.name not in provide_db[tenant.name]:
                                self.output_handler("Warning 010: Contract '%s' not provided "
                                                    "within the same tenant "
                                                    "'%s'" % (contract.name, tenant.name))
                            elif context.name not in provide_db[tenant.name][contract.name]:
                                self.output_handler("Warning 010: Contract '%s' not provided in context '%s' "
                                                    "where it is being consumed for"
                                                    " tenant '%s'" % (contract.name, context.name, tenant.name))
@staticmethod
def subj_matches_proto(filterlist, protocol):
"""
This routine will return True/False if the list of filters has a filter
that matches the specified protocol.
:param filterlist: The list of filters to inspect.
:param protocol: The protocol we are looking for.
"""
for subjfilter in filterlist:
for entry in subjfilter.get_children(FilterEntry):
entryAttrs = entry.get_attributes()
if entryAttrs['prot'] == protocol:
return True
return False
    def warning_011(self):
        """
        W011: Contract has Bidirectional TCP Subjects.

        is_tcp_bidi encodes what was found for a contract:
          0 - no TCP match anywhere
          1 - TCP matched in only one directional terminal (not reported)
          2 - TCP matched in both In and Out terminals (explicitly bidi)
          3 - TCP matched in the subject's own non-directional filters,
              treated here as implicitly bidirectional
        """
        for tenant in self.tenants:
            for contract in tenant.get_children(Contract):
                is_tcp_bidi = 0
                for subject in contract.get_children(ContractSubject):
                    # non-directional subject filters count as bidirectional
                    if self.subj_matches_proto(subject.get_filters(), 'tcp'):
                        is_tcp_bidi = 3
                        break
                    in_terminal = subject.get_children(InputTerminal)
                    out_terminal = subject.get_children(OutputTerminal)
                    if in_terminal:
                        in_filterlist = in_terminal[0].get_filters()
                    else:
                        in_filterlist = ()
                    if out_terminal:
                        out_filterlist = out_terminal[0].get_filters()
                    else:
                        out_filterlist = ()
                    if in_filterlist:
                        if self.subj_matches_proto(in_filterlist, 'tcp'):
                            is_tcp_bidi = 1
                    if out_filterlist:
                        if self.subj_matches_proto(out_filterlist, 'tcp'):
                            is_tcp_bidi += 1
                    # Otherwise, either there are no terminals so it's a permit
                    # everything which doesn't count.
                    # NOTE(review): scanning stops at the first subject with
                    # any TCP match, even a one-directional one (value 1) --
                    # confirm later subjects shouldn't still be inspected.
                    if is_tcp_bidi:
                        break
                if is_tcp_bidi == 3:
                    self.output_handler("Warning 011: In tenant '%s' contract "
                                        "'%s' is a Bidirectional TCP contract."
                                        % (tenant.name, contract.name))
                elif is_tcp_bidi == 2:
                    self.output_handler("Warning 011: In tenant '%s' contract "
                                        "'%s' is an explictly "
                                        "Bidirectional TCP contract."
                                        % (tenant.name, contract.name))
    def warning_012(self):
        """
        W012: Contract has Bidirectional UDP Subjects.

        Mirror of warning_011 for UDP. is_udp_bidi encoding: 0 no match,
        1 one directional terminal only (not reported), 2 both In and Out
        terminals (explicitly bidi), 3 matched in the subject's own
        non-directional filters (implicitly bidi).
        """
        for tenant in self.tenants:
            for contract in tenant.get_children(Contract):
                is_udp_bidi = 0
                for subject in contract.get_children(ContractSubject):
                    # non-directional subject filters count as bidirectional
                    if self.subj_matches_proto(subject.get_filters(), 'udp'):
                        is_udp_bidi = 3
                        break
                    in_terminal = subject.get_children(InputTerminal)
                    out_terminal = subject.get_children(OutputTerminal)
                    if in_terminal:
                        in_filterlist = in_terminal[0].get_filters()
                    else:
                        in_filterlist = ()
                    if out_terminal:
                        out_filterlist = out_terminal[0].get_filters()
                    else:
                        out_filterlist = ()
                    if in_filterlist:
                        if self.subj_matches_proto(in_filterlist, 'udp'):
                            is_udp_bidi = 1
                    if out_filterlist:
                        if self.subj_matches_proto(out_filterlist, 'udp'):
                            is_udp_bidi += 1
                    # Otherwise, either there are no terminals so it's a permit
                    # everything which doesn't count.
                    # NOTE(review): same early-break caveat as warning_011.
                    if is_udp_bidi:
                        break
                if is_udp_bidi == 3:
                    self.output_handler("Warning 012: In tenant '%s' contract "
                                        "'%s' is a Bidirectional UDP contract."
                                        % (tenant.name, contract.name))
                elif is_udp_bidi == 2:
                    self.output_handler("Warning 012: In tenant '%s' contract "
                                        "'%s' is an explictly "
                                        "Bidirectional UDP contract."
                                        % (tenant.name, contract.name))
def warning_013(self):
"""
W013: Contract has no Subjects.
"""
for tenant in self.tenants:
for contract in tenant.get_children(Contract):
if len(contract.get_children(ContractSubject)) == 0:
self.output_handler("Warning 013: In | |
fig = plt.figure(figsize=(20, 12))
gs = gridspec.GridSpec(30, 1) #縦,横
ax1 = plt.subplot(gs[0:16, 0])
ax2 = plt.subplot(gs[16:21, 0])
ax3 = plt.subplot(gs[21:26, 0])
ax4 = plt.subplot(gs[26:31, 0])
time = tohlc[TIME]
open = tohlc[OPEN]
high = tohlc[HIGH]
low = tohlc[LOW]
close = tohlc[CLOSE]
ohlc = []
for o, h, l, c in zip(open, high, low, close):
ohlc.append([o, h, l, c])
if timeframe.symbol == 'S10':
begin = 300
elif timeframe.symbol == 'M1':
begin = 30
graph1 = CandlePlot(fig, ax1, title)
graph1.xlimit(display_time_range)
graph1.drawCandle(time, ohlc, timerange=display_time_range)
graph3 = CandlePlot(fig, ax3, 'SigmaBand')
graph2 = CandlePlot(fig, ax2, 'DMA')
windows =[5, 15, 60, 120]
colors = [None, 'red', 'blue', 'orange']
mas = {}
for w, color in zip(windows, colors):
ma = SMA(close, w)
mas['MA' + str(w)] = ma
if color is not None:
graph1.drawLine(time, ma, color=color, label='MA' + str(w))
crosses = drawCrossing(graph1, time, mas)
#upper1, lower1 = BB(close, 12, 1.0)
#upper2, lower2 = BB(close, 12, 2.0)
#graph1.drawLine(time, upper1, color='green', linestyle='dashed', linewidth=1.0, label='MA12+sigma')
#graph1.drawLine(time, lower1, color='green', linestyle='dashed', linewidth=1.0, label='MA12-sigma')
#graph1.drawLine(time, upper2, color='green', linewidth=1.0, label='MA12+2sigma')
#graph1.drawLine(time, lower2, color='green', linewidth=1.0, label='MA12-2sigma')
if trades is not None:
for trade in trades:
status, open_time, close_time, open_price, close_price, profit = trade
if status == LONG:
color = 'green'
marker = '^'
elif status == SHORT:
color = 'red'
marker = 'v'
graph1.drawMarker(open_time, open_price, marker, color)
graph1.drawMarker(close_time, close_price, '*', color)
graph4 = CandlePlot(fig, ax4, 'ATR w=15')
graph4.xlimit(display_time_range)
dji_range = np.arange(10, 40, 10)
if timeframe.symbol == 'M1':
gold_range = np.arange(0.2, 11.0, 0.2)
elif timeframe.symbol == 'M5':
gold_range = np.arange(0.5, 11.0, 0.5)
if market == 'dji':
rng = dji_range
elif market == 'gold':
rng = gold_range
for v in rng:
graph4.hline(v, color='lightgray')
if timeframe.symbol == 'S10':
limit = 20
elif timeframe.symbol == 'M1':
limit = 50
if market == 'dji':
limit = 50
elif market == 'gold':
limit = 1.0
if timeframe.symbol == 'M5':
limit *= 5.0
graph4.drawLine(time, atr, color='blue', ylim=(0, limit), label='ATR w=5')
if ticks is not None:
drawTicks(graph1, graph2, ticks, display_time_range)
return
drawDMA(graph2, market, timeframe, time, display_time_range, mas)
drawCrosses2(graph2, crosses)
drawSigmaBand(graph3, time, display_time_range, close)
drawCrosses2(graph3, crosses)
ax1.legend()
ax2.legend()
ax3.legend()
ax4.legend()
def crossOver(time, ma_fast, ma_mid, ma_slow):
    """Classify bars where the fast MA crosses the mid MA.

    A cross is "golden"/"dead" when the mid MA is on the favourable side
    of the slow MA at the crossing bar; otherwise it is recorded as a
    plain over/under cross.

    Args:
        time: sequence of timestamps, parallel to the MA sequences.
        ma_fast, ma_mid, ma_slow: moving-average value sequences.

    Returns:
        Tuple (golden_cross, dead_cross, cross_over, cross_under); each
        element is a list of [index, time, ma_mid value] triples.
    """
    golden_cross = []
    dead_cross = []
    cross_over = []
    cross_under = []
    # Start at 1 so the previous bar (i - 1) is always valid; this also
    # replaces the old `if i == 0: continue` pattern. The unused
    # OVER/UNDER locals were removed.
    for i in range(1, len(ma_fast)):
        if ma_fast[i - 1] < ma_mid[i - 1] and ma_fast[i] >= ma_mid[i]:
            # Fast MA crossed upward through the mid MA.
            if ma_mid[i] >= ma_slow[i]:
                golden_cross.append([i, time[i], ma_mid[i]])
            else:
                cross_over.append([i, time[i], ma_mid[i]])
        elif ma_fast[i - 1] > ma_mid[i - 1] and ma_fast[i] <= ma_mid[i]:
            # Fast MA crossed downward through the mid MA.
            if ma_mid[i] <= ma_slow[i]:
                dead_cross.append([i, time[i], ma_mid[i]])
            else:
                cross_under.append([i, time[i], ma_mid[i]])
    return (golden_cross, dead_cross, cross_over, cross_under)
def drawCrossing(graph, time, mas):
    """Detect MA15/MA60/MA120 crossings, render them on `graph`, and
    return the crossing tuple produced by crossOver()."""
    crosses = crossOver(time, mas['MA15'], mas['MA60'], mas['MA120'])
    drawCrosses(graph, crosses)
    return crosses
def drawCrosses(graph, crosses):
    """Mark crossing events on a graph: thick vertical lines for
    golden/dead crosses, X markers just above/below the MA value for
    plain over/under crosses."""
    golden, dead, over, under = crosses
    for _, t, _ in golden:
        graph.vline(t, color='orange', linewidth=2)
    for _, t, _ in dead:
        graph.vline(t, color='gray', linewidth=2)
    for _, t, v in over:
        graph.drawMarker(t, v + 1, 'X', 'Cyan', markersize=5)
    for _, t, v in under:
        graph.drawMarker(t, v - 1, 'X', 'Red', markersize=5)
def drawCrosses2(graph, crosses):
    """Draw all four crossing types as vertical lines: thick for
    golden/dead crosses, thin for plain over/under crosses."""
    golden, dead, over, under = crosses
    groups = ((golden, 'orange', 2), (dead, 'gray', 2),
              (over, 'orange', 1), (under, 'gray', 1))
    for events, color, width in groups:
        for _, t, _ in events:
            graph.vline(t, color=color, linewidth=width)
def drawSigmaBand(graph, time, trange, price):
    """Draw the sigma-rate panel: the wide-window (60) rate as bars, the
    narrow-window (15) rate as a line, plus horizontal guides at
    0 (thick), +/-1 and +/-2."""
    band_wide = SigmaRate(price, 60, 1)
    band_narrow = SigmaRate(price, 15, 1)
    graph.xlimit(trange)
    graph.drawBar(time, band_wide, ylim=(-4, 4), label='w=60')
    graph.drawLine(time, band_narrow, color='blue', label='w=15/60')
    # Guide lines, kept in the original drawing order.
    for level, width in ((-1.0, None), (1.0, None), (0.0, 2.0),
                         (2.0, None), (-2.0, None)):
        if width is None:
            graph.hline(level, color='lightgray')
        else:
            graph.hline(level, color='lightgray', linewidth=width)
    graph.ax.set_xticks([])
def drawDMA(graph, market, timeframe, time, trange, mas):
    """Plot the MA first-differences (delta-MA) panel.

    Chooses window labels and a delta scaling factor per timeframe, then
    draws the fast/slow deltas as lines and the mid delta as bars.

    NOTE(review): timeframe symbols other than S10/M1/M5 leave `c`
    undefined, and markets other than dji/gold leave `lim` undefined —
    both would raise NameError. Also, the S10 branch reads mas['MA100']
    and mas['MA200'], which the visible caller does not compute — confirm
    against other call sites.
    """
    if timeframe.symbol == 'S10':
        label1 = 'dMA60'
        label2 = 'dMA100'
        label3 = 'dMA200'
        dif1 = delta(mas['MA60'], 6.0)
        dif2 = delta(mas['MA100'], 6.0)
        dif3 = delta(mas['MA200'], 6.0)
    else:
        label1 = 'dMA5'
        label2 = 'dMA15'
        label3 = 'dMA60'
        # Scale M5 deltas down so the per-minute magnitude is comparable.
        if timeframe.symbol == 'M1':
            c = 1.0
        elif timeframe.symbol == 'M5':
            c = 1.0 / 5.0
        dif1 = delta(mas['MA5'], c)
        dif2 = delta(mas['MA15'], c)
        dif3 = delta(mas['MA60'], c)
    if market == 'dji':
        lim = (-15, 15)
    elif market == 'gold':
        if timeframe.symbol == 'M1':
            lim = (-0.5, 0.5)
        elif timeframe.symbol == 'M5':
            lim = (-0.2, 0.2)
    graph.xlimit(trange)
    graph.drawLine(time, dif1, color='blue', ylim=lim, label=label1)
    graph.drawLine(time, dif3, color='orange', ylim=lim, label=label3)
    # Bug fix: the bar series is dif2, so its legend entry is label2
    # (it previously reused label3, leaving label2 unused).
    graph.drawBar(time, dif2, ylim=lim, label=label2)
    graph.ax.set_xticks([])
def drawTicks(graph1, graph2, ticks, trange):
    """Plot raw tick bids on graph1 and the smoothed-bid delta on graph2.

    Bug fix: the body previously referenced an undefined name `graph`
    (raising NameError on every call); it now uses the graph1/graph2
    parameters. Panel assignment inferred from the commented-out
    `CandlePlot(fig, ax2, 'deltaTick')` line — confirm the delta panel
    really belongs on graph2.
    """
    ttick = ticks[TIME]
    bid = ticks[BID]
    graph1.drawLine(ttick, bid, color='blue', label='Tick')
    # Delta of a 30-sample moving average of the bid.
    ma = SMA(bid, 30)
    dlt = delta(ma, 1.0)
    graph2.xlimit(trange)
    graph2.drawBar(ttick, dlt, ylim=(-1.5, 1.5), label='delta tick')
def priceRange(ohlc):
    """Return (max, min) of the close values in a list of
    [open, high, low, close] rows. Note: only closes are considered,
    not highs/lows. Raises ValueError on an empty list (unchanged)."""
    closes = [c for _, _, _, c in ohlc]
    return (max(closes), min(closes))
# Position status codes shared by Position and Simulation.
NOTHING = 0  # no open position
LONG = 1     # holding a long position
SHORT = -1   # holding a short position
class Position:
    """A single trading position driven by dual moving-average deltas.

    Opens long/short when the slow delta exceeds +/-threshold with the
    fast delta confirming, and closes on a fast/slow cross-back, or when
    the running loss reaches `loss_cut`, but only after `delay` bars.
    """
    def __init__(self, loss_cut, threshold, delay):
        # Entry threshold on the slow delta.
        self.threshold = threshold
        # Loss (price units) at which the position is force-closed.
        self.loss_cut = loss_cut
        self.status = NOTHING
        # Minimum number of bars to hold before an exit is considered.
        self.delay = delay
        self.peak = None
        # Spread cost subtracted from the running profit used for the
        # loss-cut check (not from the profit booked in square()).
        self.spread = 0.0
    def long(self, index, time, price):
        # Open a long position at (index, time, price).
        self.status = LONG
        self.open_time = time
        self.open_index = index
        self.close_time = None
        self.open_price = price
        self.close_price = None
        self.peak = price
    def short(self, index, time, price):
        # Open a short position at (index, time, price).
        self.status = SHORT
        self.open_index = index
        self.open_time = time
        self.close_time = None
        self.open_price = price
        self.close_price = None
        self.peak = price
    def update(self, index, time, price, dma_fast, dma_slow):
        """Advance one bar; may open or close a position.

        Returns the closed trade record (see result()) when the position
        is squared on this bar, otherwise None.
        """
        if self.status == NOTHING:
            # Entry: slow delta beyond threshold and fast delta ahead of it.
            if dma_slow > self.threshold and dma_fast > dma_slow:
                self.long(index, time, price)
            elif dma_slow < -self.threshold and dma_fast < dma_slow:
                self.short(index, time, price)
            return None
        should_close = False
        elapsed = index - self.open_index
        # Hold at least `delay` bars before considering an exit.
        if elapsed < self.delay:
            return None
        k = 1.0
        if self.status == LONG:
            profit = price - self.open_price - self.spread
            # Track the best (highest) price seen while long.
            if price > self.peak:
                self.peak = price
            # Exit when the fast delta falls back to/below the slow one.
            if dma_fast <= dma_slow * k:
                should_close = True
        elif self.status == SHORT:
            profit = self.open_price - price - self.spread
            # Track the best (lowest) price seen while short.
            if price < self.peak:
                self.peak = price
            if dma_fast >= dma_slow * k:
                should_close = True
        # Hard stop: running loss reached the loss-cut level.
        if profit <= - self.loss_cut:
            should_close = True
        if should_close:
            return self.square(time, price)
        else:
            return None
    def square(self, time, price):
        """Close the position at (time, price) and return the trade record."""
        self.close_time = time
        self.close_price = price
        # Booked profit ignores self.spread (unlike the loss-cut check).
        if self.status == LONG:
            self.profit = self.close_price - self.open_price
        elif self.status == SHORT:
            self.profit = self.open_price - self.close_price
        r = self.result()
        self.status = NOTHING
        return r
    def result(self):
        # Trade record layout consumed by Simulation.run and the plotters:
        # [status, open_time, close_time, open_price, close_price, profit]
        return [self.status, self.open_time, self.close_time, self.open_price, self.close_price, self.profit]
# Activity-phase codes (not referenced in the visible code — TODO confirm
# where these are consumed).
INACTIVE = 0
ACTIVE_BEGIN = 1
ACTIVE = 2
ACTIVE_END = 3
class Simulation:
    """Replays a prepared indicator dict through the Position strategy."""
    def __init__(self, dic, timeframe, trade_time_range):
        # dic maps column constants (TIME, CLOSE, DMA_SLOW, DMA_FAST,
        # SIGMA) to parallel sequences.
        time = dic[TIME]
        close = dic[CLOSE]
        dma_slow = dic[DMA_SLOW]
        dma_fast = dic[DMA_FAST]
        sigma = dic[SIGMA]
        t = time[0]
        # Trading window anchored to the first bar's date;
        # trade_time_range is [[begin_hour, begin_min], [end_hour, end_min]].
        self.trade_begin = datetime(t.year, t.month, t.day, trade_time_range[0][0], trade_time_range[0][1])
        self.trade_end = datetime(t.year, t.month, t.day, trade_time_range[1][0], trade_time_range[1][1])
        # End before begin means the window crosses midnight.
        if self.trade_end < self.trade_begin:
            self.trade_end += timedelta(days=1)
        self.time = time
        self.close = close
        self.dma_slow = dma_slow
        self.dma_fast = dma_fast
        self.timeframe = timeframe
        # NOTE(review): these two window attributes are not read in the
        # visible code — confirm they are still needed.
        self.ma_window_slow = 5
        self.ma_window_fast= 12
        self.length = len(time)
        self.sigma = sigma
    def run(self, loscut, set_threshold, delay):
        """Run the strategy over the data; returns (total_profit, trades)."""
        position = Position(loscut, set_threshold, delay)
        trades = []
        for i in range(self.length):
            t = self.time[i]
            # Only trade inside the configured time window.
            if t < self.trade_begin:
                continue
            if t > self.trade_end:
                break
            price = self.close[i]
            trade = position.update(i, t, price, self.dma_fast[i], self.dma_slow[i])
            if trade is not None:
                trades.append(trade)
        # Force-close any position still open at the end of the data.
        if position.status != NOTHING:
            trade = position.square(self.time[-1], self.close[-1])
            trades.append(trade)
        profit = 0.0
        for trade in trades:
            profit += trade[5]  # index 5 = per-trade profit
        return profit, trades
def drawByDay(market, tf, ticks, year, month):
for day in range(1, 32):
count, data = timeFilter(ticks, year, month, day, [[21, 30], [4, 30]])
#ticks = fromDb(market, year, month, day, [22, 23], None)
if count > | |
#!/usr/bin/env python3
# Command-line interface: MIDI output options and the project file name.
import argparse
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--midiout", help="enable midi output", action="store_true")
parser.add_argument("-c", "--midiport", type=int, help="connect to midi port")
parser.add_argument("-l", "--listmidiports", help="list midi ports and exit", action="store_true")
parser.add_argument("-p", "--projectname", help="project name, use *.lhp extension", type=str)
# NOTE(review): --version shells out to git at import time, so running
# outside a git checkout will fail — confirm this is acceptable.
parser.add_argument('--version', action='version', version=subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).strip().decode("utf-8"))
args = parser.parse_args()
import pygame_sdl2 as pygame
from pygame_sdl2.locals import *
from lhpFunctions import *
import operator
import pickle #used for saving and loading projects
import math #used for scrolling screen
import re
# Listing or selecting a MIDI port implies MIDI output.
if args.listmidiports or args.midiport:
    args.midiout = True
# MIDI initialisation: list ports, connect to a chosen port (1-based on
# the command line), or expose a virtual port named "lhp".
if args.midiout:
    import time
    import rtmidi
    midiout = rtmidi.MidiOut()
    available_ports = midiout.get_ports()
    print("MIDIout enabled")
if args.listmidiports:
    # Ports are printed 1-based to match the --midiport argument.
    for i,y in enumerate(available_ports):
        print(i+1, ":", y)
    quit()
if args.midiport:
    midiout.open_port(args.midiport-1)
    print("lhp connected to", available_ports[args.midiport-1])
elif not args.midiport and args.midiout:
    midiout.open_virtual_port("lhp")
    print("Created virtual MIDIport: lhp")
if args.projectname:
    print("Project name:", args.projectname)
    #lista_nota = pickle.load(open( args.projectname, "rb" ))
else:
    # Default project file when none is given.
    args.projectname = "save.lhp"
#Start of the program ###########################################
pygame.init()
#scale and display resolution
display_scale_factor = 8 #display scale factor becouse original resolution is 160x90
display_width = 160*display_scale_factor #this is graphics resolution, hardcoded
display_height = 90*display_scale_factor #this is graphics resolution, hardcoded
#screen is a pygame default window
screen = pygame.display.set_mode((display_width,display_height))
#program caption title
pygame.display.set_caption('lilypond 18.9.3')
#RGB colors definitions
color_black = (0,0,0)
color_white = (255,255,255)
# Note colours (Croatian: "boja" = colour, "vani"/"nutra" = outline/fill,
# "povisilica" = sharp, "snizilica" = flat, "pauza" = rest).
boja_note_vani = (113, 50, 255)
boja_note_nutra = (203, 139, 57)
boja_note_povisilica_vani = (0, 255, 255)
boja_note_povisilica_nutra = (0, 255, 255)
boja_note_snizilica_vani = (255, 0, 255)
boja_note_snizilica_nutra = (255, 0, 255)
boja_pauze_vani = (0, 148, 0)
boja_pauze_nutra = (48, 212, 0)
lista_boja = [boja_note_vani, boja_note_nutra, boja_note_vani, boja_note_nutra, boja_note_vani, boja_note_nutra, boja_pauze_vani, boja_pauze_nutra]
#pygame clock
clock = pygame.time.Clock()
#variable which exits the program
crashed = False
# Sprite sheets ("takt" = bar/measure, "kljucevi" = clefs, "slova" =
# letters, "rub" = border).
pic_prvi_takt = pygame.image.load('../image/prvi_takt.png')
pic_drugi_takt = pygame.image.load('../image/drugi_takt.png')
pic_zadnji_takt = pygame.image.load('../image/zadnji_takt.png')
pic_plavi_okvir = pygame.image.load('../image/rub_plavi.png')
pic_kljucevi = pygame.image.load('../image/kljucevi.png')
pic_cursor = pygame.image.load('../image/cursor.png')
pic_slova = pygame.image.load('../image/slova.png')
#loading cursor sprites into list
#cursor_color = 0
list_sprite_cursor = []
# Six 3x7 cursor sprites, stored as left/right pairs.
for i in range(0,6):
    pic_cursor.set_clip(pygame.Rect(pozicijaSprite(i,3),0,3,7))
    list_sprite_cursor.append(pic_cursor.subsurface(pic_cursor.get_clip()))
#loading letter sprites into list
list_sprite_slova = []
#range of letters in the list
#range_slova = 24
range_slova = len(spriteSlova)
# Each letter sprite is 3x5 pixels.
for i in range(0,range_slova):
    pic_slova.set_clip(pygame.Rect(pozicijaSprite(i,3),0,3,5))
    list_sprite_slova.append(pic_slova.subsurface(pic_slova.get_clip()))
#bliting functions
#bliting of the first bar
def blit_prvi_takt(x,y):
    """Blit the first-bar ("prvi takt") image, scaled, at grid (x, y)."""
    screen.blit(pygame.transform.scale(pic_prvi_takt, (97*display_scale_factor, 61*display_scale_factor)), (x*display_scale_factor, y*display_scale_factor))
#bliting of the second bar
def blit_drugi_takt(x,y):
    """Blit the second-bar ("drugi takt") image, scaled, at grid (x, y)."""
    screen.blit(pygame.transform.scale(pic_drugi_takt, (96*display_scale_factor, 61*display_scale_factor)), (x*display_scale_factor, y*display_scale_factor))
#bliting of the last bar line
def blit_zadnji_takt(x,y):
    """Blit the final bar line ("zadnji takt") image, scaled, at grid (x, y)."""
    screen.blit(pygame.transform.scale(pic_zadnji_takt, (9*display_scale_factor, 61*display_scale_factor)), (x*display_scale_factor, y*display_scale_factor))
#bliting the blue border
def blit_rub(x,y):
    """Blit the blue border ("rub") image, scaled, at grid (x, y)."""
    screen.blit(pygame.transform.scale(pic_plavi_okvir, (18*display_scale_factor, 90*display_scale_factor)), (x*display_scale_factor, y*display_scale_factor))
#bliting of clefs
def blit_kljucevi(x,y):
    """Blit the clefs ("kljucevi") image, scaled, at grid (x, y)."""
    screen.blit(pygame.transform.scale(pic_kljucevi, (18*display_scale_factor, 121*display_scale_factor)), (x*display_scale_factor, y*display_scale_factor))
#bliting of the cursor
def blit_cursor(x_left,y_left,x_right,y_right,sprite):
    """Blit the left and right cursor halves at their grid positions.

    `sprite` selects the sprite pair: 0 -> sprites 0/1, 1 -> 2/3,
    anything else -> 4/5 (pairs are stored consecutively in
    list_sprite_cursor). This replaces three near-identical branches
    with a single lookup.
    """
    base = {0: 0, 1: 2}.get(sprite, 4)
    size = (3*display_scale_factor, 7*display_scale_factor)
    screen.blit(pygame.transform.scale(list_sprite_cursor[base], size), (x_left*display_scale_factor, y_left*display_scale_factor))
    screen.blit(pygame.transform.scale(list_sprite_cursor[base + 1], size), (x_right*display_scale_factor, y_right*display_scale_factor))
#bliting of a letter
def blit_slovo(x, y, slovo):
    """Blit letter sprite `slovo` ("slovo" = letter) at grid (x, y);
    the index wraps modulo the sprite-list length."""
    screen.blit(pygame.transform.scale(list_sprite_slova[slovo%range_slova], (3*display_scale_factor, 5*display_scale_factor)), (x*display_scale_factor, y*display_scale_factor))
#cursor init
#cursor offset in pixels ("trajanje" = duration)
trajanje_offset = 4
#cursor init in grid values
obj_cursor = cursor(0, 20, 0) #numbers are pixels in which the cursor is initialized
# Document state: notes ("lista nota"), chords and markup entries.
lista_nota = []
list_chords = []
list_markup = []
shift_status = 0
drugi_takt_lijevi = 115  # second bar, left edge (pixels)
drugi_takt_desni = 115   # second bar, right edge (pixels)
broj_taktova = 4         # number of bars
fake_scroll = 0
midi_notes = []          # [note, start_time, state] triples being played
tempo = 216
#tempo = 120
midiplay = 0
def ajdemi():
    """Recompute the cursor's on-screen x from its grid position, the
    background scroll and fake_scroll. Mutates the global obj_cursor."""
    obj_cursor.apsolute_x = obj_cursor.pozicija - (obj_cursor.bg_scroll_x) - fake_scroll
    #print("obj_cursor.apsolute_x", obj_cursor.apsolute_x)
    #print("obj_cursor.pozicija", obj_cursor.pozicija)
    #print("obj_cursor.bg_scroll_x", obj_cursor.bg_scroll_x)
    ##print("fake_scroll", fake_scroll)
#modes defined
# Editing-mode flags (0/1), toggled by keys in the main loop:
insert_mode = 0                 # "i"/"a"/"o": note entry
insert_mode_cursor_length = 0   # remembered note duration for insert mode
chord_mode = 0                  # "c": chord entry
markup_mode = 0                 # "m": markup entry
old_mode = 0                    # "=": legacy mode
g_mode = 0                      # "g": g-command mode
def modes():
    # Non-zero when any editing mode is active (all flags are 0 or 1).
    return(insert_mode + old_mode + g_mode + chord_mode + markup_mode)
while not crashed:
for event in pygame.event.get():
if event.type == pygame.QUIT:
crashed = True
#Keyboard buttons without MODS
if event.type == pygame.KEYDOWN:
if pygame.key.get_mods() == 0:
#modes defined
#= enter chord mode
if event.key == pygame.K_c and not modes():
swap_cursor_ton = obj_cursor.ton
#swap_cursor_pozicija = obj_cursor.pozicija
chord_mode = 1
#obj_cursor.bg_scroll_x = 0
#obj_cursor.bg_scroll_y = 0
#obj_cursor.pozicija = 0
obj_cursor.ton = 32
obj_cursor.trajanje = 15
if event.key == pygame.K_m and not modes():
swap_cursor_ton = obj_cursor.ton
markup_mode = 1
obj_cursor.ton = 8
obj_cursor.trajanje = 15
#= enter old mode
if event.key == pygame.K_EQUALS and not modes():
old_mode = 1
#i insert note before the current cursor possition
if event.key == pygame.K_i and not modes():
insert_mode = 1
obj_cursor.trajanje = insert_mode_cursor_length
#i insert note before the current cursor possition
if event.key == pygame.K_g and not modes():
g_mode = 1
if event.key == pygame.K_ESCAPE:
old_mode = 0
insert_mode = 0
g_mode = 0
if chord_mode:
obj_cursor.ton = swap_cursor_ton
#obj_cursor.pozicija = swap_cursor_pozicija
chord_mode = 0
if markup_mode:
obj_cursor.ton = swap_cursor_ton
#obj_cursor.pozicija = swap_cursor_pozicija
markup_mode = 0
insert_mode_cursor_length = obj_cursor.trajanje
obj_cursor.trajanje = 0
#no modes keys
if not modes():
if event.key in (pygame.K_RIGHT, pygame.K_l):
obj_cursor.pozicija += 1
if event.key in (pygame.K_LEFT, pygame.K_h):
if obj_cursor.pozicija > -15:
obj_cursor.pozicija -= 1
if event.key in (pygame.K_UP, pygame.K_k):
if obj_cursor.ton < 40:
obj_cursor.ton += 1
if event.key in (pygame.K_DOWN, pygame.K_j):
if obj_cursor.ton > 0:
obj_cursor.ton -= 1
#w: Move forward to the beginning of a word.
if event.key == pygame.K_w:
x = [ (i.pozicija,i.ton) for i in lista_nota if i.pozicija > obj_cursor.pozicija ]
if x:
obj_cursor.pozicija, obj_cursor.ton = min(x, key = lambda t: t[0])
#b: Move backward to the beginning of a word.
if event.key == pygame.K_b:
x = [ (i.pozicija,i.ton) for i in lista_nota if i.pozicija < obj_cursor.pozicija ]
if x:
obj_cursor.pozicija, obj_cursor.ton = max(x, key = lambda t: t[0])
#e: Move to the end of a word.
if event.key == pygame.K_e:
for i,y in enumerate(list(lista_nota)): #prolazi kroz sve note i broji po redu
if findNote(y, obj_cursor.pozicija, obj_cursor.trajanje) in (1,2):
obj_cursor.pozicija = y.pozicija + y.trajanje
#a: Append text following current cursor position
if event.key == pygame.K_a:
insert_mode = 1
obj_cursor.pozicija += 1
#o Open up a bar following the current bar and add notes there
if event.key == pygame.K_o:
insert_mode = 1
obj_cursor.pozicija = int(obj_cursor.pozicija / 16) * 16 + 16
fake_scroll = -20
if lista_nota:
for i in lista_nota:
if i.pozicija >= obj_cursor.pozicija:
i.pozicija += 16
if list_chords:
for i in list_chords:
if i.pozicija >= obj_cursor.pozicija:
i.pozicija += 16
if list_markup:
for i in list_markup:
if i.pozicija >= obj_cursor.pozicija:
i.pozicija += 16
broj_taktova += 1
#x delete (cut) current character
if event.key == pygame.K_x:
x = [i for i in lista_nota if findNote(i,obj_cursor.pozicija, obj_cursor.trajanje)]
if x:
for i in x:
if i in lista_nota:
lista_nota.remove(i)
#p play note as midi
if event.key == pygame.K_p:
if midiplay == 1:
midi_notes = []
midiout.send_message([176, 123, 0])
midiplay = 0
midi_notes = [[i,time.clock(), 0] for i in lista_nota if findNote(i,obj_cursor.pozicija, obj_cursor.trajanje)]
# testing
if event.key == pygame.K_y:
obj_cursor.apsolute_x = obj_cursor.pozicija - (obj_cursor.bg_scroll_x) - fake_scroll
print("obj_cursor.apsolute_x", obj_cursor.apsolute_x)
print("obj_cursor.pozicija", obj_cursor.pozicija)
print("obj_cursor.bg_scroll_x", obj_cursor.bg_scroll_x)
print("fake_scroll", fake_scroll)
#insert mode
if insert_mode:
if event.key == pygame.K_RIGHT:
obj_cursor.pozicija += 1
if event.key == pygame.K_LEFT:
if obj_cursor.pozicija > -15:
obj_cursor.pozicija -= 1
if event.key == pygame.K_UP:
if obj_cursor.ton < 40:
obj_cursor.ton += 1
if event.key == pygame.K_DOWN:
if obj_cursor.ton > 0:
obj_cursor.ton -= 1
if event.key == pygame.K_1:
obj_cursor.trajanje = 15
if event.key == pygame.K_2:
obj_cursor.trajanje = 7
if event.key == pygame.K_4:
obj_cursor.trajanje = 3
if event.key == pygame.K_8:
obj_cursor.trajanje = 1
if event.key == pygame.K_6:
obj_cursor.trajanje = 0
if event.key == pygame.K_3:
obj_cursor.trajanje = 1
if event.key == pygame.K_RETURN:
#if list is not empty
if lista_nota:
for i in lista_nota:
if i.pozicija >= obj_cursor.pozicija:
i.pozicija += obj_cursor.trajanje + 1
lista_nota.append(dodaj_notu(obj_cursor.pozicija, obj_cursor.ton, obj_cursor.trajanje, 0))
obj_cursor.pozicija += obj_cursor.trajanje + 1
#if list jet is empty first time only
else:
lista_nota.append(dodaj_notu(obj_cursor.pozicija, obj_cursor.ton, obj_cursor.trajanje, 0))
obj_cursor.pozicija += obj_cursor.trajanje + 1
if event.key == pygame.K_SPACE:
#if list is not empty
if lista_nota:
for i in lista_nota:
if i.pozicija >= obj_cursor.pozicija:
i.pozicija += obj_cursor.trajanje + 1
#lista_nota.append(dodaj_notu(obj_cursor.pozicija, 20, obj_cursor.trajanje, 3))
obj_cursor.pozicija += obj_cursor.trajanje + 1
#if list jet is empty first time only
else:
#lista_nota.append(dodaj_notu(obj_cursor.pozicija, 20, obj_cursor.trajanje, 3))
obj_cursor.pozicija += obj_cursor.trajanje + 1
if event.key == pygame.K_BACKSPACE:
obj_cursor.pozicija -= obj_cursor.trajanje + 1
x = [i for i in lista_nota if findNote(i,obj_cursor.pozicija, obj_cursor.trajanje)]
if x:
for i in x:
if i in lista_nota:
lista_nota.remove(i)
if lista_nota:
for i in lista_nota:
if i.pozicija >= obj_cursor.pozicija:
i.pozicija -= obj_cursor.trajanje + 1
if event.key == pygame.K_DELETE:
x = [i for i in lista_nota if findNote(i,obj_cursor.pozicija, obj_cursor.trajanje)]
if x:
for i in x:
if i in lista_nota:
lista_nota.remove(i)
if lista_nota:
for i in lista_nota:
if i.pozicija > obj_cursor.pozicija:
i.pozicija -= obj_cursor.trajanje + 1
#p play note as midi
if event.key == pygame.K_p:
if midiplay == 0:
midi_notes = [[i,time.clock(), 0] for i in lista_nota if findNote(i,obj_cursor.pozicija, obj_cursor.trajanje)]
#old mode
if old_mode:
if event.key == pygame.K_RIGHT:
obj_cursor.pozicija += 8
if event.key == pygame.K_LEFT:
if obj_cursor.pozicija > -15:
obj_cursor.pozicija -= 8
if event.key == | |
# Acquire/Client/_user.py
import os as _os
from enum import Enum as _Enum
from datetime import datetime as _datetime
import time as _time
from Acquire.Service import call_function as _call_function
from Acquire.Service import Service as _Service
from Acquire.ObjectStore import bytes_to_string as _bytes_to_string
from Acquire.ObjectStore import string_to_bytes as _string_to_bytes
from Acquire.Crypto import PrivateKey as _PrivateKey
from Acquire.Crypto import PublicKey as _PublicKey
from ._qrcode import create_qrcode as _create_qrcode
from ._qrcode import has_qrcode as _has_qrcode
from ._errors import UserError, LoginError
# If we can, import socket to get the hostname and IP address
try:
import socket as _socket
_has_socket = True
except:
_has_socket = False
__all__ = ["User", "username_to_uid", "uid_to_username", "get_session_keys"]
class _LoginStatus(_Enum):
    """Lifecycle states of a User login session."""
    EMPTY = 0       # no login attempted yet
    LOGGING_IN = 1  # login requested, awaiting approval
    LOGGED_IN = 2   # session approved
    LOGGED_OUT = 3  # session explicitly closed
    ERROR = 4      # login failed; object raises on further use
def _get_identity_url():
    """Function to discover and return the default identity url"""
    # NOTE(review): hard-coded test endpoint (private IP); should be made
    # configurable (e.g. via an environment variable) before production use.
    return "http://172.16.17.32:8080/t/identity"
def _get_identity_service(identity_url=None):
    """Function to return the identity service for the system

    Fetches the service descriptor from `identity_url` (default from
    _get_identity_url), validates it is an identity service, and pins
    the URL actually used onto the returned Service object.

    Raises:
        LoginError: if the reply lacks service info or the service is
        not an identity service.
    """
    if identity_url is None:
        identity_url = _get_identity_url()
    # Fresh key so the service can encrypt its response to us.
    privkey = _PrivateKey()
    response = _call_function(identity_url, response_key=privkey)
    try:
        service = _Service.from_data(response["service_info"])
    except:
        # NOTE(review): bare except discards the original error; consider
        # narrowing and chaining it for easier debugging.
        raise LoginError("Have not received the identity service info from "
                         "the identity service at '%s' - got '%s'" %
                         (identity_url, response))
    if not service.is_identity_service():
        raise LoginError(
            "You can only use a valid identity service to log in! "
            "The service at '%s' is a '%s'" %
            (identity_url, service.service_type()))
    # Remember the URL we actually reached the service through.
    if identity_url != service.service_url():
        service.update_service_url(identity_url)
    return service
def uid_to_username(user_uid, identity_url=None):
    """Function to return the username for the passed uid"""
    url = identity_url if identity_url is not None else _get_identity_url()
    reply = _call_function(url, "whois", user_uid=str(user_uid))
    return reply["username"]
def username_to_uid(username, identity_url=None):
    """Function to return the uid for the passed username"""
    url = identity_url if identity_url is not None else _get_identity_url()
    reply = _call_function(url, "whois", username=str(username))
    return reply["user_uid"]
def get_session_keys(username=None, user_uid=None, session_uid=None,
                     identity_url=None):
    """Function to return the session keys for the specified user

    Queries the identity service's "whois" endpoint and, where present,
    deserialises the returned public key/certificate into _PublicKey
    objects.

    Raises:
        ValueError: if neither username nor user_uid is given, or
        session_uid is missing.
    """
    if username is None and user_uid is None:
        raise ValueError("You must supply either the username or user_uid!")
    if session_uid is None:
        raise ValueError("You must supply a valid UID for a login session")
    if identity_url is None:
        identity_url = _get_identity_url()
    response = _call_function(identity_url, "whois",
                              username=username,
                              user_uid=user_uid,
                              session_uid=session_uid)
    # Best-effort deserialisation: the keys may be absent from the reply,
    # in which case the raw response fields are left untouched.
    try:
        response["public_key"] = _PublicKey.from_data(response["public_key"])
    except:
        pass
    try:
        response["public_cert"] = _PublicKey.from_data(response["public_cert"])
    except:
        pass
    return response
class User:
"""This class holds all functionality that would be used
by a user to authenticate with and access the service.
This represents a single client login, and is the
user-facing part of Acquire
"""
    def __init__(self, username, identity_url=None):
        """Construct a null user

        Args:
            username: name of the user on the identity service.
            identity_url: optional service URL; when omitted,
                identity_service_url() falls back to the default.
        """
        self._username = username
        self._status = _LoginStatus.EMPTY
        self._identity_service = None
        # Only set when given: identity_service_url() relies on the
        # attribute being absent to return the discovered default.
        if identity_url:
            self._identity_url = identity_url
        # Looked up lazily by uid().
        self._user_uid = None
def __str__(self):
return "User(name='%s', status=%s)" % (self.username(), self.status())
def __enter__(self):
"""Enter function used by 'with' statements'"""
pass
    def __exit__(self, exception_type, exception_value, traceback):
        """Ensure that we logout"""
        # Returns None, so any in-flight exception propagates normally.
        self.logout()
    def __del__(self):
        """Make sure that we log out before deleting this object"""
        # NOTE(review): logout() performs network I/O; during interpreter
        # shutdown this may fail — confirm this best-effort call is safe.
        self.logout()
    def _set_status(self, status):
        """Internal function used to set the status from the
        string obtained from the LoginSession"""
        if status == "approved":
            self._status = _LoginStatus.LOGGED_IN
        elif status == "denied":
            self._set_error_state("Permission to log in was denied!")
        elif status == "logged_out":
            self._status = _LoginStatus.LOGGED_OUT
        # NOTE(review): any other status string is silently ignored —
        # confirm this is intentional.
    def username(self):
        """Return the username of the user (as passed to the constructor)"""
        return self._username
    def uid(self):
        """Return the UID of this user. This uniquely identifies the
        user across all systems
        """
        # Looked up lazily from the identity service and cached.
        if self._user_uid is None:
            self._user_uid = username_to_uid(self.username(),
                                             self.identity_service_url())
        return self._user_uid
    def status(self):
        """Return the current status of this user (a _LoginStatus value)"""
        return self._status
    def _check_for_error(self):
        """Call to ensure that this object is not in an error
        state. If it is in an error state then raise an
        exception"""
        if self._status == _LoginStatus.ERROR:
            # _error_string is set by _set_error_state alongside the status.
            raise LoginError(self._error_string)
    def _set_error_state(self, message):
        """Put this object into an error state, displaying the
        passed message if anyone tries to use this object"""
        self._status = _LoginStatus.ERROR
        self._error_string = message
    def session_key(self):
        """Return the session key for the current login session,
        or None if no session has been started"""
        self._check_for_error()
        try:
            return self._session_key
        except:
            # Attribute not yet set — no login session exists.
            return None
    def signing_key(self):
        """Return the signing key used for the current login session,
        or None if no session has been started"""
        self._check_for_error()
        try:
            return self._signing_key
        except:
            # Attribute not yet set — no login session exists.
            return None
    def identity_service(self):
        """Return the identity service info object for the identity
        service used to validate the identity of this user
        """
        # Cached after the first (network) lookup.
        if self._identity_service:
            return self._identity_service
        self._identity_service = _get_identity_service(
            self.identity_service_url())
        return self._identity_service
    def identity_service_url(self):
        """Return the URL to the identity service. This is the full URL
        to the service, minus the actual function to be called, e.g.
        https://function_service.com/t/identity
        """
        self._check_for_error()
        try:
            return self._identity_url
        except:
            # Attribute only exists when a URL was passed to __init__;
            # otherwise return the default URL - this should be discovered...
            return _get_identity_url()
    def login_url(self):
        """Return the URL that the user must connect to to authenticate
        this login session, or None if no login has been requested"""
        self._check_for_error()
        try:
            return self._login_url
        except:
            return None
    def login_qr_code(self):
        """Return a QR code of the login URL that the user must connect to
        to authenticate this login session, or None if no login has
        been requested"""
        self._check_for_error()
        try:
            return self._login_qrcode
        except:
            return None
    def session_uid(self):
        """Return the UID of the current login session. Returns None
        if there is no valid login session"""
        self._check_for_error()
        try:
            return self._session_uid
        except:
            return None
    def is_empty(self):
        """Return whether or not this is an empty login (so has not
        been used for anything yet...)"""
        return self._status == _LoginStatus.EMPTY
    def is_logged_in(self):
        """Return whether or not the user has successfully logged in"""
        return self._status == _LoginStatus.LOGGED_IN
    def is_logging_in(self):
        """Return whether or not the user is in the process of logging in"""
        return self._status == _LoginStatus.LOGGING_IN
    def logout(self):
        """Log out from the current session

        Signs a logout request with the session's signing key and sends
        it to the identity service. Returns the service's response, or
        None when there is nothing to log out from.
        """
        if self.is_logged_in() or self.is_logging_in():
            identity_url = self.identity_service_url()
            if identity_url is None:
                return
            # create a permission message that can be signed
            # and then validated by the user
            permission = "Log out request for %s" % self._session_uid
            signature = self.signing_key().sign(permission)
            # NOTE(review): debug prints — consider switching to logging.
            print("Logging out %s from session %s" % (self._username,
                                                      self._session_uid))
            result = _call_function(
                identity_url, "logout",
                args_key=self.identity_service().public_key(),
                username=self._username,
                session_uid=self._session_uid,
                permission=permission,
                signature=_bytes_to_string(signature))
            print(result)
            self._status = _LoginStatus.LOGGED_OUT
            return result
    def register(self, password, identity_url=None):
        """Request to register this user with the identity service running
        at 'identity_url', using the supplied 'password'. This will
        return a QR code that you must use immediately to add this
        user on the identity service to a QR code generator

        Returns:
            (provisioning_uri, qrcode) tuple, or None when the object
            has no username.

        Raises:
            UserError: if the service reply lacks a provisioning URI.
        """
        if self._username is None:
            return None
        if identity_url is None:
            identity_url = _get_identity_url()
        # Fresh key so the registration response is encrypted to us.
        privkey = _PrivateKey()
        result = _call_function(
            identity_url, "register",
            args_key=self.identity_service().public_key(),
            response_key=privkey,
            public_cert=self.identity_service().public_certificate(),
            username=self._username, password=password)
        try:
            provisioning_uri = result["provisioning_uri"]
        except:
            # NOTE(review): bare except hides the original error; consider
            # narrowing and chaining it.
            raise UserError(
                "Cannot register the user '%s' on "
                "the identity service at '%s'!" %
                (self._username, identity_url))
        # return a QR code for the provisioning URI
        return (provisioning_uri, _create_qrcode(provisioning_uri))
def request_login(self, login_message=None):
"""Request to authenticate as this user. This returns a login URL that
you must connect to to supply your login credentials
If 'login_message' is supplied, then this is passed to
the identity service so that it can be displayed
when the user accesses the login page. This helps
the user validate that they have accessed the correct
login page. Note that if the message is None,
then a random message will be generated.
"""
self._check_for_error()
if not self.is_empty():
raise LoginError("You cannot try to log in twice using the same "
"User object. Create another object if you want "
"to try to log in again.")
# first, create a private key that will be used
# to sign all requests and identify this login
session_key = _PrivateKey()
signing_key = _PrivateKey()
args = {"username": self._username,
"public_key": session_key.public_key().to_data(),
"public_certificate": signing_key.public_key().to_data(),
"ipaddr": None}
# get information from the local machine to help
# the user validate that the login details are correct
if _has_socket:
hostname = _socket.gethostname()
ipaddr = _socket.gethostbyname(hostname)
args["ipaddr"] = ipaddr
args["hostname"] = hostname
if login_message is None:
login_message = "User '%s' in process '%s' wants to log in..." % \
(_os.getlogin(), _os.getpid())
args["message"] = login_message
result = _call_function(
self.identity_service_url(), "request_login",
args_key=self.identity_service().public_key(),
response_key=session_key,
public_cert=self.identity_service().public_certificate(),
username=self._username,
public_key=session_key.public_key().to_data(),
public_certificate=signing_key.public_key().to_data(),
ipaddr=None,
message=login_message)
# look for status = 0
try:
status = int(result["status"])
except:
status = -1
try:
message = result["message"]
except:
message = str(result)
try:
prune_message = result["prune_message"]
print("Pruning old sessions...\n%s" % "\n".join(prune_message))
except:
pass
if status != 0:
error = "Failed to login. Error = %d. Message = %s" % \
(status, message)
self._set_error_state(error)
raise LoginError(error)
try:
login_url = result["login_url"]
except:
login_url = None
if login_url is None:
error = "Failed to login. Could not extract | |
float(value)
except:
dlg = wx.MessageDialog(None,
"Can not set item with format %s to value %s" % (fmt4, value),
'Change to item', wx.OK|wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return False
else:
return True
def GetValue(self, name, radio_dict):
try:
value = radio_dict[name]
except:
pass
else:
return value
# Value was not in radio_dict. Get it from conf. There are values for platform win_data_name and lin_data_name.
# The win_ and lin_ names are not in conf.
try:
fmt = local_conf.format4name[name]
except:
fmt = '' # not all items in conf are in section_data or receiver_data
try:
if fmt == 'dict': # make a copy for this radio
value = {}
value.update(getattr(conf, name))
elif fmt == 'list': # make a copy for this radio
value = getattr(conf, name)[:]
else:
value = str(getattr(conf, name))
except:
return ''
else:
return value
class Radios(BaseWindow):	# The "Radios" first-level page
  """First-level configuration page: choose the startup radio and add,
  rename or delete radios."""
  def __init__(self, parent):
    BaseWindow.__init__(self, parent)
    self.num_cols = 8
    self.radio_name = None
    self.cur_radio_text = self.AddTextL(1, 'xx', self.num_cols - 1)
    self.SetCurrentRadioText()
    self.NextRow()
    self.NextRow()
    item = self.AddTextL(1, "When Quisk starts, use the radio")
    self.start_radio = self.AddComboCtrl(2, 'big_radio_name', choices=[], no_edit=True)
    self.start_radio.handler = self.OnChoiceStartup
    self.NextRow()
    item = self.AddTextL(1, "Add a new radio with the general type")
    choices = []
    for name, data in local_conf.receiver_data:
      choices.append(name)
    self.add_type = self.AddComboCtrl(2, '', choices=choices, no_edit=True)
    self.add_type.SetSelection(0)
    item = self.AddTextL(3, "and name the new radio")
    self.add_name = self.AddComboCtrl(4, '', choices=["My Radio", "SR with XVtr", "SoftRock"])
    item = self.AddPushButton(5, "Add")
    self.Bind(wx.EVT_BUTTON, self.OnBtnAdd, item)
    self.NextRow()
    item = self.AddTextL(1, "Rename the radio named")
    self.rename_old = self.AddComboCtrl(2, 'big_radio_name', choices=[], no_edit=True)
    item = self.AddTextL(3, "to the new name")
    self.rename_new = self.AddComboCtrl(4, '', choices=["My Radio", "SR with XVtr", "SoftRock"])
    item = self.AddPushButton(5, "Rename")
    self.Bind(wx.EVT_BUTTON, self.OnBtnRename, item)
    self.NextRow()
    item = self.AddTextL(1, "Delete the radio named")
    self.delete_name = self.AddComboCtrl(2, 'big_radio_name', choices=[], no_edit=True)
    item = self.AddPushButton(3, "Delete")
    self.Bind(wx.EVT_BUTTON, self.OnBtnDelete, item)
    self.NextRow()
    item = self.AddTextL(1, "Restart Quisk with new settings")
    item = self.AddPushButton(2, "Restart Quisk", 1)
    self.Bind(wx.EVT_BUTTON, self.OnBtnRestart, item)
    if application.pulse_in_use:
      pass #item.Enable(False)	# Pulse requires a program exit to clean up
    self.NextRow()
    self.Fit()
    self.SetupScrolling()
    self.NewRadioNames()
  def SetCurrentRadioText(self):
    # Describe the currently running radio and its hardware type.
    radio_dict = local_conf.GetRadioDict(self.radio_name)
    radio_type = radio_dict['hardware_file_type']
    if Settings[1] == "ConfigFileRadio":
      text = 'The current radio is ConfigFileRadio, so all settings come from the config file. The hardware type is %s.' % radio_type
    else:
      text = "Quisk is running with settings from the radio %s. The hardware type is %s." % (Settings[1], radio_type)
    self.cur_radio_text.SetLabel(text)
  def DuplicateName(self, name):
    # Return True (and warn the user) if 'name' is already in use.
    if name in Settings[2] or name == "ConfigFileRadio":
      dlg = wx.MessageDialog(self, "The name already exists. Please choose a different name.",
          'Quisk', wx.OK)
      dlg.ShowModal()
      dlg.Destroy()
      return True
    return False
  def OnBtnAdd(self, event):
    # Add a new radio of the selected general type.
    name = self.add_name.GetValue().strip()
    if not name or self.DuplicateName(name):
      return
    self.add_name.SetValue('')
    typ = self.add_type.GetValue().strip()
    if local_conf.AddRadio(name, typ):
      if Settings[0] != "Ask me":
        Settings[0] = name
      self.NewRadioNames()
      local_conf.settings_changed = True
  def OnBtnRename(self, event):
    old = self.rename_old.GetValue()
    new = self.rename_new.GetValue().strip()
    if not old or not new or self.DuplicateName(new):
      return
    self.rename_new.SetValue('')
    if local_conf.RenameRadio(old, new):
      # Keep the current and startup radio names in sync with the rename.
      if old == 'ConfigFileRadio' and Settings[1] == "ConfigFileRadio":
        Settings[1] = new
      elif Settings[1] == old:
        Settings[1] = new
      self.SetCurrentRadioText()
      if Settings[0] != "Ask me":
        Settings[0] = new
      self.NewRadioNames()
      local_conf.settings_changed = True
  def OnBtnDelete(self, event):
    name = self.delete_name.GetValue()
    if not name:
      return
    dlg = wx.MessageDialog(self,
        "Are you sure you want to permanently delete the radio %s?" % name,
        'Quisk', wx.OK|wx.CANCEL|wx.ICON_EXCLAMATION)
    ret = dlg.ShowModal()
    dlg.Destroy()
    if ret == wx.ID_OK and local_conf.DeleteRadio(name):
      self.NewRadioNames()
      local_conf.settings_changed = True
  def OnChoiceStartup(self, ctrl):
    # Record the radio to use when Quisk starts.
    choice = self.start_radio.GetValue()
    if Settings[0] != choice:
      Settings[0] = choice
      local_conf.settings_changed = True
  def NewRadioNames(self):		# Correct all choice lists for changed radio names
    choices = Settings[2][:]	# can rename any available radio
    self.rename_old.SetItems(choices)
    self.rename_old.SetSelection(0)
    if "ConfigFileRadio" in choices:
      choices.remove("ConfigFileRadio")
    if Settings[1] in choices:
      choices.remove(Settings[1])
    self.delete_name.SetItems(choices)		# can not delete ConfigFileRadio nor the current radio
    self.delete_name.SetSelection(0)
    choices = Settings[2] + ["Ask me"]
    if "ConfigFileRadio" not in choices:
      choices.append("ConfigFileRadio")
    self.start_radio.SetItems(choices)		# can start any radio, plus "Ask me" and "ConfigFileRadio"
    try:	# Set text in control
      index = choices.index(Settings[0])	# last used radio, or new or renamed radio
    except:
      # Settings[0] is not a valid choice; pick a sensible default.
      num = len(Settings[2])
      # BUG FIX: was "if len == 0:", which compared the builtin 'len'
      # function to zero and was therefore never true.
      if num == 0:
        index = 1
      elif num == 1:
        index = 0
      else:
        index = len(choices) - 2
      Settings[0] = choices[index]
    self.start_radio.SetSelection(index)
  def OnBtnRestart(self, event):
    # Request a restart; the main frame performs the actual close/reopen.
    application.startup_quisk = True
    application.main_frame.OnBtnClose(event)
class RadioSection(BaseWindow):	# The pages for each section in the second-level notebook for each radio
  """One settings section page in the per-radio second-level notebook.
  'names' is a sequence of (name, text, fmt, help_text, values) tuples."""
  def __init__(self, parent, radio_name, section, names):
    BaseWindow.__init__(self, parent)
    self.radio_name = radio_name
    self.names = names
    self.num_cols = 8
    #self.MarkCols()
    self.NextRow(3)
    col = 1
    radio_dict = local_conf.GetRadioDict(radio_name)
    for name, text, fmt, help_text, values in self.names:
      if name == 'favorites_file_path':
        # The favorites-file row is pinned to row 1, above the other items.
        self.favorites_path = radio_dict.get('favorites_file_path', '')
        row = self.row
        self.row = 1
        item, self.favorites_combo, btn = self.AddTextComboHelp(1, text, self.favorites_path, values, help_text, False, span_text=1, span_combo=4)
        self.favorites_combo.handler = self.OnButtonChangeFavorites
        item = self.AddPushButtonR(7, "Change..", border=0)
        item.Bind(wx.EVT_BUTTON, self.OnButtonChangeFavorites)
        self.row = row
      else:
        if fmt[0:4] in ('dict', 'list'):
          continue
        if name[0:4] == platform_ignore:
          continue
        value = self.GetValue(name, radio_dict)
        no_edit = "choice" in fmt or fmt == 'boolean'
        txt, cb, btn = self.AddTextComboHelp(col, text, value, values, help_text, no_edit)
        cb.handler = self.OnChange
        cb.quisk_data_name = name
        # Items alternate between the left (col 1) and right (col 4) columns.
        if col == 1:
          col = 4
        else:
          col = 1
          self.NextRow()
    self.AddColSpacer(2, 20)
    self.AddColSpacer(5, 20)
    self.Fit()
    self.SetupScrolling()
  def OnButtonChangeFavorites(self, event):
    # Called either from the combo (event is a ComboCtrl) or the button.
    if isinstance(event, ComboCtrl):
      path = event.GetValue()
    else:
      direc, fname = os.path.split(getattr(conf, 'favorites_file_in_use'))
      # NOTE(review): wx.OPEN is the wxPython Classic flag; Phoenix renamed
      # it wx.FD_OPEN -- confirm the wx version in use.
      dlg = wx.FileDialog(None, "Choose Favorites File", direc, fname, "*.txt", wx.OPEN)
      if dlg.ShowModal() == wx.ID_OK:
        path = dlg.GetPath()
        self.favorites_combo.SetText(path)
        dlg.Destroy()
      else:
        dlg.Destroy()
        return
    path = path.strip()
    self.favorites_path = path
    local_conf.GetRadioDict(self.radio_name)["favorites_file_path"] = path
    local_conf.settings_changed = True
class RadioHardware(BaseWindow):	# The Hardware page in the second-level notebook for each radio
  """Hardware settings page for one radio; items come from the receiver
  data for the radio's hardware type."""
  def __init__(self, parent, radio_name):
    BaseWindow.__init__(self, parent)
    self.radio_name = radio_name
    self.num_cols = 8
    #self.MarkCols()
    radio_dict = local_conf.GetRadioDict(radio_name)
    radio_type = radio_dict['hardware_file_type']
    data_names = local_conf.GetReceiverData(radio_type)
    bsizer = self.AddBoxSizer(1, self.num_cols - 1)
    item = self.AddTextL(-1, "These are the hardware settings for a radio of type %s" % radio_type, self.num_cols-1)
    bsizer.Add(item)
    self.NextRow(7)
    col = 1
    border = 2
    for name, text, fmt, help_text, values in data_names:
      if name == 'hardware_file_name':
        # The hardware file row is pinned to row 3.
        self.hware_path = self.GetValue(name, radio_dict)
        row = self.row
        self.row = 3
        item, self.hware_combo, btn = self.AddTextComboHelp(1, text, self.hware_path, values, help_text, False, span_text=1, span_combo=4)
        self.hware_combo.handler = self.OnButtonChangeHardware
        item = self.AddPushButtonR(7, "Change..", border=0)
        item.Bind(wx.EVT_BUTTON, self.OnButtonChangeHardware)
        self.row = row
      elif name == 'widgets_file_name':
        # The widgets file row is pinned to row 5.
        self.widgets_path = self.GetValue(name, radio_dict)
        row = self.row
        self.row = 5
        item, self.widgets_combo, btn = self.AddTextComboHelp(1, text, self.widgets_path, values, help_text, False, span_text=1, span_combo=4)
        self.widgets_combo.handler = self.OnButtonChangeWidgets
        item = self.AddPushButtonR(7, "Change..", border=0)
        item.Bind(wx.EVT_BUTTON, self.OnButtonChangeWidgets)
        self.row = row
      elif fmt[0:4] in ('dict', 'list'):
        pass
      elif name[0:4] == platform_ignore:
        pass
      else:
        value = self.GetValue(name, radio_dict)
        no_edit = "choice" in fmt or fmt == 'boolean'
        txt, cb, btn = self.AddTextComboHelp(col, text, value, values, help_text, no_edit, border=border)
        cb.handler = self.OnChange
        cb.quisk_data_name = name
        # Items alternate between the left (col 1) and right (col 4) columns.
        if col == 1:
          col = 4
          border = 0
        else:
          col = 1
          border = 2
          self.NextRow()
    self.AddColSpacer(2, 20)
    self.AddColSpacer(5, 20)
    self.Fit()
    self.SetupScrolling()
  def OnButtonChangeHardware(self, event):
    # Called either from the combo (event is a ComboCtrl) or the button.
    if isinstance(event, ComboCtrl):
      path = event.GetValue()
    else:
      direc, fname = os.path.split(self.hware_path)
      # NOTE(review): wx.OPEN is the wxPython Classic flag; Phoenix renamed
      # it wx.FD_OPEN -- confirm the wx version in use.
      dlg = wx.FileDialog(None, "Choose Hardware File", direc, fname, "*.py", wx.OPEN)
      if dlg.ShowModal() == wx.ID_OK:
        path = dlg.GetPath()
        self.hware_combo.SetText(path)
        dlg.Destroy()
      else:
        dlg.Destroy()
        return
    path = path.strip()
    self.hware_path = path
    local_conf.GetRadioDict(self.radio_name)["hardware_file_name"] = path
    local_conf.settings_changed = True
  def OnButtonChangeWidgets(self, event):
    # Same pattern as OnButtonChangeHardware, for the widgets file.
    if isinstance(event, ComboCtrl):
      path = event.GetValue()
    else:
      direc, fname = os.path.split(self.widgets_path)
      dlg = wx.FileDialog(None, "Choose Widgets File", direc, fname, "*.py", wx.OPEN)
      if dlg.ShowModal() == wx.ID_OK:
        path = dlg.GetPath()
        self.widgets_combo.SetText(path)
        dlg.Destroy()
      else:
        dlg.Destroy()
        return
    path = path.strip()
    self.widgets_path = path
    local_conf.GetRadioDict(self.radio_name)["widgets_file_name"] = path
    local_conf.settings_changed = True
class RadioSound(BaseWindow): # The Sound page in the second-level notebook for each radio
"""Configure the available sound devices."""
sound_names = ( # same order as grid labels
('playback_rate', '', '', '', 'name_of_sound_play'),
('mic_sample_rate', 'mic_channel_I', 'mic_channel_Q', '', 'microphone_name'),
('sample_rate', 'channel_i', 'channel_q', 'channel_delay', 'name_of_sound_capt'),
('mic_playback_rate', 'mic_play_chan_I', 'mic_play_chan_Q', 'tx_channel_delay', 'name_of_mic_play'),
('', '', '', '', 'digital_input_name'),
('', '', '', '', 'digital_output_name'),
('', '', '', '', 'sample_playback_name'),
('', '', '', '', 'digital_rx1_name'),
)
def __init__(self, parent, radio_name):
BaseWindow.__init__(self, parent)
self.radio_name = radio_name
self.radio_dict = local_conf.GetRadioDict(self.radio_name)
self.num_cols = 8
thename = platform_accept + "latency_millisecs"
for name, text, fmt, help_text, values in local_conf.GetSectionData('Sound'):
if name == thename:
value = self.GetValue(name, self.radio_dict)
no_edit = "choice" in fmt or fmt == 'boolean'
txt, cb, btn = self.AddTextComboHelp(1, | |
[M] myLeoSettings.leo
[@] @mode, @button, @command
'''
if not d: return g.es('no bindings')
legend = g.adjustTripleString(legend, c.tab_width)
data = []
for stroke in sorted(d):
assert g.isStroke(stroke), stroke
aList = d.get(stroke, [])
for si in aList:
assert g.isShortcutInfo(si), si
s1 = '' if si.pane == 'all' else si.pane
s2 = k.prettyPrintKey(stroke)
s3 = si.commandName
s4 = si.kind or '<no hash>'
data.append((s1, s2, s3, s4),)
# Print keys by type:
result = []
result.append('\n' + legend)
for prefix in (
'Alt+Ctrl+Shift', 'Alt+Ctrl', 'Alt+Shift', 'Alt', # 'Alt+Key': done by Alt.
'Ctrl+Meta+Shift', 'Ctrl+Meta', 'Ctrl+Shift', 'Ctrl', # Ctrl+Key: done by Ctrl.
'Meta+Key', 'Meta+Shift', 'Meta',
'Shift',
# Careful: longer prefixes must come before shorter prefixes.
):
data2 = []
for item in data:
s1, s2, s3, s4 = item
if s2.startswith(prefix):
data2.append(item)
result.append('***** %s...\n' % prefix)
self.printBindingsHelper(result, data2, prefix=prefix)
# Remove all the items in data2 from data.
# This must be done outside the iterator on data.
for item in data2:
data.remove(item)
# Print all plain bindings.
result.append('***** Plain Keys...\n')
self.printBindingsHelper(result, data, prefix=None)
if not g.unitTesting:
g.es_print('', ''.join(result), tabName=tabName)
k.showStateAndMode()
return result # for unit test.
#@+node:ekr.20061031131434.120: *5* printBindingsHelper
def printBindingsHelper(self, result, data, prefix):
lm = g.app.loadManager
data.sort(key=lambda x: x[1])
data2, n = [], 0
for pane, key, commandName, kind in data:
key = key.replace('+Key', '')
# g.trace('%10s %s' % (key, repr(kind)))
letter = lm.computeBindingLetter(kind)
pane = '%s: ' % (pane) if pane else ''
left = pane + key # pane and shortcut fields
n = max(n, len(left))
data2.append((letter, left, commandName),)
for z in data2:
letter, left, commandName = z
result.append('%s %*s %s\n' % (letter, -n, left, commandName))
if data:
result.append('\n')
#@+node:ekr.20120520174745.9867: *4* k.printButtons
    @cmd('print-buttons')
    def printButtons(self, event=None):
        '''Print all @button and @command commands, their bindings and their source.'''
        k = self; c = k.c
        tabName = '@buttons && @commands'
        c.frame.log.clearTab(tabName)
        def put(s):
            # Write one line to the log tab.
            g.es('', s, tabName=tabName)
        data = []
        for aList in [c.config.getButtons(), c.config.getCommands()]:
            for z in aList:
                p, script = z
                # NOTE: 'c' is deliberately rebound to the defining node's
                # context so shortFileName() names the settings file below.
                c = p.v.context
                tag = 'M' if c.shortFileName().endswith('myLeoSettings.leo') else 'G'
                data.append((p.h, tag),)
        for aList in [g.app.config.atLocalButtonsList, g.app.config.atLocalCommandsList]:
            for p in aList:
                data.append((p.h, 'L'),)
        result = ['%s %s' % (z[1], z[0]) for z in sorted(data)]
        result.extend([
            '',
            'legend:',
            'G leoSettings.leo',
            'L local .leo File',
            'M myLeoSettings.leo',
        ])
        put('\n'.join(result))
#@+node:ekr.20061031131434.121: *4* k.printCommands
    @cmd('print-commands')
    def printCommands(self, event=None):
        '''Print all the known commands and their bindings, if any.'''
        k = self; c = k.c; tabName = 'Commands'
        c.frame.log.clearTab(tabName)
        inverseBindingDict = k.computeInverseBindingDict()
        # n tracks the widest "pane key" field for column alignment.
        data, n = [], 0
        for commandName in sorted(c.commandsDict):
            dataList = inverseBindingDict.get(commandName, [('', ''),])
            for z in dataList:
                pane, key = z
                pane = '%s ' % (pane) if pane != 'all:' else ''
                key = k.prettyPrintKey(key).replace('+Key', '')
                s1 = pane + key
                s2 = commandName
                n = max(n, len(s1))
                data.append((s1, s2),)
        # This isn't perfect in variable-width fonts.
        lines = ['%*s %s\n' % (-n, z1, z2) for z1, z2 in data]
        g.es_print('', ''.join(lines), tabName=tabName)
#@+node:ekr.20061031131434.122: *4* k.repeatComplexCommand & helper
    @cmd('repeat-complex-command')
    def repeatComplexCommand(self, event):
        '''Repeat the previously executed minibuffer command.'''
        k = self
        if k.mb_history:
            # mb_history[0] is the most recent minibuffer command name;
            # the helper runs it when the user confirms with Return.
            k.setState('last-full-command', 1, handler=k.repeatComplexCommandHelper)
            k.setLabelBlue("Redo: %s" % str(k.mb_history[0]))
        else:
            g.warning('no previous command')
#@+node:ekr.20131017100903.16689: *5* repeatComplexCommandHelper
    def repeatComplexCommandHelper(self, event):
        '''State handler: re-run the last minibuffer command on Return;
        any other key cancels via keyboardQuit.'''
        k = self; c = k.c
        char = event.char if event else ''
        if char in ('\n', 'Return') and k.mb_history:
            last = k.mb_history[0]
            k.resetLabel()
            k.clearState() # Bug fix.
            # Look up the command by name and invoke it with this event.
            c.commandsDict[last](event)
        else:
            # g.trace('oops')
            return k.keyboardQuit()
#@+node:ekr.20061031131434.123: *4* k.set-xxx-State
@cmd('set-command-state')
def setCommandState(self, event):
'''Enter the 'command' editing state.'''
# g.trace(g.callers())
k = self
k.setInputState('command', set_border=True)
# This command is also valid in headlines.
# k.c.bodyWantsFocus()
k.showStateAndMode()
@cmd('set-insert-state')
def setInsertState(self, event):
'''Enter the 'insert' editing state.'''
# g.trace(g.callers())
k = self
k.setInputState('insert', set_border=True)
# This command is also valid in headlines.
# k.c.bodyWantsFocus()
k.showStateAndMode()
@cmd('set-overwrite-state')
def setOverwriteState(self, event):
'''Enter the 'overwrite' editing state.'''
# g.trace(g.callers())
k = self
k.setInputState('overwrite', set_border=True)
# This command is also valid in headlines.
# k.c.bodyWantsFocus()
k.showStateAndMode()
#@+node:ekr.20061031131434.124: *4* k.toggle-input-state
@cmd('toggle-input-state')
def toggleInputState(self, event=None):
'''The toggle-input-state command.'''
k = self; c = k.c
default = c.config.getString('top_level_unbound_key_action') or 'insert'
state = k.unboundKeyAction
if default == 'insert':
state = 'command' if state == 'insert' else 'insert'
elif default == 'overwrite':
state = 'command' if state == 'overwrite' else 'overwrite'
else:
state = 'insert' if state == 'command' else 'command' # prefer insert to overwrite.
k.setInputState(state)
k.showStateAndMode()
#@+node:ekr.20061031131434.125: *3* k.Externally visible helpers
#@+node:ekr.20140816165728.18968: *4* Wrappers for GetArg methods
# New in Leo 5.4
    def getNextArg(self, handler):
        '''
        Get the next arg. For example, after a Tab in the find commands.
        See the docstring for k.get1Arg for examples of its use.
        '''
        # Replace the current handler.
        # The state tuple has the shape (kind, n, handler).
        self.getArgInstance.after_get_arg_state = ('getarg', 1, handler)
# New in Leo 5.4
    def get1Arg(self, event, handler,
        # returnKind=None, returnState=None,
        prefix=None, tabList=None, completion=True, oneCharacter=False,
        stroke=None, useMinibuffer=True
    ):
        #@+<< docstring for k.get1arg >>
        #@+node:ekr.20161020031633.1: *5* << docstring for k.get1arg >>
        '''
        k.get1Arg: Handle the next character the user types when accumulating a
        user argument from the minibuffer. Ctrl-G will abort this processing at any
        time.

        Commands should use k.get1Arg to get the first minibuffer argument and
        k.getNextArg to get all other arguments.

        Before going into the many details, let's look at some examples. This
        code will work in any class having a 'c' ivar bound to a commander.

        Example 1: get one argument from the user:

            @cmd('my-command')
            def myCommand(self, event):
                k = self.c.k
                k.setLabelBlue('prompt: ')
                k.get1Arg(event, handler=self.myCommand1)

            def myCommand1(self, event):
                k = self.c.k
                # k.arg contains the argument.
                # Finish the command.
                ...
                # Reset the minibuffer.
                k.clearState()
                k.resetLabel()
                k.showStateAndMode()

        Example 2: get two arguments from the user:

            @cmd('my-command')
            def myCommand(self, event):
                k = self.c.k
                k.setLabelBlue('first prompt: ')
                k.get1Arg(event, handler=self.myCommand1)

            def myCommand1(self, event):
                k = self.c.k
                self.arg1 = k.arg
                k.setLabelBlue('second prompt: ')
                k.getNextArg(handler=self.myCommand2)

            def myCommand2(self, event):
                k = self.c.k
                # k.arg contains second argument.
                # Finish the command, using self.arg1 and k.arg.
                ...
                # Reset the minibuffer.
                k.clearState()
                k.resetLabel()
                k.showStateAndMode()

        k.get1Arg and k.getNextArg are a convenience methods. They simply passes
        their arguments to the get_arg method of the singleton GetArg instance. This
        docstring describes k.get1arg and k.getNextArg as if they were the
        corresponding methods of the GetArg class.

        k.get1Arg is a state machine. Logically, states are tuples (kind, n, handler)
        though they aren't represented that way. When the state machine in the
        GetArg class is active, the kind is 'getArg'. This constant has special
        meaning to Leo's key-handling code.

        The arguments to k.get1Arg are as follows:

        event: The event passed to the command.

        handler=None, An executable. k.get1arg calls handler(event)
                      when the user completes the argument by typing
                      <Return> or (sometimes) <tab>.

        tabList=[]: A list of possible completions.

        completion=True: True if completions are enabled.

        oneCharacter=False: True if k.arg should be a single character.

        stroke=None: The incoming key stroke.

        useMinibuffer=True: True: put focus in the minibuffer while accumulating arguments.
                            False allows sort-lines, for example, to show the selection range.
        '''
        #@-<< docstring for k.get1arg >>
        returnKind, returnState = None, None
        assert handler, g.callers()
        # NOTE(review): 'prefix' is accepted but not forwarded to get_arg,
        # and returnKind/returnState are forced to None -- confirm intended.
        self.getArgInstance.get_arg(event, returnKind, returnState, handler,
            tabList, completion, oneCharacter, stroke, useMinibuffer)
    def getArg(self, event,
        returnKind=None, returnState=None, handler=None,
        prefix=None, tabList=None, completion=True, oneCharacter=False,
        stroke=None, useMinibuffer=True
    ):
        '''Convenience method mapping k.getArg to ga.get_arg.'''
        # NOTE(review): 'prefix' is accepted but not passed on to get_arg.
        self.getArgInstance.get_arg(event, returnKind, returnState, handler,
            tabList, completion, oneCharacter, stroke, useMinibuffer)

    def doBackSpace(self, tabList, completion=True):
        '''Convenience method mapping k.doBackSpace to ga.do_back_space.'''
        self.getArgInstance.do_back_space(tabList, completion)

    def doTabCompletion(self, tabList):
        '''Convenience method mapping k.doTabCompletion to ga.do_tab.'''
        self.getArgInstance.do_tab(tabList)

    def getMinibufferCommandName(self):
        '''
        Convenience method mapping k.getMinibufferCommandName to
        ga.get_minibuffer_command_name.
        '''
        return self.getArgInstance.get_minibuffer_command_name()
#@+node:ekr.20061031131434.130: *4* k.keyboardQuit
@cmd('keyboard-quit')
def keyboardQuit(self, event=None, setFocus=True, mouseClick=False):
'''
This method clears the state and the minibuffer label.
k.endCommand handles all other end-of-command chores.
'''
trace = False and not g.unitTesting
k = self; c = k.c
if trace: g.trace(g.callers())
if g.app.quitting:
return
# 2011/05/30: We may be called from Qt event handlers.
# Make sure to end editing!
c.endEditing()
# Completely clear the mode.
if setFocus:
c.frame.log.deleteTab('Mode')
c.frame.log.hideTab('Completion')
if k.inputModeName:
k.endMode()
# Complete clear the state.
k.state.kind = None
k.state.n = None
k.clearState()
k.resetLabel()
if setFocus:
c.bodyWantsFocus()
# At | |
#!/usr/bin/env python3
# Project : From geodynamic to Seismic observations in the Earth's inner core
# Author : <NAME>
""" Define classes and functions to play with positions and coordinates systems. """
from __future__ import division
from __future__ import absolute_import
import numpy as np
import sys # .float_info import epsilon # to use assert on floating point equivalence
def from_seismo_to_cartesian(r, theta, phi):
    """ Calculate the cartesian coordinates from spherical (w/ latitude) coordinates)

    input:
        r : radius (km)
        theta : latitude (degree)
        phi : longitude (degree)
    output:
        x, y, z
    """
    colat = np.radians(90. - theta)  # colatitude in rad
    lon = np.radians(phi)
    sin_colat = np.sin(colat)
    x = r * sin_colat * np.cos(lon)
    y = r * sin_colat * np.sin(lon)
    z = r * np.cos(colat)
    return x, y, z
def from_cartesian_to_seismo(x, y, z):
    """ Calculate the spherical coordinates (w/ latitude) from cartesian coordinates)

    r, theta, phi = from_cartesian_to_seismo(x, y, z)
    input: x, y, z (scalars, or arrays of the same length)
    output:
        r : radius (km)
        theta : latitude (degree)
        phi : longitude (degree), in [0, 360)
        (same length as the input)
    """
    r = np.sqrt(x**2 + y**2 + z**2)
    theta = np.arccos(z / r) * 180. / np.pi  # colatitude, in degree
    # On the polar axis (x == y == 0) the longitude is undefined; use 0.
    # BUG FIX: the previous check `(x, y) == (0, 0)` raised ValueError for
    # any multi-element array input (truth value of an array is ambiguous),
    # although np.where below shows arrays are intended to be supported.
    if np.ndim(x) == 0 and np.ndim(y) == 0:
        if x == 0 and y == 0:
            phi = 0.
        else:
            phi = np.where(y >= 0., np.arccos(x / np.sqrt(x**2 + y**2)),
                           2. * np.pi - np.arccos(x / np.sqrt(x**2 + y**2)))
            phi = phi * 180. / np.pi
    else:
        rho = np.sqrt(x**2 + y**2)
        safe_rho = np.where(rho == 0., 1., rho)  # avoid 0/0 warnings
        base = np.arccos(x / safe_rho)
        phi = np.where(y >= 0., base, 2. * np.pi - base)
        phi = np.where(rho == 0., 0., phi) * 180. / np.pi
    return r, 90. - theta, phi
def angular_distance_to_point(theta1, phi1, theta2, phi2):
    """ angular distance between the point (theta1, phi1) and the point (theta2, phi2) at the surface of the core.

    Args:
        theta1, theta2 : latitude (degree)
        phi1, phi2: longitude (degree)
    Return phi: angle between the two points (in degree)
    """
    # Exact-zero shortcut for identical scalar points (avoids arccos
    # round-off producing a tiny non-zero angle).
    if (np.array([theta1]).size == 1 and np.array([theta2]).size == 1
            and theta1 == theta2 and phi1 == phi2):
        return 0.
    lat1 = np.radians(theta1)
    lat2 = np.radians(theta2)
    dlon = np.abs(np.radians(phi1) - np.radians(phi2))
    cos_angle = (np.sin(lat1) * np.sin(lat2)
                 + np.cos(lat1) * np.cos(lat2) * np.cos(dlon))
    return np.degrees(np.arccos(cos_angle))
def straight_trajectory(Point1, Point2, N):
    """ Trajectory is a straight line between Point1 and Point2, with N points.

    Point1, Point2: Point()
    N: integer (number of points on the trajectory)
    Use the cartesian coordinates of both points.
    Returns (points, length): the N-2 interior CartesianPoint()s (the two
    endpoints are dropped) and the full segment length.
    """
    dx = Point2.x - Point1.x
    dy = Point2.y - Point1.y
    dz = Point2.z - Point1.z
    length = np.sqrt(dx ** 2 + dy ** 2 + dz ** 2)
    points = [CartesianPoint(Point1.x + dx * f,
                             Point1.y + dy * f,
                             Point1.z + dz * f)
              for f in np.linspace(0, 1, N)]
    return points[1:-1], length
class Point():
    """ Position of a point in the Earth.

    can be computed in cartesian coordinates or in "seismological" coordinates.
    Cartesian coordinates: x,y,z (z is the NS, y is the EW and axe x cross the 0 longitude)
    Seismological coordinates: r, theta, phi (theta is the latitude)
    """

    def __init__(self):
        # Both coordinate systems start undefined; use add_cartesian() /
        # add_seismo() to derive one set from the other.
        self.x, self.y, self.z, self.r, self.theta, self.phi = None, None, None, None, None, None

    def add_cartesian(self):
        """Fill in x, y, z from the (already set) r, theta, phi."""
        assert(self.r is not None)
        assert(self.phi is not None)
        assert(self.theta is not None)
        self.x, self.y, self.z = from_seismo_to_cartesian(
            self.r, self.theta, self.phi)

    def add_seismo(self):
        """Fill in r, theta, phi from the (already set) x, y, z."""
        assert(self.x is not None)
        assert(self.y is not None)
        assert(self.z is not None)
        self.r, self.theta, self.phi = from_cartesian_to_seismo(
            self.x, self.y, self.z)

    def dimensionless(self, lengthscale):
        """Divide all length coordinates (r, x, y, z) by 'lengthscale'."""
        self.r = self.r / lengthscale
        self.x, self.y, self.z = self.x / lengthscale, \
            self.y / lengthscale,\
            self.z / lengthscale

    def er(self):
        """ return the cartesian coordinates of \vec{e}_r.
        """
        try:
            assert(self.r is not None)
            assert(self.phi is not None)
            assert(self.theta is not None)
        except (AttributeError, NameError, AssertionError):
            # Seismo coordinates missing: derive them from x, y, z.
            self.add_seismo()
        phi = self.phi / 180. * np.pi
        theta = (90. - self.theta) * np.pi / 180.  # colatitude in rad
        return np.array([np.sin(theta) * np.cos(phi),
                         np.sin(theta) * np.sin(phi), np.cos(theta)])

    def proj_er(self, vector):
        """ projection of a vector on e_r, the radial vector in spherical coordinates.
        input: vector (cartesian coordinates)
        output: scalar
        """
        try:
            assert(self.r is not None)
            assert(self.phi is not None)
            assert(self.theta is not None)
        except (AttributeError, NameError, AssertionError):
            # Seismo coordinates missing: derive them from x, y, z.
            self.add_seismo()
        vx, vy, vz = vector[0], vector[1], vector[2]  # cartesian coordinates
        phi = self.phi / 180. * np.pi
        theta = (90. - self.theta) * np.pi / 180.
        return np.sin(theta) * np.cos(phi) * vx + np.sin(theta) * \
            np.sin(phi) * vy + np.cos(theta) * vz

    def random_point(self, set_method="uniform", depth=[0., 1.], rICB=1.):
        """ Create a random point (not raypath)
        set_method: type of the distribution.
        Default is uniform over the sphere of radius self.rRICB = 1221.
        """
        # NOTE(review): 'depth' has a mutable (list) default; it is not
        # mutated here, but a tuple would be safer. 'set_method' is
        # currently unused -- TODO confirm intended.
        r = rICB - np.random.uniform(depth[0], depth[1])
        phi = np.random.uniform(-180., 180.)
        # arccos(2u - 1) yields a colatitude uniform over the sphere surface.
        theta = (np.arccos(2 * np.random.uniform(0., 1.) - 1)
                 * 180. / np.pi) - 90
        return r, theta, phi
    # #TODO : set other methods of randomisation!
class SeismoPoint(Point):
    """ Point instance initialized with 'seismic' coordinates

    a, b, c : radius, theta (latitude) and phi (longitude) in degrees
    """

    def __init__(self, a, b, c):
        self.r = a
        self.theta = b
        self.phi = c
        self.add_cartesian()
class CartesianPoint(Point):
    """ Point instance initialized with cartesian coordinates

    a, b, c: x, y, z
    """

    def __init__(self, a, b, c):
        self.x = a
        self.y = b
        self.z = c
        self.add_seismo()
class RandomPoint(Point):
    """ Point instance initialized at a random position.
    method, depth, rIC are forwarded to Point.random_point().
    """
    def __init__(self, method, depth, rIC=1.):
        self.r, self.theta, self.phi = self.random_point(method, depth, rIC)
        self.add_cartesian()
class Raypath():
""" Raypath inside Inner Core.
raypath are defined either by:
- bottom turning point + direction (useful for random trajectories)
- in and out points (at the surface of IC, useful if coming from real data set)
"""
    def __init__(self):
        # All attributes start as None; they are filled in later by
        # add_b_t_point, straight_in_out, calc_zeta, add_property, etc.
        self.points = None
        self.bottom_turning_point = None
        self.direction = None
        self.in_point = None
        self.out_point = None
    def add_property(self, dict_property, brute_force=False):
        """ add any property to the raypath.
        dict_property has to be of the form {'property':value} and will give self.property= value
        If brute_force is False (default), an existing attribute with a
        different non-None value is left unchanged and a message is printed.
        """
        for k, v in dict_property.items():
            if brute_force:
                setattr(self, k, v)
            else:
                try:
                    # do not set new attribute except if brute force is wanted.
                    getattr(self, k)
                except AttributeError:
                    # Attribute does not exist yet: safe to create it.
                    setattr(self, k, v)
                else:
                    if getattr(self, k) is None:
                        setattr(self, k, v)
                    else:
                        if getattr(self, k) != v:
                            print(
                                'Attribute {} already defined with value {}. It has not been changed to {}.'.format(
                                    k,
                                    getattr(
                                        self,
                                        k),
                                    v))
def add_b_t_point(self, point, brute_force=False):
""" Bottom turning point of the trajectory """
if self.bottom_turning_point is None:
self.bottom_turning_point = point
elif brute_force:
self.bottom_turning_point = point
else:
print("bottom_turning_point already defined. Values has not been changed.")
    def straight_in_out(self, N):
        """ Trajectory is a straight line between in and out points, with N points (in and out points not directly parts of the trajectory). """
        try:
            self.points = []
            # N + 2 points are generated; straight_trajectory drops the two
            # endpoints, leaving N interior points.
            self.points, self.length = straight_trajectory(
                self.in_point, self.out_point, N + 2)
        except(NameError, AttributeError):
            raise Exception("in and out points have not been defined!")
def straight_in_out_bt(self, N):
""" Trajectory is a straight line between in and out points, with 2(N-2) points. """
if not (
self.in_point is None or self.out_point is None or self.bottom_turning_point is None):
points1, length1 = straight_trajectory(
self.in_point, self.bottom_turning_point, N)
points2, length2 = self.straight_trajectory(
self.bottom_turning_point, self.out_point, N)
self.points = []
self.length = length1 + length2
self.points = points1 + self.bottom_turning_point + points2
else:
raise Exception(
"in, out or bottom turning points have not been defined!")
def calc_zeta(self):
""" zeta is the angle with rotation (vertical) axis.
in and out points are required.
"""
# defining the axis
ax_x, ax_y, ax_z = 0, 0, 1
vec_ax = [ax_x, ax_y, ax_z]
# defining the trajectory vector
try:
x1, y1, z1 = self.in_point.x, self.in_point.y, self.in_point.z,
x2, y2, z2 = self.out_point.x, self.out_point.y, self.out_point.z,
except (NameError, AttributeError):
raise Exception("in and out points have not been defined!")
traj_x, traj_y, traj_z = x2 - x1, y2 - y1, z2 - z1
norm = np.sqrt(traj_x**2 + traj_y**2 + traj_z**2)
traj_x, traj_y, traj_z = traj_x / norm, traj_y / norm, traj_z / norm
vec_traj = [traj_x, traj_y, traj_z]
def angle_btwn(vec_a, vec_b):
""" angle between vector a and vector b
return angle in degree
"""
costheta = np.dot(vec_a, vec_b)
angle = np.arccos(costheta) * 180 / np.pi
if angle > 90.:
angle = 180. - angle
return angle # np.abs(angle)
self.direction = angle_btwn(vec_ax, vec_traj)
return self.direction
def calc_in_out_with_zeta_bt(self, | |
from collections import namedtuple as nt
# Mapping/Sequence/Sized are ABCs: they live in collections.abc (the
# top-level collections aliases were removed in Python 3.10).
from collections.abc import Mapping, Sequence, Sized
from moodle.fieldnames import JsonFieldNames as Jn
import logging
log = logging.getLogger('moodle.responses')
class JsonWrapper(Sized):
    """Thin holder for a parsed-JSON payload; exposes its size and raw form."""
    def __init__(self, json):
        self._data = json
    def __len__(self):
        return len(self._data)
    @property
    def raw(self):
        """The wrapped payload, unmodified."""
        return self._data
class JsonListWrapper(JsonWrapper, Sequence):
    """List-shaped JSON payload; concrete subclasses supply a typed __iter__."""
    def __init__(self, json_list):
        if not issubclass(type(json_list), Sequence):
            raise TypeError(f'received type {type(json_list)}, expected Sequence')
        super().__init__(json_list)
    def __getitem__(self, index):
        return self._data[index]
    def __iter__(self):
        # each response subclass yields its own wrapper type instead
        raise NotImplementedError('__iter__')
    def get(self, index):
        """Like __getitem__, but echoes the offending index before re-raising."""
        try:
            return self._data[index]
        except Exception as e:
            print(index)
            raise e
class JsonDictWrapper(JsonWrapper, Mapping):
    """Dict-shaped JSON payload exposed through the Mapping protocol."""
    __marker = object()  # sentinel: distinguishes "no default" from default=None
    def __init__(self, json_dict):
        if not issubclass(type(json_dict), Mapping):
            raise TypeError(f'received type {type(json_dict)}, expected Mapping')
        super().__init__(json_dict)
    def __iter__(self):
        return iter(self._data)
    def __getitem__(self, key):
        """
        Search for key.
        KeyError will be thrown, if the key cannot be found.
        """
        return self._data[key]
    def get(self, key, default=__marker):
        """Return the value for key; fall back to default, or raise KeyError
        when no default was supplied."""
        try:
            return self._data[key]
        except KeyError:
            if default is self.__marker:
                raise
            return default
class CourseListResponse(JsonListWrapper):
    # Wraps the JSON list returned by the course-list web service call;
    # iterating yields typed Course wrappers.
    def __iter__(self):
        for course in self._data:
            yield self.Course(course)
    class Course(JsonDictWrapper):
        """ optional:
        summary string Optional //summary
        summaryformat int Optional //summary format (1 = HTML, 0 = MOODLE, 2 = PLAIN or 4 = MARKDOWN)
        format string Optional //course format: weeks, topics, social, site
        showgrades int Optional //true if grades are shown, otherwise false
        lang string Optional //forced course language
        enablecompletion int Optional //true if completion is enabled, otherwise false
        """
        # Field lookups go through the Jn name table imported at the top of
        # the file, so JSON key spellings live in one place.
        @property
        def id(self): return self[Jn.id]
        @property
        def short_name(self): return self[Jn.short_name]
        @property
        def full_name(self): return self[Jn.full_name]
        @property
        def enrolled_user_count(self): return self[Jn.enrolled_user_count]
        @property
        def id_number(self): return self[Jn.id_number]
        @property
        def visible(self): return self[Jn.visible]
        def __str__(self): return f'{self.full_name[0:39]:40} id:{self.id:5d} short: {self.short_name}'
class EnrolledUsersListResponse(JsonListWrapper):
    """ optional, unimplemented object {
    username string Optional //Username policy is defined in Moodle security config
    firstname string Optional lastname string Optional
    email string Optional address string Optional phone1 string Optional phone2 string Optional
    icq string Optional skype string Optional yahoo string Optional aim string Optional
    msn string Optional department string Optional institution string Optional
    idnumber string Optional
    interests string Optional //user interests (separated by commas)
    firstaccess int Optional //first access to the site (0 if never)
    lastaccess int Optional //last access to the site (0 if never)
    description string Optional //User profile description
    descriptionformat int Optional //description format (1 = HTML, 0 = MOODLE, 2 = PLAIN or 4 = MARKDOWN)
    city string Optional //Home city of the user
    url string Optional //URL of the user
    country string Optional //Home country code of the user, such as AU or CZ
    profileimageurlsmall string Optional //User image profile URL - small version
    profileimageurl string Optional //User image profile URL - big version
    customfields Optional //User custom fields (also known as user profil fields)
    list of ( object {
    type string //The type of the custom field - text field, checkbox...
    value string //The value of the custom field
    name string //The name of the custom field
    shortname string //The shortname of the custom field - to be able to build the field class in the code})
    preferences Optional //User preferences
    list of (object {
    name string //The name of the preferences
    value string //The value of the custom field })
    enrolledcourses Optional //Courses where the user is enrolled - limited by which courses the user is able to see
    list of (object {
    id int //Id of the course
    fullname string //Fullname of the course
    shortname string //Shortname of the course})}"""
    # Iterating yields typed User wrappers around each enrolled-user record.
    def __iter__(self):
        for user in self._data:
            yield self.User(user)
    class User(JsonDictWrapper):
        @property
        def id(self): return self[Jn.id]
        @property
        def full_name(self): return self[Jn.full_name]
        # groups/roles are optional in the payload, hence .get with [] default
        @property
        def groups(self): return self.GroupsList(self.get(Jn.groups, []))
        @property
        def roles(self): return self.RolesList(self.get(Jn.roles, []))
        class GroupsList(JsonListWrapper):
            def __iter__(self):
                for group in self._data:
                    yield self.Group(group)
            class Group(JsonDictWrapper):
                @property
                def id(self): return self[Jn.id]
                @property
                def name(self): return self[Jn.name]
                @property
                def description(self): return self[Jn.description]
                @property # description format (1 = HTML, 0 = MOODLE, 2 = PLAIN or 4 = MARKDOWN
                def description_format(self): return self[Jn.description_format]
        class RolesList(JsonListWrapper):
            def __iter__(self):
                for role in self._data:
                    yield self.Role(role)
            class Role(JsonDictWrapper):
                @property
                def role_id(self): return self[Jn.role_id]
                @property
                def name(self): return self[Jn.name]
                @property
                def short_name(self): return self[Jn.short_name]
                @property
                def sort_order(self): return self[Jn.sort_order]
class CourseAssignmentResponse(JsonDictWrapper):
    # Response of the assignments-by-course call: a warnings list plus a list
    # of courses, each carrying its assignments.
    @property
    def warnings(self): return self.WarningList(self[Jn.warnings])
    @property
    def courses(self): return self.CourseList(self[Jn.courses])
    class WarningList(JsonListWrapper):
        def __iter__(self):
            for warning in self._data:
                yield self.Warning(warning)
        class Warning(JsonDictWrapper):
            """ item string Optional //item can be 'course' (errorcode 1 or 2) or 'module' (errorcode 1)
            itemid int Optional //When item is a course then itemid is a course id.
            When the item is a module then itemid is a module id
            warningcode string //errorcode can be 1 (no access rights) or 2 (not enrolled or no permissions)
            message string //untranslated english message to explain the warning})}"""
            @property
            def warning_code(self): return self[Jn.warning_code]
            @property
            def message(self): return self[Jn.message]
    class CourseList(JsonListWrapper):
        def __iter__(self):
            for course in self._data:
                yield self.Course(course)
        class Course(JsonDictWrapper):
            @property
            def id(self): return self[Jn.id]
            @property
            def short_name(self): return self[Jn.short_name]
            @property
            def full_name(self): return self[Jn.full_name]
            @property
            def time_modified(self): return self[Jn.time_modified]
            @property
            def assignments(self): return self.AssignmentList(self[Jn.assignments])
            class AssignmentList(JsonListWrapper):
                def __iter__(self): # CourseAssignmentListResponse
                    for assignment in self._data:
                        yield self.Assignment(assignment)
                class Assignment(JsonDictWrapper):
                    """ unimplemented fields:
                    nosubmissions int //no submissions
                    submissiondrafts int //submissions drafts
                    sendnotifications int //send notifications
                    sendlatenotifications int //send notifications
                    sendstudentnotifications int //send student notifications (default)
                    allowsubmissionsfromdate int //allow submissions from date
                    timemodified int //last time assignment was modified
                    completionsubmit int //if enabled, set activity as complete following submission
                    cutoffdate int //date after which submission is not accepted without an extension
                    requireallteammemberssubmit int //if enabled, all team members must submit
                    teamsubmissiongroupingid int //the grouping id for the team submission groups
                    blindmarking int //if enabled, hide identities until reveal identities actioned
                    revealidentities int //show identities for a blind marking assignment
                    attemptreopenmethod string //method used to control opening new attempts
                    maxattempts int //maximum number of attempts allowed
                    markingworkflow int //enable marking workflow
                    markingallocation int //enable marking allocation
                    requiresubmissionstatement int //student must accept submission statement
                    configs //configuration settings
                    list of ( object { //assignment configuration object
                    intro string Optional //assignment intro, not allways returned
                    because it deppends on the activity configuration
                    introformat int Optional //intro format (1 = HTML, 0 = MOODLE, 2 = PLAIN or 4 = MARKDOWN)
                    introattachments Optional //intro attachments files
                    list of ( object {
                    filename string //file name
                    mimetype string //mime type
                    fileurl string //file download url})})})"""
                    @property
                    def id(self): return self[Jn.id]
                    @property
                    def course_id(self): return self[Jn.course]
                    @property
                    def time_modified(self): return self[Jn.time_modified]
                    @property
                    def is_team_submission(self): return 1 == self[Jn.team_submission]
                    @property
                    def name(self): return self[Jn.name]
                    @property # documentation states, this would be the grade 'type'. Go figure?
                    def max_points(self): return self[Jn.grade]
                    @property
                    def due_date(self): return self[Jn.due_date]
                    @property
                    def course_module_id(self): return self[Jn.course_module_id]
                    @property
                    def configurations(self): return self.AssignmentConfigList(self[Jn.configs])
                    class AssignmentConfigList(JsonListWrapper):
                        def __iter__(self):
                            for config in self._data:
                                yield self.AssignmentConfig(config)
                        class AssignmentConfig(JsonDictWrapper):
                            @property
                            def id(self): return self[Jn.id]
                            @property
                            def assignment_id(self): return self[Jn.assignment]
                            @property
                            def name(self): return self[Jn.name]
                            @property
                            def plugin(self): return self[Jn.plugin]
                            @property
                            def sub_type(self): return self[Jn.sub_type]
                            @property
                            def value(self): return self[Jn.value]
class AssignmentSubmissionResponse(JsonDictWrapper):
    def print_warnings(self):
        # Log every warning except code "3", which just signals that there
        # are no (new) submissions and is expected in normal operation.
        for warning in self.warnings:
            if warning.warning_code == "3":
                # no (new) submissions, can ignore
                pass
            else:
                log.warning(f'{warning.warning_code}: {warning.message}')
@property
def warnings(self): return self.WarningList(self[Jn.warnings])
@property
def assignments(self): return self.AssignmentList(self[Jn.assignments])
    class WarningList(JsonListWrapper):
        # Iterating yields typed Warning wrappers.
        def __iter__(self):
            for warning in self._data:
                yield self.Warning(warning)
        class Warning(JsonDictWrapper):
            """
            item string Optional //item
            itemid int Optional //item id
            warningcode string //the warning code can be used by the client app to implement specific behaviour
            message string //untranslated english message to explain the warning})}
            """
            @property
            def warning_code(self): return self[Jn.warning_code]
            @property
            def message(self): return self[Jn.message]
class AssignmentList(JsonListWrapper):
def __iter__(self):
for assignment in self._data:
yield self.Assignment(assignment)
class Assignment(JsonDictWrapper):
@property
def id(self):
return self[Jn.assignment_id]
@property
def submissions(self): return self.SubmissionList(self[Jn.submissions])
class SubmissionList(JsonListWrapper):
def __iter__(self):
for submission in self._data:
yield self.Submission(submission)
class Submission(JsonDictWrapper):
@property
def id(self): return self[Jn.id]
@property
def user_id(self): return self[Jn.user_id]
@property
def group_id(self): return self[Jn.group_id]
@property
def time_modified(self): return self[Jn.time_modified]
@property
def time_created(self): return self[Jn.time_created]
@property
def status(self): return self[Jn.status]
@property
def attempt_number(self): return self[Jn.attempt_number]
@property
def plugin_list(self): return self.PluginList(self.get(Jn.plugins, []))
class PluginList(JsonListWrapper):
def __iter__(self):
for plugin in self._data:
yield self.Plugin(plugin)
class Plugin(JsonDictWrapper):
@property
def type(self): return self[Jn.type]
@property
def name(self): return | |
import lmfit
from time import time
class ModelFit:
""" We collect all information related to a fit between a pygom model and a set of data in this class
It has access to the model structure and defines all required parameters and details of fit """
    def dumpparams(self,run_id=''): # Have to add self since this will become a method
        """stores params in a file './params/Model_Name.pk'
        This stuff needs modules os, sys, pickle as pk.
        If run_id is nonempty, it is used to construct the filename, and self.run_id is set to its value."""
        mname = self.modelname
        country = self.dbparams['country']
        # NOTE(review): mname and country are computed but never used below.
        rname = self.run_id
        dirnm = os.getcwd()
        # reconcile the run_id argument with the stored one; the argument wins
        if run_id != '': # if run_id, turn it into self.run_id and use it for output filename
            if run_id != rname:
                print("warning: changing run_id from ",rname,'to',run_id)
                self.run_id = run_id
        else:
            run_id = self.run_id # should always be something from __init__
        pfile = dirnm+'/params/'+run_id+'.pk'
        self.paramfile = pfile
        try:
            # bundle every parameter group plus the initial state in one dict
            all_params = {'params':self.params,
                          'sbparams':self.sbparams,
                          'fbparams':self.fbparams,
                          'cbparams':self.cbparams,
                          'dbparams':self.dbparams,
                          'initial_values':self.initial_values
                          }
            with open(pfile,'wb') as fp:
                pk.dump(all_params,fp)
            #print('dumped params to',pfile)
        except:
            # NOTE(review): bare except hides the real cause (missing ./params
            # dir, unpicklable value, ...) — consider logging the exception.
            print('problem dumping params to ',pfile)
    def loadparams(self,run_id=''):
        """loads params from same file. returns None if any problem finding the file.
        This stuff needs modules os, sys, pickle as pk.
        If run_id is nonempty, it is used to construct the filename, and self.run_id is set to its value."""
        if run_id == '':
            run_id = self.run_id
        elif self.run_id != run_id:
            print("warning: changing run_id from ",self.run_id,'to',run_id)
            self.run_id = run_id
        dirnm = os.getcwd()
        pfile = dirnm+'/params/'+run_id+'.pk'
        self.paramfile = pfile
        try:
            with open(pfile,'rb') as fp:
                all_params = pk.load(fp)
                print('loaded params from ',pfile,':')
        except:
            # a missing file simply means this run_id has no saved state yet
            print("For this run_id, a fresh file: ",pfile)
            return None
        #print('------- params from file:')
        #ppr.pprint(all_params)
        # check to see that all params being loaded match params of model, if not: fail.
        for pp in ['params','sbparams','fbparams','cbparams','dbparams']:
            try:
                # NOTE(review): eval('self.'+pp) is just getattr(self, pp) —
                # consider replacing the eval.
                ppp = eval('self.'+pp) # fail first time when ModelFit doesn't have params.
                selfkk = [kk for kk in ppp]
                newkk = [k for k in all_params[pp]]
                if newkk != selfkk:
                    print("params don't match when loading the params from ",pfile)
                    print('old keys:',selfkk)
                    print('new keys:',newkk)
                    return None
            except:
                pass # ok to fail 1st time
        # all groups match (or did not exist yet): install the loaded state
        try:
            self.params = all_params['params']
            self.model.parameters = self.params
            self.sbparams = all_params['sbparams']
            self.fbparams = all_params['fbparams']
            self.cbparams = all_params['cbparams']
            self.dbparams = all_params['dbparams']
            self.initial_values = all_params['initial_values'] # will get copied properly?
        except:
            print('problem loading the params from ',pfile)
            return None
        return True
def set_param(self,param,value):
plist = [p.name for p in self.model.param_list]
if param not in plist:
print('Error: param name',param,'is not a parameter for this',self.modelname,'model.')
self.params[param] = value
tmp = {param:value}
self.model.parameters = tmp # pygom magic sets the right parameter in the model.parameters dictionary.
def set_initial_values(self,ival,t0=None):
# consistency check:
if len(self.initial_values[0]) != len(self.model.initial_values[0]):
print('warning: inconsistent initial values in model.')
if len(ival) != len(self.model.initial_values[0]):
print('error: initial value must be of length', len(self.model.initial_values[0]))
self.model.initial_values[0] = [x for x in ival]
self.initial_values[0] = [x for x in ival]
if t0 is not None:
self.model.initial_values[1] = t0
self.initial_values[1] = t0
def set_I0(self,logI_0):
I0 = 10**logI_0
self.model.initial_values[0][0] = 1.0 - I0
self.model.initial_values[0][2] = I0
self.initial_values[0][0] = 1.0 - I0
self.initial_values[0][2] = I0
def difference(self,datain):
dataout = np.zeros(np.shape(datain))
for i in range(1,len(datain)):
dataout[i,...] = datain[i,...]-datain[i-1,...]
return dataout
def rolling_average(self,datain,period):
(tmax,n) = np.shape(datain)
dataout = np.zeros((tmax,n),dtype=float)
moving_av = np.zeros(n,dtype=float)
for k in range(len(datain)):
if k-period >= 0:
moving_av[:] = moving_av[:] - datain[k-7,...]
moving_av[:] = moving_av[:] + datain[k,...]
dataout[k] = moving_av/min(float(period),float(k+1))
return dataout
    def plotdata(self,dtypes=['confirmed','deaths']):
        # Quick matplotlib preview of one or more raw series in self.data,
        # plotted against an index derived from self.tdata.
        if type(dtypes)==str:
            dtypes = [dtypes]
        xx = np.array(range(len(self.tdata)-1))
        print(len(xx))
        print([(x,len(self.data[x])) for x in dtypes])
        for dt in dtypes:
            try:
                yy = self.data[dt]
            except:
                # NOTE(review): when the key is missing, yy is undefined (or
                # keeps the previous iteration's value) and the plot call
                # below reuses/fails on it — a `continue` here would be safer.
                print("data type '"+dt+"' not found.")
            try:
                plt.plot(xx,yy)
            except:
                print("couldn't plot xx,yy",xx,yy)
        plt.show()
def get_fitdata(self,species=['deaths'],datasets=['new_deaths_corrected_smoothed']):
if not isinstance(species,list):
lspecies = [species]
ldatasets =[datasets]
else:
lspecies = species
ldatasets =datasets
if not len(datasets)==len(lspecies):
print('Error in input to get_fitdata: species and datasets parameters not same length')
#
tvec = self.tsim
tvec1 = tvec[1:]
fitdata = {}
if not self.data is {}:
for i,ls in enumerate(lspecies):
ds = ldatasets[i]
if ls == 'confirmed': # John corrected this Oct 1st, was 'deaths'
datmp = self.data[ds] # confirmed cases data, corrected by FracConfirmedDet
fitdata[ls] = [x/self.fbparams['FracConfirmedDet']/self.population for x in datmp]
elif ls == 'deaths':
datmp = self.data[ds] # deaths cases data, corrected by FracDeathsDet
fitdata[ls] = [x/self.fbparams['FracDeathsDet']/self.population for x in datmp]
else:
fitdata[ls] = np.array(self.data[ds])
else:
print('missing fit data')
for ls in lspecies:
fitdata[ls] = None
return fitdata
    def solvefit(self,species = ['deaths'],datasets=['deaths_corrected_smoothed']):
        """Integrate the ODE model over self.tsim and align the solution with
        the fit data.  Returns {species: {'data', 'soln', 'resid'}} with
        resid = soln - data; the raw integration is cached in self.soln."""
        fitdata = self.get_fitdata(species,datasets)
        lspecies = [x for x in fitdata]
        tmaxf = len(fitdata[lspecies[0]])
        tvec = self.tsim
        tvecf=np.arange(0,tmaxf,1)
        tvecf1 = tvecf[1:]  # NOTE(review): tvecf/tvecf1 are unused here
        self.soln = scipy.integrate.odeint(self.model.ode, self.model.initial_values[0], tvec)
        rtn = {}
        slices = {}
        # model.deaths / model.confirmed select columns of the solution array
        # (presumably index lists/slices defined with the model — confirm)
        for ls in lspecies:
            if ls == 'deaths':
                slices['deaths'] = self.model.deaths
            if ls == 'confirmed':
                slices['confirmed'] = self.model.confirmed
        for ls in lspecies:
            rtn[ls] = {}
            rtn[ls]['data'] = np.array(fitdata[ls])
            rtn[ls]['soln'] = self.soln[:,slices[ls]][:,0]
            rtn[ls]['resid'] = rtn[ls]['soln']-rtn[ls]['data']
        return rtn
    def solvefitlog(self,species = ['deaths'],datasets=['deaths_corrected_smoothed']):
        """
        like solvefit() but take log of data and soln before computing residual.
        Non-positive values are clamped to the smallest positive entry of
        their series so the log is defined; the last species' intermediate
        arrays are kept in self.logresid for inspection.
        """
        fitdata = self.get_fitdata(species,datasets)
        lspecies = [x for x in fitdata]
        tmaxf = len(fitdata[lspecies[0]])
        tvec = self.tsim
        tvecf=np.arange(0,tmaxf,1)
        tvecf1 = tvecf[1:]  # NOTE(review): tvecf/tvecf1 are unused here
        self.soln = scipy.integrate.odeint(self.model.ode, self.model.initial_values[0], tvec)
        rtn = {}
        slices = {}
        for ls in lspecies:
            if ls == 'deaths':
                slices['deaths'] = self.model.deaths
            if ls == 'confirmed':
                slices['confirmed'] = self.model.confirmed
        for ls in lspecies:
            rtn[ls] = {}
            rtn[ls]['data'] = np.array(fitdata[ls])
            rtn[ls]['soln'] = self.soln[:,slices[ls]][:,0]
            # clamp non-positive data to the smallest positive entry, then log
            mn = min([x for x in fitdata[ls] if x>0])
            fdat = [x if x > 0 else mn for x in fitdata[ls]]
            lfdat = np.array([np.log(x) for x in fdat])
            sdata = rtn[ls]['soln']
            # same clamping for the model solution
            mn = min([x for x in sdata if x>0])
            sdat = [x if x > 0 else mn for x in sdata]
            lsdat = np.array([np.log(x) for x in sdat])
            rtn[ls]['resid'] = lsdat - lfdat
        self.logresid = [sdat,lsdat,fdat,lfdat,lsdat-lfdat]
        return rtn
def solveplot(self, species=['confirmed'],summing='daily',averaging='weekly',mag = {'deaths':10},axis=None,
scale='linear',plottitle= '',label='',newplot = True, gbrcolors=False, figsize = None, outfile = None,datasets=['confirmed_corrected_smoothed']):
"""
solve ODEs and plot for fitmodel indicated
species : alternatives 'all', 'EI', 'confirmed', 'deaths', ...
tmax : max time for simulation
summing: type of summing smoothing options : 'daily', ...
averaging : None, 'daily', 'weekly'
fitdata : data to fit
axes : previous axes to plot on [None]
scale : alternative 'linear' or 'log'
plottitle : title for plot
label : label for curve when called as part of multicurve plot
newplot : whether to open new plot True/False
gbrcolors : color types to use
figsize : size of fig in inches (binary tuple)
"""
# tmax = self.tsim[-1]
# tvec=np.arange(0,tmax,1)
if not isinstance(species,list):
lspecies = [species]
ldatasets = [datasets]
else:
lspecies = species
ldatasets = datasets
dspecies = [dt if dt != 'caution_fraction' else 'stringency' for dt in lspecies]
mags = [mag[dt] if dt in mag.keys() else 1 for dt in dspecies]
tvec = self.tsim
tvec1 = tvec[1:]
if not self.data is {}:
fitdata = np.transpose(np.array([self.data[dt] for dt in datasets]))
else:
fitdata = None
if not fitdata is None:
tmaxf = len(fitdata)
if fitdata.ndim != 2:
print("error in number of dimensions of array")
tvecf=np.arange(0,tmaxf,1)
tvecf1 = tvecf[1:]
if newplot:
axis = None
if (figsize == None):
figsize=(8,6)
plt.figure(figsize=figsize)
# fig, axeslist = plt.subplots(1, nmodels, figsize=(nmodels*8,6))
smodel = self.modelname
model = self.model
self.soln = scipy.integrate.odeint(model.ode, model.initial_values[0], tvec[1::])
#Plot
# ax = axeslist[nm]
if axis == None:
ax = axis = plt.subplot(1,1,1)
else:
ax = axis
if scale == 'log': #Plot on log scale
ax.semilogy()
ax.set_ylim([0.00000001,1.0])
if summing == 'daily':
ssoln = self.difference(self.soln)
if not fitdata is None:
sfit = self.difference(fitdata)
else:
ssoln = self.soln
if not fitdata is None:
sfit = fitdata
if averaging == 'weekly':
srsoln = self.rolling_average(ssoln,7)
if not fitdata is None:
srfit = self.rolling_average(sfit,7)
else:
srsoln = ssoln
if not fitdata is None:
srfit = sfit
for ns,species in enumerate(lspecies):
if species == 'confirmed':
suma = np.sum(srsoln[:,model.confirmed],axis=1)*mags[ns]
if not fitdata is None:
ax.plot(tvec1,suma,label=label,color='green')
fita = srfit[1::,ns]*mags[ns]/self.fbparams['FracConfirmedDet']/self.population # confirmed cases data, | |
# <gh_stars>1-10  (repository-export residue; not valid Python, kept as a comment)
import sys
import os
import pytest
import pandas as pd
from Medeina.Web import Web
from Medeina.config import *
# import Medeina.common as MC
# single consolidated mock import: the earlier `from mock import patch` and
# `from unittest.mock import MagicMock` were fully shadowed by this line,
# so dropping them removes the third-party `mock` dependency with no
# behavior change.
from unittest.mock import patch, MagicMock, call, mock_open
IDTRACKER = (
    "numericCounter-b2ca94aee362f455a41493a0d28b98bc5074065b0f96cbb95028ead20b1c72ea"
)
@patch.object(sys.modules["Medeina.Web"], "retrieveObjFromStore")
@patch.object(sys.modules["Medeina.Web"], "writeObjToDateStore")
def testAddingTaxaExceptionsFresh(patch_write, patch_retr):
    # add_taxonomic_exception on a store with no existing exceptions should
    # write a fresh exceptions dict for the known species.
    def retrDynamicReturn(a, b):
        # stub for retrieveObjFromStore: only the REALNAMES section has data
        if b == REALNAMES:
            return {"panthera tigris": 1}
        return {}
    patch_retr.side_effect = retrDynamicReturn
    species = "panthera tigris"
    consumer = "family"
    resource = "genus"
    instance = Web(path="dir")
    instance.add_taxonomic_exception(species, consumer, resource, True)
    patch_write.assert_called_with(
        "dir",
        "reorderedTaxaInteractions",
        {"panthera tigris": {"consumer": "family", "resource": "genus"}},
    )
@patch.object(sys.modules["Medeina.Web"], "retrieveObjFromStore")
@patch.object(sys.modules["Medeina.Web"], "writeObjToDateStore")
def testAddingTaxaExceptionsExisting(patch_write, patch_retr):
    # Adding an exception for a second species must merge with (not replace)
    # the exceptions already in the store.
    # NOTE(review): the "<NAME>" keys below look like redacted species names
    # from the dataset export — confirm against the original fixtures.
    def retrDynamicReturn(a, b):
        if b == REALNAMES:
            return {"felis catus": 1, "panthera tigris": 2}
        elif b == EXCEPTIONS:
            return {"panthera tigris": {"consumer": "family", "resource": "genus"}}
        return {}
    patch_retr.side_effect = retrDynamicReturn
    species = "felis catus"
    consumer = "family"
    resource = "genus"
    instance = Web(path="dir")
    instance.add_taxonomic_exception(species, consumer, resource, True)
    patch_write.assert_called_with(
        "dir",
        "reorderedTaxaInteractions",
        {
            "<NAME>": {"consumer": "family", "resource": "genus"},
            "felis catus": {"consumer": "family", "resource": "genus"},
        },
    )
@patch.object(sys.modules["Medeina.Web"], "retrieveObjFromStore")
@patch.object(sys.modules["Medeina.Web"], "writeObjToDateStore")
def testAddingTaxaExceptionsInvalid(patch_write, patch_retr):
    # A species not present in the stored REALNAMES index must be rejected
    # with a ValueError.
    def retrDynamicReturn(a, b):
        if b == REALNAMES:
            return {"<NAME>": 1, "<NAME>": 2}
        elif b == EXCEPTIONS:
            return {}
        return {}
    patch_retr.side_effect = retrDynamicReturn
    species = "felis"
    consumer = "family"
    resource = "genus"
    instance = Web(path="dir")
    with pytest.raises(ValueError):
        instance.add_taxonomic_exception(species, consumer, resource, True)
@patch.object(sys.modules["Medeina.Web"], "retrieveObjFromStore")
def testFilteringByDatasetIdsExpected(patch_retr):
    # Filtering by dataset id [1] must keep only links whose dId == 1, prune
    # the interaction web accordingly, and leave taxa/name tables intact.
    def retrDynamicReturn(a, b):
        # stub store: one section per store key
        if b == REALNAMES:
            return {"<NAME>": 1, "<NAME>": 2, "vulpes vulpes": 3}
        elif b == EXCEPTIONS:
            return {}
        elif b == WEB:
            return {IDTRACKER: 5, 2: {1: [1, 3], 3: [2]}, 3: {1: [4]}}
        elif b == TAXA:
            return {
                1: {"family": "felidae"},
                2: {"family": "felidae"},
                3: {"family": "canidea"},
            }
        elif b == LINKS:
            return {1: {"dId": 2}, 2: {"dId": 1}, 3: {"dId": 1}, 4: {"dId": 2}}
        elif b == DATASETS:
            return {1: {}, 2: {}}
        return {}
    patch_retr.side_effect = retrDynamicReturn
    instance = Web(path="dir")
    newWeb = instance.filter_by_dataset_id([1])
    assert newWeb.datasetMetas == {1: {}}
    assert newWeb.linkMetas == {2: {"dId": 1}, 3: {"dId": 1}}
    assert newWeb.interactions == {IDTRACKER: 5, 2: {1: [3], 3: [2]}}
    assert newWeb.stringNames == {
        "<NAME>": 1,
        "<NAME>": 2,
        "vul<NAME>": 3,
    }
    assert newWeb.taxa == {
        1: {"family": "felidae"},
        2: {"family": "felidae"},
        3: {"family": "canidea"},
    }
@patch.object(sys.modules["Medeina.Web"], "retrieveObjFromStore")
def testFilteringByDatasetIdsEmpty(patch_retr):
    # Filtering by an empty id list empties every view of the web except the
    # interaction id counter.
    def retrDynamicReturn(a, b):
        if b == REALNAMES:
            return {"<NAME>": 1, "<NAME>": 2, "vulpes vulpes": 3}
        elif b == EXCEPTIONS:
            return {}
        elif b == WEB:
            return {IDTRACKER: 5, 2: {1: [1, 3], 3: [2]}, 3: {1: [4]}}
        elif b == TAXA:
            return {
                1: {"family": "felidae"},
                2: {"family": "felidae"},
                3: {"family": "canidea"},
            }
        elif b == LINKS:
            return {1: {"dId": 2}, 2: {"dId": 1}, 3: {"dId": 1}, 4: {"dId": 2}}
        elif b == DATASETS:
            return {1: {}, 2: {}}
        return {}
    patch_retr.side_effect = retrDynamicReturn
    instance = Web(path="dir")
    newWeb = instance.filter_by_dataset_id([])
    assert newWeb.datasetMetas == {}
    assert newWeb.linkMetas == {}
    assert newWeb.interactions == {IDTRACKER: 5}
    assert newWeb.stringNames == {}
    assert newWeb.taxa == {}
@patch.object(sys.modules["Medeina.Web"], "retrieveObjFromStore")
def testFilteringByDatasetIdsFull(patch_retr):
    # Filtering by every dataset id is a no-op: all views equal the stubbed
    # store contents.
    def retrDynamicReturn(a, b):
        if b == REALNAMES:
            return {"<NAME>": 1, "<NAME>": 2, "vulpes vulpes": 3}
        elif b == EXCEPTIONS:
            return {}
        elif b == WEB:
            return {IDTRACKER: 5, 2: {1: [1, 3], 3: [2]}, 3: {1: [4]}}
        elif b == TAXA:
            return {
                1: {"family": "felidae"},
                2: {"family": "felidae"},
                3: {"family": "canidea"},
            }
        elif b == LINKS:
            return {1: {"dId": 2}, 2: {"dId": 1}, 3: {"dId": 1}, 4: {"dId": 2}}
        elif b == DATASETS:
            return {1: {}, 2: {}}
        return {}
    patch_retr.side_effect = retrDynamicReturn
    instance = Web(path="dir")
    newWeb = instance.filter_by_dataset_id([1, 2])
    assert newWeb.datasetMetas == retrDynamicReturn(1, DATASETS)
    assert newWeb.linkMetas == retrDynamicReturn(1, LINKS)
    assert newWeb.interactions == retrDynamicReturn(1, WEB)
    assert newWeb.stringNames == retrDynamicReturn(1, REALNAMES)
    assert newWeb.taxa == retrDynamicReturn(1, TAXA)
@patch.object(sys.modules["Medeina.Web"], "retrieveObjFromStore")
def testReplicatingWebExpected(patch_retr):
    # replicateWeb must produce a web whose every view equals the source
    # store contents.
    def retrDynamicReturn(a, b):
        if b == REALNAMES:
            return {"<NAME>atus": 1, "panthera tigris": 2, "vulpes vulpes": 3}
        elif b == EXCEPTIONS:
            return {}
        elif b == WEB:
            return {IDTRACKER: 5, 2: {1: [1, 3], 3: [2]}, 3: {1: [4]}}
        elif b == TAXA:
            return {
                1: {"family": "felidae"},
                2: {"family": "felidae"},
                3: {"family": "canidea"},
            }
        elif b == LINKS:
            return {1: {"dId": 2}, 2: {"dId": 1}, 3: {"dId": 1}, 4: {"dId": 2}}
        elif b == DATASETS:
            return {1: {}, 2: {}}
        return {}
    patch_retr.side_effect = retrDynamicReturn
    instance = Web(path="dir")
    newWeb = instance.replicateWeb()
    assert newWeb.datasetMetas == retrDynamicReturn(1, DATASETS)
    assert newWeb.linkMetas == retrDynamicReturn(1, LINKS)
    assert newWeb.interactions == retrDynamicReturn(1, WEB)
    assert newWeb.stringNames == retrDynamicReturn(1, REALNAMES)
    assert newWeb.taxa == retrDynamicReturn(1, TAXA)
@patch.object(sys.modules["Medeina.Web"], "retrieveObjFromStore")
def testReplicatingWebBlank(patch_retr):
    # Replicating a completely empty store yields an equally empty web
    # (only the id counter survives in interactions).
    def retrDynamicReturn(a, b):
        if b == REALNAMES:
            return {}
        elif b == EXCEPTIONS:
            return {}
        elif b == WEB:
            return {IDTRACKER: 0}
        elif b == TAXA:
            return {}
        elif b == LINKS:
            return {}
        elif b == DATASETS:
            return {}
        return {}
    patch_retr.side_effect = retrDynamicReturn
    instance = Web(path="dir")
    newWeb = instance.replicateWeb()
    assert newWeb.datasetMetas == retrDynamicReturn(1, DATASETS)
    assert newWeb.linkMetas == retrDynamicReturn(1, LINKS)
    assert newWeb.interactions == retrDynamicReturn(1, WEB)
    assert newWeb.stringNames == retrDynamicReturn(1, REALNAMES)
    assert newWeb.taxa == retrDynamicReturn(1, TAXA)
@patch.object(sys.modules["Medeina.Web"], "retrieveObjFromStore")
def testFilteringByObservationTypeAllOnLinks(patch_retr):
    # When evidencedBy lives on the individual links, filtering keeps links
    # matching the requested type — and links with no evidencedBy at all.
    def retrDynamicReturn(a, b):
        if b == REALNAMES:
            return {"<NAME>": 1, "<NAME>": 2, "vulpes vulpes": 3}
        elif b == EXCEPTIONS:
            return {}
        elif b == WEB:
            return {IDTRACKER: 5, 2: {1: [1, 3], 3: [2]}, 3: {1: [4]}}
        elif b == TAXA:
            return {
                1: {"family": "felidae"},
                2: {"family": "felidae"},
                3: {"family": "canidea"},
            }
        elif b == LINKS:
            return {
                1: {"dId": 2, "evidencedBy": "observation"},
                2: {"dId": 1, "evidencedBy": "observation"},
                3: {"dId": 1},
                4: {"dId": 2, "evidencedBy": "inferred"},
            }
        elif b == DATASETS:
            return {1: {}, 2: {}}
        return {}
    patch_retr.side_effect = retrDynamicReturn
    instance = Web(path="dir")
    newWeb = instance.filter_by_observation_type(["observation"])
    assert newWeb.datasetMetas == {1: {}, 2: {}}
    assert newWeb.linkMetas == {
        1: {"dId": 2, "evidencedBy": "observation"},
        2: {"dId": 1, "evidencedBy": "observation"},
        3: {"dId": 1},
    }
    assert newWeb.interactions == {2: {1: [1, 3], 3: [2]}, IDTRACKER: 5}
    assert newWeb.stringNames == {
        "<NAME>": 1,
        "<NAME>": 2,
        "vulpes vulpes": 3,
    }
    assert newWeb.taxa == {
        1: {"family": "felidae"},
        2: {"family": "felidae"},
        3: {"family": "canidea"},
    }
@patch.object(sys.modules["Medeina.Web"], "retrieveObjFromStore")
def testFilteringByObservationTypeAllDatasetMetas(patch_retr):
    # When evidencedBy lives on the dataset metadata, filtering keeps only
    # datasets of the requested type and the links belonging to them.
    def retrDynamicReturn(a, b):
        if b == REALNAMES:
            return {"<NAME>": 1, "<NAME>": 2, "vulpes vulpes": 3}
        elif b == EXCEPTIONS:
            return {}
        elif b == WEB:
            return {IDTRACKER: 5, 2: {1: [1, 3], 3: [2]}, 3: {1: [4]}}
        elif b == TAXA:
            return {
                1: {"family": "felidae"},
                2: {"family": "felidae"},
                3: {"family": "canidea"},
            }
        elif b == LINKS:
            return {1: {"dId": 2}, 2: {"dId": 1}, 3: {"dId": 1}, 4: {"dId": 2}}
        elif b == DATASETS:
            return {1: {"evidencedBy": "observation"}, 2: {"evidencedBy": "inferred"}}
        return {}
    patch_retr.side_effect = retrDynamicReturn
    instance = Web(path="dir")
    newWeb = instance.filter_by_observation_type(["observation"])
    assert newWeb.datasetMetas == {1: {"evidencedBy": "observation"}}
    assert newWeb.linkMetas == {2: {"dId": 1}, 3: {"dId": 1}}
    assert newWeb.interactions == {2: {1: [3], 3: [2]}, IDTRACKER: 5}
    assert newWeb.stringNames == {
        "<NAME>": 1,
        "<NAME>": 2,
        "vulpes vulpes": 3,
    }
    assert newWeb.taxa == {
        1: {"family": "felidae"},
        2: {"family": "felidae"},
        3: {"family": "canidea"},
    }
@patch.object(sys.modules["Medeina.Web"], "retrieveObjFromStore")
def testFilteringByObservationTypeMixedDatasetMetasAndLinkMetas(patch_retr):
    """evidencedBy tags appear on both links and datasets: a link survives
    if either it or its parent dataset matches the requested type."""

    def fake_retrieve(store, section):
        # Serve canned fixtures keyed on the requested store section.
        if section == REALNAMES:
            return {"<NAME>": 1, "<NAME>": 2, "<NAME>": 3}
        elif section == EXCEPTIONS:
            return {}
        elif section == WEB:
            return {IDTRACKER: 5, 2: {1: [1, 3], 3: [2]}, 3: {1: [4]}}
        elif section == TAXA:
            return {
                1: {"family": "felidae"},
                2: {"family": "felidae"},
                3: {"family": "canidea"},
            }
        elif section == LINKS:
            return {
                1: {"dId": 2, "evidencedBy": "observation"},
                2: {"dId": 1},
                3: {"dId": 1},
                4: {"dId": 2, "evidencedBy": "inferred"},
            }
        elif section == DATASETS:
            return {1: {"evidencedBy": "observation"}, 2: {}}
        return {}

    patch_retr.side_effect = fake_retrieve

    filtered = Web(path="dir").filter_by_observation_type(["observation"])

    # Link 4 is explicitly 'inferred' and is the only casualty.
    assert filtered.datasetMetas == {1: {"evidencedBy": "observation"}, 2: {}}
    assert filtered.linkMetas == {
        1: {"dId": 2, "evidencedBy": "observation"},
        2: {"dId": 1},
        3: {"dId": 1},
    }
    assert filtered.interactions == {2: {1: [1, 3], 3: [2]}, IDTRACKER: 5}
    assert filtered.stringNames == {
        "<NAME>": 1,
        "<NAME>": 2,
        "<NAME>": 3,
    }
    assert filtered.taxa == {
        1: {"family": "felidae"},
        2: {"family": "felidae"},
        3: {"family": "canidea"},
    }
@patch.object(sys.modules["Medeina.Web"], "retrieveObjFromStore")
def testFilteringOnTaxa(patch_retr):
def retrDynamicReturn(a, b):
if b == REALNAMES:
return {"<NAME>": 1, "<NAME>": 2, "vulpes vulpes": 3}
elif b == EXCEPTIONS:
return {}
elif b == WEB:
return {IDTRACKER: 5, 2: {1: [1, 3], 3: [2]}, 3: {1: [4]}}
elif b == TAXA:
return {
1: {"family": "felidae"},
2: {"family": "felidae"},
3: {"family": "canidea"},
}
| |
"""Storm-centered radar images."""
import os
import copy
import glob
import numpy
from scipy.interpolate import interp1d as scipy_interp1d
import netCDF4
from gewittergefahr.gg_io import netcdf_io
from gewittergefahr.gg_io import gridrad_io
from gewittergefahr.gg_io import myrorss_and_mrms_io
from gewittergefahr.gg_utils import radar_utils
from gewittergefahr.gg_utils import radar_sparse_to_full as radar_s2f
from gewittergefahr.gg_utils import storm_tracking_utils as tracking_utils
from gewittergefahr.gg_utils import target_val_utils
from gewittergefahr.gg_utils import number_rounding as rounder
from gewittergefahr.gg_utils import time_conversion
from gewittergefahr.gg_utils import time_periods
from gewittergefahr.gg_utils import grids
from gewittergefahr.gg_utils import interp
from gewittergefahr.gg_utils import projections
from gewittergefahr.gg_utils import geodetic_utils
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
# Log-output separator printed between processing stages.
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'

# Value used to pad storm-centered images that extend past the full grid.
PADDING_VALUE = 0.

# Tolerance (deg) used when comparing grid spacings between files.
GRID_SPACING_TOLERANCE_DEG = 1e-4

# Multiplier applied to azimuthal-shear grid spacing (see
# `_check_grid_spacing`).
AZ_SHEAR_GRID_SPACING_MULTIPLIER = 2

LABEL_FILE_EXTENSION = '.nc'
ELEVATION_COLUMN = 'elevation_m_asl'

# Nominal interval between successive GridRad times.
GRIDRAD_TIME_INTERVAL_SEC = 300

# Time format used in file names: strptime pattern plus a regex that
# matches the same layout.
TIME_FORMAT = '%Y-%m-%d-%H%M%S'
TIME_FORMAT_REGEX = (
    '[0-9][0-9][0-9][0-9]-[0-1][0-9]-[0-3][0-9]-[0-2][0-9][0-5][0-9][0-5][0-9]'
)

# Keys locating a storm-centered image inside the full grid.
FIRST_STORM_ROW_KEY = 'first_storm_image_row'
LAST_STORM_ROW_KEY = 'last_storm_image_row'
FIRST_STORM_COLUMN_KEY = 'first_storm_image_column'
LAST_STORM_COLUMN_KEY = 'last_storm_image_column'

# Keys giving how much padding was added on each side of an image.
NUM_TOP_PADDING_ROWS_KEY = 'num_padding_rows_at_top'
NUM_BOTTOM_PADDING_ROWS_KEY = 'num_padding_rows_at_bottom'
NUM_LEFT_PADDING_COLS_KEY = 'num_padding_columns_at_left'
NUM_RIGHT_PADDING_COLS_KEY = 'num_padding_columns_at_right'

# Column names for lat/long coordinates of rotated storm-centered grids.
ROTATED_NON_SHEAR_LATITUDES_COLUMN = 'rotated_lat_matrix_non_shear_deg'
ROTATED_NON_SHEAR_LONGITUDES_COLUMN = 'rotated_lng_matrix_non_shear_deg'
ROTATED_SHEAR_LATITUDES_COLUMN = 'rotated_lat_matrix_for_shear_deg'
ROTATED_SHEAR_LONGITUDES_COLUMN = 'rotated_lng_matrix_for_shear_deg'

# Keys used in storm-image dictionaries and NetCDF files.
STORM_IMAGE_MATRIX_KEY = 'storm_image_matrix'
FULL_IDS_KEY = 'full_storm_id_strings'
VALID_TIMES_KEY = 'valid_times_unix_sec'
RADAR_FIELD_NAME_KEY = 'radar_field_name'
RADAR_HEIGHT_KEY = 'radar_height_m_agl'
ROTATED_GRIDS_KEY = 'rotated_grids'
ROTATED_GRID_SPACING_KEY = 'rotated_grid_spacing_metres'
LABEL_VALUES_KEY = 'label_values'
RADAR_FIELD_NAMES_KEY = 'radar_field_names'
RADAR_HEIGHTS_KEY = 'radar_heights_m_agl'
IMAGE_FILE_NAMES_KEY = 'image_file_name_matrix'
FIELD_NAME_BY_PAIR_KEY = 'field_name_by_pair'
HEIGHT_BY_PAIR_KEY = 'height_by_pair_m_agl'

# NetCDF dimension names.
ROW_DIMENSION_KEY = 'grid_row'
COLUMN_DIMENSION_KEY = 'grid_column'
CHARACTER_DIMENSION_KEY = 'storm_id_character'
STORM_OBJECT_DIMENSION_KEY = 'storm_object'

# Columns required in any storm-object table handled by this module.
STORM_COLUMNS_NEEDED = [
    tracking_utils.FULL_ID_COLUMN, tracking_utils.VALID_TIME_COLUMN,
    tracking_utils.SPC_DATE_COLUMN, tracking_utils.CENTROID_LATITUDE_COLUMN,
    tracking_utils.CENTROID_LONGITUDE_COLUMN,
    tracking_utils.EAST_VELOCITY_COLUMN, tracking_utils.NORTH_VELOCITY_COLUMN
]

# Highest and lowest points in continental U.S.
LOWEST_POINT_IN_CONUS_M_ASL = -100.
HIGHEST_POINT_IN_CONUS_M_ASL = 4500.

# Default storm-centered-image geometry.
DEFAULT_NUM_IMAGE_ROWS = 32
DEFAULT_NUM_IMAGE_COLUMNS = 32
DEFAULT_ROTATED_GRID_SPACING_METRES = 1500.

# Default radar heights: 1000-12000 m AGL at 1000-m spacing.
DEFAULT_RADAR_HEIGHTS_M_AGL = numpy.linspace(1000, 12000, num=12, dtype=int)

# Default radar fields for each data source.
DEFAULT_MYRORSS_MRMS_FIELD_NAMES = [
    radar_utils.ECHO_TOP_18DBZ_NAME, radar_utils.ECHO_TOP_50DBZ_NAME,
    radar_utils.LOW_LEVEL_SHEAR_NAME, radar_utils.MID_LEVEL_SHEAR_NAME,
    radar_utils.REFL_NAME, radar_utils.REFL_COLUMN_MAX_NAME,
    radar_utils.REFL_0CELSIUS_NAME, radar_utils.REFL_M10CELSIUS_NAME,
    radar_utils.REFL_M20CELSIUS_NAME, radar_utils.REFL_LOWEST_ALTITUDE_NAME,
    radar_utils.MESH_NAME, radar_utils.SHI_NAME, radar_utils.VIL_NAME
]
DEFAULT_GRIDRAD_FIELD_NAMES = [
    radar_utils.REFL_NAME, radar_utils.SPECTRUM_WIDTH_NAME,
    radar_utils.VORTICITY_NAME, radar_utils.DIVERGENCE_NAME
]

# Fields treated specially when checking grid spacing (see
# `_check_grid_spacing`).
AZIMUTHAL_SHEAR_FIELD_NAMES = [
    radar_utils.LOW_LEVEL_SHEAR_NAME, radar_utils.MID_LEVEL_SHEAR_NAME
]
def _check_extraction_args(
        num_storm_image_rows, num_storm_image_columns, rotate_grids,
        rotated_grid_spacing_metres, radar_field_names, radar_source,
        radar_heights_m_agl=None, reflectivity_heights_m_agl=None):
    """Checks input args for extraction of storm-centered radar images.

    Specifically, this method checks input args for
    `extract_storm_images_myrorss_or_mrms` or `extract_storm_images_gridrad`.

    :param num_storm_image_rows: Number of rows in each storm-centered image.
        Must be even.
    :param num_storm_image_columns: Number of columns in each storm-centered
        image.  Must be even.
    :param rotate_grids: Boolean flag.  If True, each grid will be rotated so
        that storm motion is in the +x-direction; thus, storm-centered grids
        will be equidistant.  If False, each storm-centered grid will be a
        contiguous rectangle extracted from the full grid; thus, storm-centered
        grids will be lat-long.
    :param rotated_grid_spacing_metres: [used only if rotate_grids = True]
        Spacing between grid points in adjacent rows or columns.
    :param radar_field_names: 1-D list with names of radar fields.
    :param radar_source: Data source (must be accepted by
        `radar_utils.check_data_source`).
    :param radar_heights_m_agl: [may be None]
        1-D list of radar heights (metres above ground level).  One storm-
        centered image will be created for each tuple of storm object, field in
        `radar_field_names`, and height in `radar_heights_m_agl`.
    :param reflectivity_heights_m_agl: [may be None]
        1-D list of reflectivity heights (metres above ground level).  One
        storm-centered image will be created for each pair of storm object and
        field in `radar_field_names` (other than "reflectivity_dbz").  One
        storm-centered image will also be created for each pair of storm object
        and "reflectivity_dbz" at height in `reflectivity_heights_m_agl`.
    :raises: ValueError: if `num_storm_image_rows` or `num_storm_image_columns`
        is not even.
    """

    error_checking.assert_is_integer(num_storm_image_rows)
    error_checking.assert_is_greater(num_storm_image_rows, 0)

    # Direct parity test.  The original check
    # (`x != rounder.round_to_nearest(x, 2)`) is equivalent for positive
    # integers but needlessly indirect and subject to float rounding.
    if num_storm_image_rows % 2 != 0:
        error_string = (
            'Number of rows per storm-centered image ({0:d}) should be even.'
        ).format(num_storm_image_rows)

        raise ValueError(error_string)

    error_checking.assert_is_integer(num_storm_image_columns)
    error_checking.assert_is_greater(num_storm_image_columns, 0)

    if num_storm_image_columns % 2 != 0:
        error_string = (
            'Number of columns per storm-centered image ({0:d}) should be even.'
        ).format(num_storm_image_columns)

        raise ValueError(error_string)

    error_checking.assert_is_boolean(rotate_grids)
    if rotate_grids:
        error_checking.assert_is_greater(rotated_grid_spacing_metres, 0.)

    error_checking.assert_is_string_list(radar_field_names)
    error_checking.assert_is_numpy_array(
        numpy.array(radar_field_names), num_dimensions=1)

    radar_utils.check_data_source(radar_source)

    if radar_source == radar_utils.GRIDRAD_SOURCE_ID:
        # GridRad is fully 3-D: heights are required.
        error_checking.assert_is_geq_numpy_array(radar_heights_m_agl, 0)
        error_checking.assert_is_numpy_array(
            numpy.array(radar_heights_m_agl), num_dimensions=1)
    elif reflectivity_heights_m_agl is not None:
        error_checking.assert_is_geq_numpy_array(
            reflectivity_heights_m_agl, 0)
        error_checking.assert_is_numpy_array(
            numpy.array(reflectivity_heights_m_agl), num_dimensions=1)
def _check_grid_spacing(
        new_metadata_dict, orig_lat_spacing_deg, orig_lng_spacing_deg):
    """Ensures consistency between grid spacing in new and original radar files.

    :param new_metadata_dict: Dictionary created by
        `myrorss_and_mrms_io.read_metadata_from_raw_file` or
        `gridrad_io.read_metadata_from_full_grid_file`.
    :param orig_lat_spacing_deg: Spacing (deg N) between meridionally adjacent
        grid points in original file.  May be None on the first call.
    :param orig_lng_spacing_deg: Spacing (deg E) between zonally adjacent grid
        points in original file.  May be None on the first call.
    :return: orig_lat_spacing_deg: See above.
    :return: orig_lng_spacing_deg: See above.
    :raises: ValueError: if grid spacings are inconsistent.
    """

    new_lat_spacing_deg = new_metadata_dict[radar_utils.LAT_SPACING_COLUMN]
    new_lng_spacing_deg = new_metadata_dict[radar_utils.LNG_SPACING_COLUMN]

    # Azimuthal-shear fields get their spacing scaled by a fixed multiplier
    # before comparison.
    if (radar_utils.FIELD_NAME_COLUMN in new_metadata_dict and
            new_metadata_dict[radar_utils.FIELD_NAME_COLUMN] in
            AZIMUTHAL_SHEAR_FIELD_NAMES):
        new_lat_spacing_deg *= AZ_SHEAR_GRID_SPACING_MULTIPLIER
        new_lng_spacing_deg *= AZ_SHEAR_GRID_SPACING_MULTIPLIER

    # Round both spacings to the shared tolerance so that exact equality
    # below is meaningful.
    new_lat_spacing_deg = rounder.round_to_nearest(
        new_lat_spacing_deg, GRID_SPACING_TOLERANCE_DEG)
    new_lng_spacing_deg = rounder.round_to_nearest(
        new_lng_spacing_deg, GRID_SPACING_TOLERANCE_DEG)

    if orig_lat_spacing_deg is None:
        # First file seen: adopt its spacing as the reference.
        orig_lat_spacing_deg = new_lat_spacing_deg + 0.
        orig_lng_spacing_deg = new_lng_spacing_deg + 0.

    if (orig_lat_spacing_deg != new_lat_spacing_deg or
            orig_lng_spacing_deg != new_lng_spacing_deg):
        error_string = (
            'Original file has grid spacing of {0:.4f} deg N, {1:.4f} deg E. '
            'New file has spacing of {2:.4f} deg N, {3:.4f} deg E.'
        ).format(orig_lat_spacing_deg, orig_lng_spacing_deg,
                 new_lat_spacing_deg, new_lng_spacing_deg)

        raise ValueError(error_string)

    return orig_lat_spacing_deg, orig_lng_spacing_deg
def _check_storm_images(
        storm_image_matrix, full_id_strings, valid_times_unix_sec,
        radar_field_name, radar_height_m_agl, rotated_grids,
        rotated_grid_spacing_metres=None):
    """Checks storm-centered radar images for errors.

    L = number of storm objects
    M = number of rows in each image
    N = number of columns in each image

    :param storm_image_matrix: L-by-M-by-N numpy array of storm-centered radar
        measurements.
    :param full_id_strings: length-L list of full storm IDs.
    :param valid_times_unix_sec: length-L numpy array of storm times.
    :param radar_field_name: Name of radar field.
    :param radar_height_m_agl: Height (metres above ground level) of radar
        field.
    :param rotated_grids: Boolean flag.  If True, each grid is rotated so that
        storm motion is in the +x-direction.
    :param rotated_grid_spacing_metres: [used iff `rotated_grids = True`]
        Spacing between grid points in adjacent rows or columns.
    """

    error_checking.assert_is_numpy_array_without_nan(storm_image_matrix)
    error_checking.assert_is_numpy_array(storm_image_matrix, num_dimensions=3)

    # IDs and valid times must both have one entry per storm object.
    expected_dimensions = numpy.array(
        [storm_image_matrix.shape[0]], dtype=int)

    error_checking.assert_is_string_list(full_id_strings)
    error_checking.assert_is_numpy_array(
        numpy.array(full_id_strings), exact_dimensions=expected_dimensions)

    error_checking.assert_is_integer_numpy_array(valid_times_unix_sec)
    error_checking.assert_is_numpy_array(
        valid_times_unix_sec, exact_dimensions=expected_dimensions)

    radar_utils.check_field_name(radar_field_name)
    error_checking.assert_is_geq(radar_height_m_agl, 0)

    error_checking.assert_is_boolean(rotated_grids)
    if rotated_grids:
        error_checking.assert_is_greater(rotated_grid_spacing_metres, 0.)
def _find_input_heights_needed(
        storm_elevations_m_asl, desired_radar_heights_m_agl, radar_source):
    """Finds radar heights needed, in metres above sea level.

    :param storm_elevations_m_asl: 1-D numpy array of storm elevations (metres
        above sea level).
    :param desired_radar_heights_m_agl: 1-D numpy array of desired radar
        heights (metres above ground level).
    :param radar_source: Data source (must be accepted by
        `radar_utils.check_data_source`).
    :return: desired_radar_heights_m_asl: 1-D numpy array of desired radar
        heights (metres above sea level).
    """

    # Range of AGL heights translated to ASL using the extreme storm
    # elevations.
    min_height_m_asl = (
        numpy.min(storm_elevations_m_asl) +
        numpy.min(desired_radar_heights_m_agl)
    )
    max_height_m_asl = (
        numpy.max(storm_elevations_m_asl) +
        numpy.max(desired_radar_heights_m_agl)
    )

    valid_heights_m_asl = radar_utils.get_valid_heights(
        data_source=radar_source, field_name=radar_utils.REFL_NAME)

    keep_indices = numpy.where(numpy.logical_and(
        valid_heights_m_asl >= min_height_m_asl,
        valid_heights_m_asl <= max_height_m_asl
    ))[0]

    # Pad by one level on each side (when available) so that interpolation
    # near the edges has a bracketing level.
    if 0 not in keep_indices:
        keep_indices = numpy.concatenate((
            keep_indices,
            numpy.array([numpy.min(keep_indices) - 1], dtype=int)
        ))

    last_index = len(valid_heights_m_asl) - 1
    if last_index not in keep_indices:
        keep_indices = numpy.concatenate((
            keep_indices,
            numpy.array([numpy.max(keep_indices) + 1], dtype=int)
        ))

    return valid_heights_m_asl[numpy.sort(keep_indices)]
def _fields_and_heights_to_pairs(
        radar_field_names, reflectivity_heights_m_agl, radar_source):
    """Converts lists of fields and reflectivity heights to field-height pairs.

    C = number of field/height pairs

    :param radar_field_names: 1-D list with names of radar fields.
    :param reflectivity_heights_m_agl: 1-D numpy array of reflectivity heights
        (only for the field "reflectivity_dbz", in metres above ground level).
    :param radar_source: Data source (must be accepted by
        `radar_utils.check_data_source`).
    :return: field_name_by_pair: length-C list with names of radar fields.
    :return: height_by_pair_m_agl: length-C numpy array of heights (metres
        above ground level).
    """

    error_checking.assert_is_numpy_array(
        numpy.array(radar_field_names), num_dimensions=1)

    pair_field_names = []
    pair_heights_m_agl = []

    for field_name in radar_field_names:
        if field_name == radar_utils.REFL_NAME:
            # Reflectivity contributes one pair per requested height.
            error_checking.assert_is_geq_numpy_array(
                reflectivity_heights_m_agl, 0)
            error_checking.assert_is_numpy_array(
                reflectivity_heights_m_agl, num_dimensions=1)

            pair_field_names += (
                [field_name] * len(reflectivity_heights_m_agl))
            pair_heights_m_agl += reflectivity_heights_m_agl.tolist()
        else:
            # Every other field lives at a single valid height.
            pair_field_names.append(field_name)
            pair_heights_m_agl.append(
                radar_utils.get_valid_heights(
                    data_source=radar_source, field_name=field_name)[0]
            )

    pair_heights_m_agl = numpy.round(
        numpy.array(pair_heights_m_agl)
    ).astype(int)

    return pair_field_names, pair_heights_m_agl
def _get_relevant_storm_objects(
        storm_object_table, valid_time_unix_sec, valid_spc_date_string):
    """Returns indices of relevant storm objects (at the given time & SPC date).

    :param storm_object_table: See doc for
        `extract_storm_images_myrorss_or_mrms` or
        `extract_storm_images_gridrad`.
    :param valid_time_unix_sec: Will find storm objects with this valid time.
    :param valid_spc_date_string: Will find storm objects on this SPC date
        (format "yyyymmdd").
    :return: relevant_indices: 1-D numpy array with indices of relevant storm
        objects.
    """

    times_match = (
        storm_object_table[tracking_utils.VALID_TIME_COLUMN].values ==
        valid_time_unix_sec
    )
    dates_match = (
        storm_object_table[tracking_utils.SPC_DATE_COLUMN].values ==
        valid_spc_date_string
    )

    return numpy.where(numpy.logical_and(times_match, dates_match))[0]
def _rotate_grid_one_storm_object(
centroid_latitude_deg, centroid_longitude_deg, eastward_motion_m_s01,
northward_motion_m_s01, num_storm_image_rows, num_storm_image_columns,
storm_grid_spacing_metres):
"""Generates lat-long coordinates for rotated, storm-centered grid.
The grid is rotated so that storm motion is in the +x-direction.
m = number of rows in storm-centered grid (must be even)
n = number of columns in storm-centered grid (must be even)
:param centroid_latitude_deg: Latitude (deg N) of storm centroid.
:param centroid_longitude_deg: Longitude (deg E) of storm centroid.
:param eastward_motion_m_s01: Eastward component of storm motion (metres per
second).
:param northward_motion_m_s01: Northward component of storm motion.
:param num_storm_image_rows: m in the above discussion.
:param num_storm_image_columns: n in the above discussion.
:param storm_grid_spacing_metres: Spacing between grid points in adjacent
rows or columns.
:return: grid_point_lat_matrix_deg: m-by-n numpy array with latitudes
(deg N) of grid points.
:return: grid_point_lng_matrix_deg: m-by-n numpy array with longitudes
(deg E) of grid points.
"""
storm_bearing_deg = geodetic_utils.xy_to_scalar_displacements_and_bearings(
x_displacements_metres=numpy.array([eastward_motion_m_s01]),
y_displacements_metres=numpy.array([northward_motion_m_s01])
)[-1][0]
this_max_displacement_metres | |
newdq |= npiece
# The result should be a new mask with reduced dimensionality
return newdq
def _generate_mask(self, data, dq, bitmask=1):
    """
    Use the contents of the dq array to generate a numpy mask of the
    same shape as the data array.

    :Parameters:

    data: numpy array
        The data array to be masked
    dq: numpy array
        The data quality array to be used to generate the mask
    bitmask: unsigned int
        If specified, a mask for selecting particular bits
        from the data quality values.
        The default of 1 will match only bit zero.
        None will match any non-zero data quality value.

    :Returns:

    mask: numpy mask
        A mask which can be used with the data array.
    """
    # A mask can only be generated when both arrays exist, are not
    # empty, and are broadcastable onto each other.
    if not self._isvalid(data) or dq is None:
        return ma.nomask  # or None

    # Unsigned integer type is required for bitwise operations.
    dq = np.asarray(dq, dtype=np.uint)

    if data.ndim < dq.ndim and jmutil.can_broadcast(dq.shape, data.shape):
        # Special case: the DQ array outranks the array being masked.
        # Repeatedly shrink the DQ array until the dimensionalities match.
        reduced_dq = self._shrink_dq(dq)
        while reduced_dq.ndim > data.ndim:
            reduced_dq = self._shrink_dq(reduced_dq)

        # Start from an all-False mask and flag every pixel selected by
        # the (reduced) DQ array.
        mask = np.zeros(data.shape, dtype=np.bool_)
        if bitmask is None:
            # None means all bits set.
            flagged = np.where(reduced_dq != 0)
        else:
            flagged = np.where((reduced_dq & bitmask) != 0)
        mask[flagged] = True
        return mask

    if data.size >= dq.size and jmutil.can_broadcast(data.shape, dq.shape):
        # Broadcast the DQ array up to the shape of the data array.
        broadcast_dq = np.zeros(data.shape, dtype=np.uint) + dq

        # Start from an all-False mask and flag every pixel selected by
        # the (broadcast) DQ array.
        mask = np.zeros(data.shape, dtype=np.bool_)
        if bitmask is None:
            # None means all bits set.
            flagged = np.where(broadcast_dq != 0)
        else:
            flagged = np.where((broadcast_dq & bitmask) != 0)
        mask[flagged] = True
        return mask

    return ma.nomask  # or None
def _generate_fill(self, data, fill_descr):
    """
    Generate a fill value for a data array based on the masked array
    plus a fill description.

    :Parameters:

    data: numpy array
        The data array to be examined.
    fill_descr: str or number
        An instruction for how to fill the missing values within
        a masked array:

        * 'min': Fill with the minimum value.
        * 'max': Fill with the maximum value.
        * 'mean': Fill with the mean value
        * 'median': Fill with the median value
        * '': Fill with the default numpy value.
        * Any other value is assumed to be the fill value.

    :Returns:

    fill_value: number
        The fill value
    """
    # The data array must exist and must not be empty.
    if not self._isvalid(data):
        return None

    if not isinstance(fill_descr, str):
        # Assume the fill description is already a number or None.
        return fill_descr

    if fill_descr == 'min':
        # Use the minimum unmasked value as the fill value
        return data.min()
    if fill_descr == 'max':
        # Use the maximum unmasked value as the fill value
        return data.max()
    if fill_descr == 'mean':
        # Use the mean unmasked value as the fill value
        return data.mean()
    if fill_descr == 'median':
        # BUG FIX: numpy arrays (plain or masked) have no .median()
        # method, so the original `data.median()` raised AttributeError.
        # Use the module-level function, which honours the mask on
        # masked arrays and also works on plain ndarrays.
        return ma.median(data)

    # Any other string (including '') means: use the default numpy
    # fill value.
    return None
def _mask_array(self, data, dq, fill_value=None):
    """
    Return a masked version of the given array.

    NOTE: This function might introduce small rounding errors into
    floating point data, so a value displayed as 3.00000005 before
    masking might display as 3.000000048 afterwards. The difference
    is insignificant, but it looks worse when displayed.

    :Parameters:

    data: numpy array
        The data array to be masked
    dq: numpy array
        The data quality array to be used to generate the mask
    fill_value: number
        If specified, the value used to fill missing entries in the
        data array. If not specified, a numpy default value will be
        used.

    :Returns:

    masked_data: numpy masked array
        A masked version of the original data array.
    """
    # Derive the mask from the DQ array, then wrap the data in it.
    return ma.array(data, mask=self._generate_mask(data, dq),
                    fill_value=fill_value)
def _combine_errors_maximum(self, error1, error2):
"""
Helper function to combine two error arrays and return the maximum.
Can be used when two data arrays are combined with a min or max
function, or are combined by resampling.
NOTE: This function is valid only when both error arrays are sampling
the same error source and you prefer to believe the most pessimistic
estimate. Use with care.
"""
# The end product will have an ERR unit only if both products
# started with an ERR unit.
if error1 is not None and error2 is not None:
newerr = np.maximum(error1, error2)
else:
newerr = None
return newerr
def _combine_errors_quadrature(self, error1, error2):
"""
Helper function to combine two error arrays in quadrature.
Can be used when two data arrays are added or subtracted.
NOTE: This function is valid only when combining two sets
of data with independent errors. This assumption might not
be valid in all circumstances, so use with care.
"""
# The end product will have an ERR unit only if both products
# started with an ERR unit.
if error1 is not None and error2 is not None:
# NOTE: These operations might cause an overflow
# for some data types.
err1sq = np.square(error1)
err2sq = np.square(error2)
sumsq = err1sq + err2sq
newerr = np.sqrt(sumsq)
else:
newerr = None
return newerr
def _combine_errors_multiplicative(self, error1, error2, data1, data2):
"""
Helper function to combine two error arrays in quadrature,
where each error array is weighted by a sensitivity
coefficient.
This functions can be used when two data arrays are multiplied,
so the sensitivity coefficient is proportional to the other
array's measurement data.
NOTE: This function is valid only when combining two sets
of data with independent errors. This assumption might not
be valid in all circumstances, so use with care.
"""
# The end product will have an ERR unit only if both products
# started with an ERR unit.
if error1 is not None and error2 is not None:
if data1 is not None and data2 is not None:
# NOTE: These operations might cause an overflow
# for some data types.
data1sq = np.square(data1)
data2sq = np.square(data2)
err1sq = np.square(error1)
err2sq = np.square(error2)
sumsq = (data2sq * err1sq) + (data1sq * err2sq)
#newerr = np.sqrt(sumsq) / (data1sq+data2sq) ???
newerr = np.sqrt(sumsq)
else:
# Without the data arrays the weighting is unknown.
return self._combine_errors_quadrature(error1, error2)
else:
newerr = None
return newerr
def _combine_errors_divisive(self, error1, error2, data1, data2):
"""
Helper function to combine two error arrays in quadrature,
where each error array is weighted by a sensitivity
coefficient.
This functions is used when one data array is divided by
another, so the sensitivity coefficient for the first array
is proportional to the inverse of the second but the
sensitivity coefficient for the second array is proportional
to the first.
CHECK THE MATHS
NOTE: This function is valid only when combining two sets
of data with independent errors. This assumption might not
be valid in all circumstances, so use with care.
"""
# The end product will have an ERR unit only if both products
# started with an ERR unit.
if error1 is not None and error2 is not None:
if data1 is not None and data2 is not | |
self.dec
if tar_ap // 2 == tar_ap / 2:
print(Warning('tar_ap must be odd, adding 1'))
tar_ap += 1
if sky_out // 2 == sky_out / 2:
print(Warning('sky_out must be odd, adding 1'))
sky_out += 1
if sky_in // 2 == sky_in / 2:
print(Warning('sky_out must be odd, adding 1'))
sky_in += 1
if (ra is not None) & (dec is not None) & (self.tpf is not None):
x,y = self.wcs.all_world2pix(ra,dec,0)
x = int(x + 0.5)
y = int(y + 0.5)
elif (x is None) & (y is None):
x,y = self.wcs.all_world2pix(self.ra,self.dec,0)
x = int(x + 0.5)
y = int(y + 0.5)
ap_tar = np.zeros_like(data[0])
ap_sky = np.zeros_like(data[0])
ap_tar[y,x]= 1
ap_sky[y,x]= 1
ap_tar = convolve(ap_tar,np.ones((tar_ap,tar_ap)))
ap_sky = convolve(ap_sky,np.ones((sky_out,sky_out))) - convolve(ap_sky,np.ones((sky_in,sky_in)))
ap_sky[ap_sky == 0] = np.nan
m = sigma_clip((self.ref)*ap_sky,sigma=2).mask
ap_sky[m] = np.nan
temp = np.nansum(data*ap_tar,axis=(1,2))
ind = temp < np.percentile(temp,40)
med = np.nanmedian(data[ind],axis=0)
med = np.nanmedian(data,axis=0)
if not self.diff:
data = data - self.ref
if mask is not None:
ap_sky = mask
ap_sky[ap_sky==0] = np.nan
sky_med = np.nanmedian(ap_sky*data,axis=(1,2))
sky_std = np.nanstd(ap_sky*data,axis=(1,2))
if self.diff:
tar = np.nansum(data*ap_tar,axis=(1,2))
else:
tar = np.nansum((data+self.ref)*ap_tar,axis=(1,2))
tar -= sky_med * tar_ap**2
tar_err = sky_std #* tar_ap**2
#tar[tar_err > 100] = np.nan
#sky_med[tar_err > 100] = np.nan
if self.tpf is not None:
time = self.tpf.time.mjd
lc = np.array([time, tar, tar_err])
sky = np.array([time, sky_med, sky_std])
if plot:
self.dif_diag_plot(ap_tar,ap_sky,lc = lc,sky=sky,data=data)
if savename is not None:
plt.savefig(savename + '_diff_diag.pdf', bbox_inches = "tight")
return lc, sky
def dif_diag_plot(self,ap_tar,ap_sky,lc=None,sky=None,data=None):
    """
    Makes a plot showing the target light curve, sky, and difference image at the brightest point
    in the target lc.
    ------
    Inputs
    ------
    ap_tar : array
        aperture mask (WARNING: modified in place below)
    ap_sky : array
        sky mask
    lc : array, optional
        target light curve [time, flux, err]; defaults to self.lc
    sky : array, optional
        sky light curve [time, flux, err]; defaults to self.sky
    data : array (shape = 3)
        sequence of images; defaults to self.flux
    ------
    Output
    ------
    Figure
    """
    # Fall back to the products stored on the instance when not supplied.
    if lc is None:
        lc = self.lc
    if sky is None:
        sky = self.sky
    if data is None:
        data = self.flux
    plt.figure(figsize=(3*fig_width,1*fig_width))
    # Left panel: target and sky light curves with 1-sigma bands.
    plt.subplot(121)
    # NOTE(review): the sky band is drawn against lc[0] rather than
    # sky[0] -- fine only if both share the same time axis; confirm.
    plt.fill_between(lc[0],sky[1]-sky[2],sky[1]+sky[2],alpha=.5,color='C1')
    plt.plot(sky[0],sky[1],'C1.',label='Sky')
    plt.fill_between(lc[0],lc[1]-lc[2],lc[1]+lc[2],alpha=.5,color='C0')
    plt.plot(lc[0],lc[1],'C0.',label='Target')
    binned = self.bin_data(lc=lc)
    plt.plot(binned[0],binned[1],'C2.',label='6hr bin')
    plt.xlabel('Time (MJD)',fontsize=15)
    plt.ylabel('Flux ($e^-/s$)',fontsize=15)
    plt.legend(loc=4)
    # Right panel: the image at the brightest point of the light curve.
    plt.subplot(122)
    ap = ap_tar
    # NOTE(review): `ap` is a view of the caller's ap_tar, so this NaNs
    # out zero pixels of the caller's array in place.
    ap[ap==0] = np.nan
    # Index of the maximum-flux sample in the light curve.
    maxind = np.where((np.nanmax(lc[1]) == lc[1]))[0]
    try:
        # Use the first index if several samples share the maximum.
        maxind = maxind[0]
    except:
        pass
    d = data[maxind]
    nonan1 = np.isfinite(d)
    nonan2 = np.isfinite(d*ap)
    # Display stretch: 16th percentile of the full frame up to the 80th
    # percentile inside the target aperture.
    plt.imshow(data[maxind],origin='lower',
               vmin=np.percentile(d[nonan1],16),
               vmax=np.percentile(d[nonan2],80),
               aspect='auto')
    cbar = plt.colorbar()
    cbar.set_label('$e^-/s$',fontsize=15)
    plt.xlabel('Column',fontsize=15)
    plt.ylabel('Row',fontsize=15)
    #plt.imshow(ap,origin='lower',alpha = 0.2)
    #plt.imshow(ap_sky,origin='lower',alpha = 0.8,cmap='hot')
    # Overlay the sky (red) and target (orange) aperture pixels as dots.
    y,x = np.where(ap_sky > 0)
    plt.plot(x,y,'r.',alpha = 0.3)
    y,x = np.where(ap > 0)
    plt.plot(x,y,'C1.',alpha = 0.3)
    return
def plotter(self,lc=None,ax = None,ground=False,time_bin=6/24):
    """
    Simple plotter for light curves.
    ------
    Inputs (Optional)
    ------
    lc : np.array
        light curve with dimensions of at least [2,n]
    ax : matplotlib axes
        existing figure axes to add data to
    time_bin : float
        time range to bin data to in days. ie 1 = 24 hours.
    -------
    Options
    -------
    ground : bool
        if True then ground based data is plotted alongside TESS
    """
    if ground:
        # Lazily fetch ZTF data the first time ground data is requested.
        if self.ground.ztf is None:
            self.ground.get_ztf_data()
        # ZTF comparison requires physical flux units, not counts.
        if self.lc_units.lower() == 'counts':
            self.to_flux()
    if lc is None:
        lc = self.lc
    av = self.bin_data(lc=lc,time_bin=time_bin)
    # Label the binned curve in whole hours when the bin is an exact
    # number of hours, otherwise as a float.
    if time_bin * 24 == int(time_bin * 24):
        lab = int(time_bin * 24)
    else:
        lab = time_bin *24
    if ax is None:
        plt.figure(figsize=(1.5*fig_width,1*fig_width))
        ax = plt.gca()
    # Accept both [n, 2+] (rows are samples) and [2+, n] (rows are
    # time/flux columns) light-curve layouts.
    if lc.shape[0] > lc.shape[1]:
        ax.plot(lc[:,0],lc[:,1],'k.',alpha = 0.4,ms=1,label='$TESS$')
        ax.plot(av[:,0],av[:,1],'k.',label='$TESS$ {}hr'.format(lab))
    else:
        ax.plot(lc[0],lc[1],'.k',alpha = 0.4,ms=1,label='$TESS$')
        ax.plot(av[0],av[1],'.k',label='$TESS$ {}hr'.format(lab))
    if self.lc_units == 'AB mag':
        # Magnitude axis: bright is up.
        ax.invert_yaxis()
        # NOTE: `&` is a bitwise AND of the two booleans here.
        if ground & (self.ground.ztf is not None):
            # Split ZTF photometry into g and r bands; plot upper limits
            # as triangles and detections with error bars.
            gind = self.ground.ztf.fid.values == 'g'
            rind = self.ground.ztf.fid.values == 'r'
            ztfg = self.ground.ztf.iloc[gind]
            ztfr = self.ground.ztf.iloc[rind]
            ax.scatter(ztfg.mjd,ztfg.maglim,c='C2',s=.5,alpha = 0.6,marker='v',label='ZTF g non-detec')
            ax.scatter(ztfr.mjd,ztfr.maglim,c='r',s=.5,alpha = 0.6,marker='v',label='ZTF r non-detec')
            ax.errorbar(ztfg.mjd, ztfg.mag,yerr = ztfg.mag_e, c='C2', fmt='o', ms= 5, label='ZTF g')
            ax.errorbar(ztfr.mjd, ztfr.mag,yerr = ztfr.mag_e, c='r', fmt='o', ms=5, label='ZTF r')
        ax.set_ylabel('Apparent magnitude',fontsize=15)
    else:
        ax.set_ylabel('Flux (' + self.lc_units + ')',fontsize=15)
        if ground & (self.ground.ztf is not None):
            # Convert ZTF magnitudes into the same flux units as TESS
            # before plotting.
            self.ground.to_flux(flux_type=self.lc_units)
            gind = self.ground.ztf.fid.values == 'g'
            rind = self.ground.ztf.fid.values == 'r'
            ztfg = self.ground.ztf.iloc[gind]
            ztfr = self.ground.ztf.iloc[rind]
            ax.scatter(ztfg.mjd,ztfg.fluxlim,c='C2',alpha = 0.6,s=20,marker='v',label='ZTF g non-detec')
            ax.scatter(ztfr.mjd,ztfr.fluxlim,c='r',alpha = 0.6,s=20,marker='v',label='ZTF r non-detec')
            ax.errorbar(ztfg.mjd, ztfg.flux,yerr = ztfg.flux_e,ms=4, c='C2', fmt='o', label='ZTF g')
            ax.errorbar(ztfr.mjd, ztfr.flux,yerr = ztfr.flux_e, ms=4, c='r', fmt='o', label='ZTF r')
    ax.set_xlabel('Time (MJD)',fontsize=15 )
    ax.legend()
    return
def to_lightkurve(self,lc=None,flux_unit=None):
    """
    Convert a TESSreduce light curve into a lightkurve.LightCurve object,
    attaching the appropriate astropy flux unit.
    -----------------
    Inputs (optional)
    -----------------
    lc : array
        light curve with 2xn or 3xn shape; defaults to self.lc
    flux_unit : str
        units of the light curve flux; defaults to self.lc_units
        Valid options:
            counts
            mjy
            jy
            cgs
    -------
    Returns
    -------
    light : lightcurve
        lightkurve lightcurve object. All lk functions will work on this!
    """
    if lc is None:
        lc = self.lc
    if flux_unit is None:
        flux_unit = self.lc_units

    # Translate the unit label into an astropy unit; anything
    # unrecognised is treated as dimensionless.
    label = flux_unit.lower()
    if label == 'counts':
        unit = u.electron/ u.s
    elif label == 'mjy':
        unit = 1e-3 * u.Jy
    elif label == 'jy':
        unit = u.Jy
    elif label == 'cgs':
        unit = u.erg/u.s/u.cm**2/u.Hz
    else:
        unit = 1

    times = Time(lc[0], format='mjd')

    # A third row, when present, holds the flux errors.
    if lc.shape[0] == 3:
        return lk.LightCurve(
            time=times, flux=lc[1] * unit, flux_err=lc[2] * unit)
    return lk.LightCurve(time=times, flux=lc[1] * unit)
def reduce(self, aper = None, align = None, parallel = True, calibrate=True,
bin_size = 0, plot = True, mask_scale = 1,
diff_lc = True,diff=True,verbose=None, tar_ap=3,sky_in=7,sky_out=11,
moving_mask=None,mask=None,double_shift=False):
"""
Reduce the images from the target pixel file and make a light curve with aperture photometry.
This background subtraction method works well on tpfs > 50x50 pixels.
----------
Parameters
----------
aper : None, list, array
aperature to do photometry on
shift : bool
if True the flux array will be shifted to match the position of a reference
parallel : bool
if True parallel processing will be used for background estimation and centroid shifts
scale : str
options = [counts, magnitude, flux, normalise]
if True the light curve will be normalised to the median
bin_size : int
if > 1 then the lightcurve will be binned by that amount
all_output : bool
if True then the lc, flux, reference and background will be returned.
-------
Returns
-------
if all_output = True
lc : array
light curve
flux : array
shifted images to match the reference
ref : array
reference array used in image matching
bkg : array
array of background flux avlues for each image
else
lc : array
light curve
"""
# make reference
if parallel is not None:
self.parallel = parallel
if verbose is not None:
self.verbose = verbose
if (self.flux.shape[1] < 30) & (self.flux.shape[2] < 30):
small = True
else:
small = False
if align is not None:
self.align = align
if small & self.align:
print('Unlikely to get good shifts from a small tpf, so shift has been set to False')
self.align = False
self.get_ref()
if self.verbose > 0:
print('made reference')
# make source mask
if mask is None:
self.make_mask(maglim=18,strapsize=4,scale=mask_scale)#Source_mask(ref,grid=0)
frac = np.nansum((self.mask == 0) * 1.) / (self.mask.shape[0] * self.mask.shape[1])
#print('mask frac ',frac)
if frac < 0.05:
print('!!!WARNING!!! mask is too dense, lowering mask_scale to 0.5, and raising maglim to 15. Background quality will be reduced.')
self.make_mask(maglim=15,strapsize=4,scale=0.5)
if self.verbose > 0:
print('made source mask')
else:
self.mask = mask
if self.verbose > 0:
print('assigned source mask')
# calculate background for each frame
if self.verbose > 0:
print('calculating background')
# calculate the background
self.background()
if np.isnan(self.bkg).all():
# check to see if the background worked
raise ValueError('bkg all nans')
flux = strip_units(self.flux)
# subtract background from unitless flux
self.flux = flux - self.bkg
# get a ref with low background
self.ref = deepcopy(self.flux[self.ref_ind])
if self.verbose > 0:
print('background subtracted')
if np.isnan(self.flux).all():
raise ValueError('flux all nans')
if self.align:
if self.verbose > 0:
print('calculating centroids')
try:
self.centroids_DAO()
if double_shift:
self.shift_images()
self.ref = deepcopy(self.flux[self.ref_ind])
self.fit_shift()
except:
print('Something went wrong, switching to serial')
self.parallel = False
self.centroids_DAO()
#self.fit_shift()
if diff is not None:
self.diff = diff
if not self.diff:
if self.align:
self.shift_images()
self.flux[np.nansum(self.tpf.flux.value,axis=(1,2))==0] = np.nan
if self.verbose > 0:
print('images shifted')
if self.diff:
if self.verbose > 0:
print('!!Re-running for difference image!!')
# reseting to do diffim
self.flux = strip_units(self.tpf.flux)
if self.align:
self.shift_images()
if self.verbose > 0:
print('shifting images')
self.flux[np.nansum(self.tpf.flux.value,axis=(1,2))==0] = np.nan
# subtract reference
self.ref = deepcopy(self.flux[self.ref_ind])
self.flux -= self.ref
self.ref -= self.bkg[self.ref_ind]
# remake mask
self.make_mask(maglim=18,strapsize=4,scale=mask_scale*.5)#Source_mask(ref,grid=0)
frac = np.nansum((self.mask== 0) * 1.) / (self.mask.shape[0] * self.mask.shape[1])
#print('mask frac ',frac)
if frac < 0.05:
print('!!!WARNING!!! mask is too dense, lowering mask_scale to 0.5, and raising maglim to 15. Background quality will be reduced.')
self.make_mask(maglim=15,strapsize=4,scale=0.5)
# assuming that the target is in the centre, so masking it out
m_tar = np.zeros_like(self.mask,dtype=int)
m_tar[self.size//2,self.size//2]= 1
m_tar = convolve(m_tar,np.ones((5,5)))
self.mask = self.mask | m_tar
if moving_mask is not None:
temp = np.zeros_like(self.flux,dtype=int)
temp[:,:,:] = self.mask
self.mask = temp | moving_mask
if self.verbose > 0:
print('remade mask')
# background
if self.verbose > 0:
print('background')
self.background()
self.flux -= self.bkg
if calibrate:
print('Field calibration')
self.field_calibrate()
if diff_lc:
self.lc, self.sky = self.diff_lc(plot=True,tar_ap=tar_ap,sky_in=sky_in,sky_out=sky_out)
else:
self.make_lc(aperture=aper,bin_size=bin_size,
zeropoint = self.zp,scale=scale)#,normalise=False)
def make_lc(self,aperture = None,bin_size=0,zeropoint=None,scale='counts',clip = False):
"""
Perform aperature photometry on a time series of images
Parameters
----------
flux : array
t : array
time
aper : None, list, array
aperature to do aperature photometry on.
bin_size : int
number of points to average
normalise : bool
if true the light curve is normalised to the median
Returns
-------
lc : array
light curve for the pixels defined by the aperture
"""
# hack solution for new lightkurve
flux = strip_units(self.flux)
t = self.tpf.time.mjd
if type(aperture) == type(None):
aper = np.zeros_like(flux[0])
aper[int(aper.shape[0]/2),int(aper.shape[1]/2)] = 1
aper = convolve(aper,np.ones((3,3)))
temp = np.zeros_like(flux[0])
elif type(aperture) == list:
temp = np.zeros_like(flux[0])
temp[aperture[0],aperture[1]] = 1
aper = temp
elif type(aperture) == np.ndarray:
aper = aperture * 1.
lc = Lightcurve(flux,aper) #,scale = scale)
if clip:
mask = ~sigma_mask(lc)
lc[mask] = np.nan
if bin_size > 1:
lc, t = bin_data(t,lc,bin_size)
lc = np.array([t,lc])
if (zeropoint is not None) & (scale=='mag'):
lc[1,:] = -2.5*np.log10(lc[1,:]) + zeropoint
self.lc = lc
def lc_events(self,err=None,duration=10,sig=5):
"""
Use clustering to detect individual high SNR events in a light curve.
Clustering isn't incredibly robust, so it could be better.
-----------------
Inputs (optional)
-----------------
err : array
flux error to be used in weighting of events
duration : int
How long an event needs to last for before being detected
sig : float
significance of the detection above the background
--------
Returns
-------
self.events : list
list of light curves for all identified events
"""
lc = self.lc
ind = np.isfinite(lc[1])
lc = lc[:,ind]
mask = Cluster_cut(lc,err=err,sig=sig)
outliers = Identify_masks(mask)
good = np.nansum(outliers,axis=1) > duration
outliers = outliers[good]
print('Found {} events longer than {} frames at {} sigma'.format(outliers.shape[0],duration,sig))
temp = outliers * lc[1][np.newaxis,:]
lcs = []
for event in temp:
l = (self.lc[:2]).copy()
l[1,:] = np.nan
l[1,ind] = event
lcs += [l]
lcs = np.array(lcs)
lcs[lcs == 0] = np.nan
self.events = lcs
def event_plotter(self,**kwargs):
"""
Lazy plotting tool for checking the detected events.
"""
if self.events is None:
self.lc_events(**kwargs)
plt.figure()
plt.plot(self.lc[0],self.lc[1],'k.')
for i in range(len(self.events)):
plt.plot(self.events[i,0],self.events[i,1],'*',label='Event {}'.format(i))
plt.xlabel('MJD')
plt.ylabel('Flux')
def detrend_transient(self,lc=None,err=None,Mask=None,variable=False,sig = 5,
sig_up = 3, sig_low = 10, tail_length='auto',plot=False):
"""
Removes all | |
<reponame>jonathantribouharet/FrameworkBenchmarks
import json
import re
import traceback
from datetime import datetime
from toolset.utils.output_helper import log
from time import sleep
def basic_body_verification(body, url, is_json_check=True):
    '''
    Takes in a raw (stringy) response body, checks that it is non-empty,
    and that it is valid JSON (i.e. can be deserialized into a dict/list of dicts)
    Returns the deserialized body as a dict (or list of dicts), and also returns any
    problems encountered, always as a list. If len(problems) > 0,
    then the response body does not have to be examined further and the caller
    should handle the failing problem(s).
    Plaintext and Fortunes set `is_json_check` to False
    '''
    # Guard clauses for missing/empty responses
    if body is None:
        return None, [('fail', 'No response', url)]
    if len(body) == 0:
        return None, [('fail', 'Empty response', url)]

    # Fortunes and Plaintext only use this for the empty response tests;
    # they do not need or expect a dict back
    if not is_json_check:
        return None, []

    try:
        return json.loads(body), []
    except ValueError as ve:
        return None, [('fail', 'Invalid JSON: %s' % ve, url)]
def verify_headers(request_headers_and_body, headers, url, should_be='json'):
    '''
    Verifies the headers of a framework response.

    request_headers_and_body : callable issuing a GET and returning (headers, body)
    headers : mapping of the first response's headers (lower-cased key lookup)
    url : the URL under test, echoed into every reported problem
    should_be : switch for the three acceptable content types
                ('json', 'html', or 'plaintext')

    Returns a list of (severity, message, url) problem tuples.
    '''
    problems = []

    # Required headers must all be present
    for v in (v for v in ('Server', 'Date', 'Content-Type')
              if v.lower() not in headers):
        problems.append(('fail', 'Required response header missing: %s' % v,
                         url))

    if all(v.lower() not in headers
           for v in ('Content-Length', 'Transfer-Encoding')):
        problems.append((
            'fail',
            'Required response size header missing, please include either "Content-Length" or "Transfer-Encoding"',
            url))

    # Date header must match RFC 1123 format
    date = headers.get('Date')
    if date is not None:
        expected_date_format = '%a, %d %b %Y %H:%M:%S %Z'
        try:
            datetime.strptime(date, expected_date_format)
        except ValueError:
            problems.append((
                'warn',
                'Invalid Date header, found \"%s\", did not match \"%s\".'
                % (date, expected_date_format), url))

    # Make sure that the Date header isn't cached: re-request after a pause
    # and verify the two Date values differ.
    # (The duplicate/unused `second_date` variable was removed.)
    sleep(3)
    second_headers, _ = request_headers_and_body(url)
    date2 = second_headers.get('Date')
    if date == date2:
        problems.append((
            'fail',
            'Invalid Cached Date. Found \"%s\" and \"%s\" on separate requests.'
            % (date, date2), url))

    # Content-Type must match the expected pattern for this test type
    content_type = headers.get('Content-Type')
    if content_type is not None:
        types = {
            'json': '^application/json(; ?charset=(UTF|utf)-8)?$',
            'html': '^text/html; ?charset=(UTF|utf)-8$',
            'plaintext': '^text/plain(; ?charset=(UTF|utf)-8)?$'
        }
        expected_type = types[should_be]
        if not re.match(expected_type, content_type):
            problems.append((
                'fail',
                'Invalid Content-Type header, found \"%s\", did not match \"%s\".'
                % (content_type, expected_type), url))
    return problems
def verify_helloworld_object(json_object, url):
    '''
    Ensure that the JSON object closely resembles
    { 'message': 'Hello, World!' }

    Returns a list of (severity, message, url) problem tuples; empty on success.
    '''
    problems = []

    try:
        # Make everything case insensitive.
        # BUGFIX: .iteritems() is Python 2 only and always raised under
        # Python 3 (silently masked by the bare except); use .items().
        json_object = {k.lower(): v.lower() for k, v in json_object.items()}
    except Exception:
        # Not a dict, or a key/value was not a string
        return [('fail', "Not a valid JSON object", url)]

    if 'message' not in json_object:
        return [('fail', "Missing required key 'message'", url)]
    else:
        json_len = len(json_object)
        if json_len > 1:
            additional = ', '.join(
                [k for k in json_object.keys() if k != 'message'])
            problems.append(
                ('warn', "Too many JSON key/value pairs, consider removing: %s"
                 % additional, url))
        if json_len > 27:
            # BUGFIX: append was called with two positional arguments
            # (TypeError); the problem must be a single tuple, and it now
            # carries the url like every other reported problem.
            problems.append(
                ('warn',
                 "%s additional response byte(s) found. Consider removing unnecessary whitespace."
                 % (json_len - 26), url))

        message = json_object['message']

        if message != 'hello, world!':
            return [('fail',
                     "Expected message of 'hello, world!', got '%s'" % message,
                     url)]
        return problems
def verify_randomnumber_object(db_object, url, max_infraction='fail'):
    '''
    Ensures that `db_object` is a JSON object with
    keys 'id' and 'randomNumber' that both map to ints.
    Should closely resemble:
    { "id": 2354, "randomNumber": 8952 }

    Returns a list of (severity, message, url) problem tuples; empty on success.
    '''
    problems = []

    # A dict is expected; show a truncated preview of whatever was received
    if type(db_object) is not dict:
        got = str(db_object)[:20]
        if len(str(db_object)) > 20:
            got = str(db_object)[:17] + '...'
        return [(max_infraction,
                 "Expected a JSON object, got '%s' instead" % got, url)]

    # Make keys case insensitive.
    # BUGFIX: .iteritems() is Python 2 only; use .items() under Python 3.
    db_object = {k.lower(): v for k, v in db_object.items()}
    required_keys = set(['id', 'randomnumber'])

    for v in (v for v in required_keys if v not in db_object):
        problems.append(
            (max_infraction,
             'Response object was missing required key: %s' % v, url))

    if len(db_object) > len(required_keys):
        extras = set(db_object.keys()) - required_keys
        problems.append(
            ('warn', 'An extra key(s) is being included with the db object: %s'
             % ', '.join(extras), url))

    # All required keys must be present
    if len(problems) > 0:
        return problems

    # Assert key types and values.
    # BUGFIX: int() raises ValueError (not TypeError) for non-numeric strings,
    # so both exception types must be handled.
    try:
        o_id = int(db_object['id'])

        if o_id > 10000 or o_id < 1:
            problems.append((
                'warn',
                'Response key id should be between 1 and 10,000: ' + str(o_id),
                url))
    except (TypeError, ValueError) as e:
        problems.append(
            (max_infraction,
             "Response key 'id' does not map to an integer - %s" % e, url))

    try:
        o_rn = int(db_object['randomnumber'])

        if o_rn > 10000:
            problems.append((
                'warn',
                'Response key `randomNumber` is over 10,000. This may negatively affect performance by sending extra bytes',
                url))
    except (TypeError, ValueError) as e:
        problems.append(
            (max_infraction,
             "Response key 'randomnumber' does not map to an integer - %s" % e,
             url))

    return problems
def verify_randomnumber_list(expected_len,
                             headers,
                             body,
                             url,
                             max_infraction='fail'):
    '''
    Validates that the object is a list containing a number of
    randomnumber object. Should closely resemble:
    [{ "id": 2354, "randomNumber": 8952 }, { "id": 4421, "randomNumber": 32 }, ... ]
    '''
    response, problems = basic_body_verification(body, url)

    if problems:
        return problems

    # A framework returning a single JSON object instead of a one-element
    # list is tolerated with a warning; the object itself is still verified.
    if type(response) is not list:
        problems.append(('warn', 'Top-level JSON is an object, not an array',
                         url))
        problems += verify_randomnumber_object(response, url, max_infraction)
        return problems

    if any(type(item) is not dict for item in response):
        problems.append(
            (max_infraction,
             'Not all items in the JSON array were JSON objects', url))

    if len(response) != expected_len:
        problems.append((max_infraction,
                         "JSON array length of %s != expected length of %s" %
                         (len(response), expected_len), url))

    # Verify individual objects, arbitrarily stopping after five bad ones so
    # we do not report on every element of a large (e.g. 500-item) list.
    bad_objects = 0
    for obj in response:
        if bad_objects >= 5:
            break
        findings = verify_randomnumber_object(obj, url, max_infraction)
        if findings:
            problems += findings
            bad_objects += 1

    return problems
def verify_updates(old_worlds, new_worlds, updates_expected, url):
    '''
    Validates that the /updates requests actually updated values in the database and didn't
    just return a JSON list of the correct number of World items.

    old_worlds  a JSON object containing the state of the Worlds table BEFORE the /updates requests
    new_worlds  a JSON object containing the state of the Worlds table AFTER the /updates requests

    If no items were updated, this validation test returns a "fail."
    If only some items were updated (within a 5% margin of error), this test returns a "warn".
    This is to account for the unlikely, but possible situation where an entry in the World
    table is updated to the same value it was previously set as.
    '''
    successful_updates = 0
    problems = []

    # Walk the before/after snapshots batch by batch until at least one
    # changed entry is observed.
    batch = 0
    while batch < len(old_worlds) and successful_updates == 0:
        for i in range(1, 10001):
            entry_id = str(i)
            try:
                before = old_worlds[batch]
                after = new_worlds[batch]
                if entry_id in before and entry_id in after:
                    if before[entry_id] != after[entry_id]:
                        successful_updates += 1
            except Exception:
                log(traceback.format_exc())
        batch += 1

    if successful_updates == 0:
        problems.append(("fail", "No items were updated in the database.",
                         url))
    elif successful_updates <= (updates_expected * 0.90):
        problems.append((
            "fail",
            "Only %s items were updated in the database out of roughly %s expected."
            % (successful_updates, updates_expected), url))
    elif successful_updates <= (updates_expected * 0.95):
        problems.append((
            "warn",
            "There may have been an error updating the database. Only %s items were updated in the database out of the roughly %s expected."
            % (successful_updates, updates_expected), url))

    return problems
def verify_query_cases(self, cases, url, check_updates=False):
'''
The /updates and /queries tests accept a `queries` parameter
that is expected to be between 1-500.
This method execises a framework with different `queries` parameter values
then verifies that the framework responds appropriately.
The `cases` parameter should be a list of 2-tuples containing the query case
and the consequence level should the cases fail its verifications, e.g.:
cases = [
('2', 'fail'),
('0', 'fail'),
('foo', 'fail'),
('501', 'warn'),
('', 'fail')
]
The reason for using 'warn' is generally | |
# -*- coding: utf-8 -*-
"""
@author: <NAME> <<EMAIL>>
Date: Aug 2017 2018
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as _plt
from ..simenv import SimEnv
from .. import precomp_funs as _pf
class Pump(SimEnv):
"""
type: Pump class.
One pump is required in each contiguous seaction of the model environment
which is not separated from other sections by a hydraulic compensator like
a thermal energy storage.
The pumps determine the mass flows in these contiguous sections.
The mass flow is calculated after each timestep and intermediate step
depending on the given control algorithm and the measured values in the
specified measuring port.
The Pump class does not contain a differential method as it only passes the
values of the part connected to its 'in'-port to its 'out'-port and the
values of the part connected to its 'out'-port to its 'in'-port. Thus it is
not involved in solving the equations using the specified solver algorithm.
"""
    def __init__(self, name, master_cls, **kwargs):
        """
        Construct a Pump part and register it with the simulation environment.

        Parameters:
            name: unique part name used for port identification and registry.
            master_cls: the SimEnv master instance this part belongs to.
            **kwargs: accepted for interface compatibility; not read here.
        """
        self._models = master_cls
        self.constr_type = 'Pump'  # define construction type
        base_err = (  # define leading base error message
            'While adding {0} `{1}` to the simulation '
            'environment, the following error occurred:\n'
        ).format(self.constr_type, str(name))
        arg_err = (  # define leading error for missing/incorrect argument
            'Missing argument or incorrect type/value: {0}\n\n'
        )
        self._base_err = base_err  # save to self to access it in controllers
        self._arg_err = arg_err  # save to self to access it in controllers
        # super().__init__()
        self.name = name
        self._unit = '[kg/s]'  # unit of the actuator
        self.part_id = self._models.num_parts - 1
        # # array defining the minium and maximum massflow for the pump:
        # done in init now!
        # self.dm_range = np.array((dm_min, dm_max))
        # save smallest possible float number for avoiding 0-division:
        self._tiny = self._models._tiny
        # even though this part is not using numeric solving, number of
        # gridpoints are specified anyways:
        self.num_gp = 2
        # preallocate grids:
        self.T = np.zeros(2, dtype=np.float64)
        self._T_init = np.zeros_like(self.T)  # init temp for resetting env.
        # preallocate T ports array (in Pump only used for dimension checking)
        self._T_port = np.zeros_like(self.T)
        # self.dm = np.zeros(1)
        # self.U = np.zeros(2)
        # preallocate grids for port connection parameters
        # cross section area of wall of connected pipe, fluid cross section
        # area of, gridspacing and lambda of wall of connected pipe
        self._A_wll_conn_p = np.zeros_like(self._T_port)
        self._A_fld_conn_p = np.zeros_like(self._T_port)
        self._port_gsp = np.full_like(self._T_port, self._tiny)
        self._lam_wll_conn_p = np.full_like(self._T_port, self._tiny)
        self._lam_port_fld = np.full_like(self._T_port, self._tiny)
        # port_definition (first and last array element):
        self.port_num = 2
        # Index to own value array to get values of own ports, meaning if I
        # index a FLATTENED self.T.flat with self._port_own_idx, I need to
        # get values accoring to the order given in self.port_names.
        # That is, this must yield the value of the cell in self.T, which is
        # belonging to the port 'in':
        # self.T.flat[self._port_own_idx[self.port_names.index('in')]]
        self._port_own_idx = np.array((0, self.T.shape[0] - 1), dtype=np.int32)
        self._port_own_idx_2D = self._port_own_idx  # save for compatibility
        """port_array"""
        self.port_ids = np.array((), dtype=np.int32)
        # save port names
        self.port_names = tuple(('in', 'out'))
        # set massflow characteristics for ports: in means that an inflowing
        # massflow has a positive sign, out means that an outflowing massflow
        # is pos.
        self.dm_char = tuple(('in', 'out'))
        # construct partname+portname to get fast access to own ports:
        dummy_var = list(self.port_names)
        for i in range(self.port_num):
            dummy_var[i] = self.name + ';' + dummy_var[i]
        self._own_ports = tuple(dummy_var)
        # preallocate port values to avoid allocating in loop:
        self._port_vals = np.zeros(self.port_num)
        # preallocate list to mark ports which have already been solved in
        # topology (to enable creating subnets)
        self._solved_ports = list()
        # preallocate massflow grid with port_num. An estimate of total rows
        # will be preallocated before simulation start in initialize_sim:
        self.res_dm = np.zeros((2, self.port_num))
        # set if type has to be solved numeric:
        self.solve_numeric = False
        # if port arrays shall be collapsed to amount of ports to improve speed
        self.collapse_arrays = False
        self._collapsed = False  # bool checker if already collapsed
        # determine if part is treated as hydraulic compensator
        self.hydr_comp = False
        # if part can be a parent part of a primary flow net:
        self._flow_net_parent = True
        # add each flow channel of part to hydr_comps (will be removed once its
        # massflow solving method is completely integrated in flow_net.
        # remaining parts except real hydr comps will be used to generate an
        # error):
        self._models._hydr_comps.add(self.name)
        # if the topology construction method has to stop when it reaches the
        # part to solve more ports from other sides before completely solving
        # the massflow of it. This will be set to false as soon as only one
        # port to solve is remaining:
        self.break_topology = False
        # count how many ports are still open to be solved by topology. If
        # break topology is True, this is used to set it to False if 1 is
        # reached.
        self._cnt_open_prts = self.port_num  # not required here
        self._port_heatcond = True  # if heatcond. over ports is enabled
        # determine if part has the capability to affect massflow (dm) by
        # diverting flow through ports or adding flow through ports:
        self.affect_dm = False
        # if the massflow (dm) has the same value in all cells of the part
        # (respectively in each flow channel for parts with multiple flows):
        self.dm_invariant = True
        # if the part has multiple separated flow channels which do NOT mix
        # (like a heat exchanger for exampe):
        self.multiple_flows = False
        # bool checker if flows were updated in update_flownet to avoid
        # processing flows in get_diff each time (array for referencing):
        self._process_flows = np.array([True])
        # bool check if massflow is given for the entire program run:
        self.dm_given = False
        # if the part CAN BE controlled by the control algorithm:
        self.is_actuator = True
        # if the part HAS TO BE controlled by the control algorithm:
        self.control_req = True
        # if the part needs a special control algorithm (for parts with 2 or
        # more controllable inlets/outlets/...):
        self.actuator_special = False
        # initialize bool if control specified:
        self.ctrl_defined = False
        # if the parts get_diff method is solved with memory views entirely and
        # thus has arrays which are extended by +2 (+1 at each end):
        self.enlarged_memview = False
        # if the part has a special plot method which is defined within the
        # part's class:
        self.plot_special = True
        # save initialization status:
        self.initialized = False
        # save memory address of T
        self._memadd_T = self.T.__array_interface__['data'][0]
        # preallocate massflow grid:
        if self.dm_invariant:
            # a single shared massflow value for the whole part
            self.dm = np.zeros(1)
        else:
            self.dm = np.zeros(self.port_num)
        # and also preallocate grid for massflow through ports:
        if not self.hydr_comp:
            # if part is no hydraulic compensator, dm ports grid is simply a
            # memory view to massflow grid
            self._dm_port = self.dm[:]
            self._dm_io = self.dm[:]
        else:
            # if part is a hydraulic compensator, dm ports is separate from dm
            self._dm_port = np.zeros_like(self.T)
            self._dm_io = np.zeros_like(self.T)
        # set array where the CV is set to:
        if self.is_actuator:
            self._actuator_CV = self.dm[:]  # set array to be controlled
            self._actuator_CV_name = 'massflow'  # set description
        # save memory address of dm
        self._memadd_dm = self.dm.__array_interface__['data'][0]
        # save all kind of info stuff to dicts:
        # topology info:
        self.info_topology = dict()
        # IMPORTANT: THIS VARIABLE **MUST NOT BE INHERITED BY SUB-CLASSES**!!
        # If sub-classes are inherited from this part, this bool checker AND
        # the following variables MUST BE OVERWRITTEN!
        # ist the diff function fully njitted AND are all input-variables
        # stored in a container?
        self._diff_fully_njit = False
        # self._diff_njit = pipe1D_diff  # handle to njitted diff function
        # input args are created in simenv _create_diff_inputs method
def init_part(self, *, start_massflow, **kwargs):
"""
Initialize pump with specifications, material and initial conditions.
"""
# get material properties and pipe specifications:
self._get_specs_n_props(**kwargs)
# gridspacing is saved in an array of length port_num to save the
# gridspacing of connected parts for heat flux calculation. this array
# is pre-filled with an | |
])
self.configure(f'{conan}.in', conan, True, [
('BIN', f'"{bin_directory}"'),
('CONAN', "'/usr/local/bin/conan'"),
('USERNAME', config["options"]["username"]),
])
self.configure(f'{buildroot}.in', buildroot, True, [
('BUILDROOT_VERSION', buildroot_version),
('JOBS', config["options"]["build_jobs"]),
('USERNAME', config["options"]["username"]),
])
self.configure(f'{buildroot32}.in', buildroot32, True, [
('BUILDROOT_VERSION', buildroot_version),
('JOBS', config["options"]["build_jobs"]),
('USERNAME', config["options"]["username"]),
])
self.configure(f'{entrypoint}.in', entrypoint, True, [
('BIN', f'"{bin_directory}"'),
])
self.configure(f'{gcc}.in', gcc, True, [
('CROSSTOOL_VERSION', f'"{ct_version}"'),
('JOBS', config["options"]["build_jobs"]),
('SLEEP', config["options"]["sleep"]),
('TIMEOUT', config["options"]["timeout"]),
('USERNAME', config["options"]["username"]),
])
self.configure(f'{gcc_patch}.in', gcc_patch, True, [
('CROSSTOOL_VERSION', f'"{ct_version}"'),
('JOBS', config["options"]["build_jobs"]),
('SLEEP', config["options"]["sleep"]),
('TIMEOUT', config["options"]["timeout"]),
('USERNAME', config["options"]["username"]),
])
self.configure(f'{meson}.in', meson, True, [
('BIN', f'"{bin_directory}"'),
('MESON', "'/usr/local/bin/meson'"),
])
self.configure(f'{musl}.in', musl, True, [
('BINUTILS_VERSION', binutils_version),
('BINUTILS_XZ_SHA1', config['binutils']['version']['xz_sha1']),
('GCC_VERSION', gcc_version),
('GCC_XZ_SHA1', config['gcc']['version']['xz_sha1']),
('GMP_VERSION', gmp_version),
('GMP_BZ2_SHA1', config['gmp']['version']['bz2_sha1']),
('ISL_VERSION', isl_version),
('ISL_BZ2_SHA1', config['isl']['version']['bz2_sha1']),
('MPC_VERSION', mpc_version),
('MPC_GZ_SHA1', config['mpc']['version']['gz_sha1']),
('MPFR_VERSION', mpfr_version),
('MPFR_BZ2_SHA1', config['mpfr']['version']['bz2_sha1']),
('LINUX_HEADERS_VERSION', linux_headers_version),
('LINUX_HEADERS_XZ_SHA1', config['linux-headers']['version']['xz_sha1']),
('LINUX_VERSION', linux_version),
('LINUX_XZ_SHA1', config['linux']['version']['xz_sha1']),
('MUSL_CROSS_VERSION', musl_cross_version),
('MUSL_VERSION', musl_version),
('MUSL_GZ_SHA1', config['musl']['version']['gz_sha1']),
('JOBS', config["options"]["build_jobs"]),
('USERNAME', config["options"]["username"]),
])
self.configure(f'{qemu}.in', qemu, True, [
('JOBS', config["options"]["build_jobs"]),
('QEMU_VERSION', qemu_version),
('SYSROOT', f'"{config["options"]["sysroot"]}"'),
])
self.configure(f'{qemu_apt}.in', qemu_apt, True, [
('BIN', f'"{bin_directory}"'),
])
self.configure(f'{riscv_gcc}.in', riscv_gcc, True, [
('BINUTILS_VERSION', riscv_binutils_version),
('GCC_VERSION', gcc_version),
('GDB_VERSION', riscv_gdb_version),
('GLIBC_VERSION', riscv_glibc_version),
('JOBS', config["options"]["build_jobs"]),
('NEWLIB_VERSION', riscv_newlib_version),
('TOOLCHAIN_VERSION', riscv_toolchain_version),
])
self.configure(f'{shortcut}.in', shortcut, True, [
('BIN', f'"{bin_directory}"'),
])
self.configure(f'{target_features}.in', target_features, True, [
('BIN', f'"{bin_directory}"'),
])
self.configure(f'{vcpkg}.in', vcpkg, True, [
('SYSROOT', f'"{config["options"]["sysroot"]}"'),
])
self.configure(f'{vcpkg_triplet}.in', vcpkg_triplet, True, [
('BIN', f'"{bin_directory}"'),
('SYSROOT', f'"{config["options"]["sysroot"]}"'),
])
def configure_ctng_config(self):
'''Configure the scripts for crosstool-NG.'''
patch = f'{HOME}/ct-ng/patch.sh'
replacements = []
# Patch the GCC version.
old_gcc_major = '8'
old_gcc_version = '8.3.0'
replacements.append(('GCC_V_OLD', f'CT_GCC_V_{old_gcc_major}=y'))
ct_gcc = [f'CT_GCC_V_{gcc_major}=y']
for gcc_v in reversed(range(int(old_gcc_major), int(gcc_major))):
ct_gcc.append(f'# CT_GCC_V_{gcc_v} is not set')
replacements.append(('GCC_V_NEW', '\\n'.join(ct_gcc)))
replacements.append(('GCC_OLD', old_gcc_version))
replacements.append(('GCC_NEW', gcc_version))
# Patch the MinGW version.
old_mingw_major = '6'
old_mingw_version = '6.0.0'
replacements.append(('MINGW_V_OLD', f'CT_MINGW_V_{old_mingw_major}=y'))
ct_mingw = [f'CT_MINGW_V_{mingw_major}=y']
for mingw_v in reversed(range(int(old_mingw_major), int(mingw_major))):
ct_mingw.append(f'# CT_MINGW_V_{mingw_v} is not set')
replacements.append(('MINGW_V_NEW', '\\n'.join(ct_mingw)))
replacements.append(('MINGW_OLD', old_mingw_version))
replacements.append(('MINGW_NEW', mingw_version))
# Configure the glibc version.
old_glibc_major = '2'
old_glibc_minor = '29'
old_glibc_version = '2.29'
replacements.append(('GLIBC_V_OLD', f'CT_GLIBC_V_{old_glibc_major}_{old_glibc_minor}=y'))
ct_glibc = [f'CT_GLIBC_V_{glibc_major}_{glibc_minor}=y']
if old_glibc_major == glibc_major:
for glibc_v in reversed(range(int(old_glibc_minor), int(glibc_minor))):
ct_glibc.append(f'# CT_GLIBC_V_{glibc_major}_{glibc_v} is not set')
else:
ct_glibc.append(f'# CT_GLIBC_V_{old_glibc_major}_{old_glibc_minor} is not set')
for glibc_v in reversed(range(int(old_glibc_major) + 1, int(glibc_major))):
ct_glibc.append(f'# CT_GLIBC_V_{glibc_major}_0 is not set')
replacements.append(('GLIBC_V_NEW', '\\n'.join(ct_glibc)))
replacements.append(('GLIBC_OLD', old_glibc_version))
replacements.append(('GLIBC_NEW', glibc_version))
# Configure the musl version.
old_musl_major = '1'
old_musl_minor = '1'
old_musl_patch = '21'
old_musl_version = '1.1.21'
replacements.append((
'MUSL_V_OLD',
f'CT_MUSL_V_{old_musl_major}_{old_musl_minor}_{old_musl_patch}=y'
))
ct_musl = [
f'CT_MUSL_V_{musl_major}_{musl_minor}_{musl_patch}=y',
f'# CT_MUSL_V_{old_musl_major}_{old_musl_minor}_{old_musl_patch} is not set'
]
replacements.append(('MUSL_V_NEW', '\\n'.join(ct_musl)))
replacements.append(('MUSL_OLD', old_musl_version))
replacements.append(('MUSL_NEW', musl_version))
# Configure the expat version.
old_expat_major = '2'
old_expat_minor = '2'
old_expat_version = '2.2.6'
replacements.append(('EXPAT_V_OLD', f'CT_EXPAT_V_{old_expat_major}_{old_expat_minor}=y'))
ct_expat = [
f'CT_EXPAT_V_{expat_major}_{expat_minor}=y',
f'# CT_EXPAT_V_{old_expat_major}_{old_expat_minor} is not set'
]
replacements.append(('EXPAT_V_NEW', '\\n'.join(ct_expat)))
replacements.append(('EXPAT_OLD', old_expat_version))
replacements.append(('EXPAT_NEW', expat_version))
self.configure(f'{patch}.in', patch, True, replacements)
def configure_musl_config(self):
'''Configure the MUSL libc config files.'''
template = f'{HOME}/musl/config.mak.in'
for image in musl_cross_images:
outfile = f'{HOME}/musl/config/{image.target}.mak'
self.configure(template, outfile, False, [
('BINUTILS_VERSION', binutils_version),
('GCC_CONFIG', image.gcc_config),
('GCC_VERSION', gcc_version),
('GMP_VERSION', gmp_version),
('ISL_VERSION', isl_version),
('LINUX_HEADERS_VERSION', linux_headers_version),
('LINUX_VERSION', linux_version),
('MPC_VERSION', mpc_version),
('MPFR_VERSION', mpfr_version),
('MUSL_VERSION', musl_version),
('TARGET', image.config),
('USERNAME', config['options']['username']),
])
def configure_dockerfile(
self,
image,
template=None,
replacements=None,
base='ubuntu',
spec='spec',
symlink='symlink',
toolchain='toolchain',
wrapper='wrapper',
linker='',
cc='',
cxx='',
):
'''Configure a Dockerfile from template.'''
# These files are read in the order they're likely to change,
# as well as compile-time.
# Any template files may have long compilations, and will
# change rarely. Qemu is an apt package, and unlikely to change.
# Symlinks, toolchains, and entrypoints change often, but are
# cheap and easy to fix.
contents = []
# Mandatory Docker templates, the base image.
# These will **never** change,
with open(f'{HOME}/docker/Dockerfile.{base}.in', 'r') as file:
contents.append(file.read())
with open(f'{HOME}/docker/Dockerfile.adduser.in', 'r') as file:
contents.append(file.read())
with open(f'{HOME}/docker/Dockerfile.build-essential.in', 'r') as file:
contents.append(file.read())
with open(f'{HOME}/docker/Dockerfile.directory.in', 'r') as file:
contents.append(file.read())
# Optional docker templates, in order of compiler time.
# These will change, but it's important later templates
# build faster than earlier templates. If done incorrectly,
# a full rebuild can take well over a week.
if template is not None:
with open(template, 'r') as file:
contents.append(file.read())
if image.qemu:
with open(f'{HOME}/docker/Dockerfile.qemu.in', 'r') as file:
contents.append(file.read())
if wrapper is not None:
with open(f'{HOME}/docker/Dockerfile.{wrapper}.in', 'r') as file:
contents.append(file.read())
if symlink is not None:
with open(f'{HOME}/docker/Dockerfile.{symlink}.in', 'r') as file:
contents.append(file.read())
if spec is not None:
with open(f'{HOME}/docker/Dockerfile.{spec}.in', 'r') as file:
contents.append(file.read())
if toolchain is not None:
with open(f'{HOME}/docker/Dockerfile.{toolchain}.in', 'r') as file:
contents.append(file.read())
# Add the mandatory entrypoint.
with open(f'{HOME}/docker/Dockerfile.entrypoint.in', 'r') as file:
contents.append(file.read())
# Add image labels and metadata.
with open(f'{HOME}/docker/Dockerfile.metadata.in', 'r') as file:
contents.append(file.read())
contents = '\n'.join(contents)
# Add to the replacements all the shared values.
if replacements is None:
replacements = []
replacements = replacements + [
('AUTHORS', config['metadata']['authors']),
('EMSDK_VERSION', emsdk_version),
('BIN', f'"{bin_directory}"'),
('CC', f'"{cc}"'),
('CXX', f'"{cxx}"'),
('ENTRYPOINT', f'"{bin_directory}/entrypoint.sh"'),
('FLAGS', f'"{image.flags}"'),
('LINKER', f'"{linker}"'),
('MAINTAINER', config['metadata']['maintainer']),
('OPTIONAL_FLAGS', f'"{image.optional_flags}"'),
('OS', image.os.to_triple() or 'unknown'),
('TARGET', image.target),
('UBUNTU_VERSION', ubuntu_version),
('URL', config['metadata']['url']),
('USERNAME', config['options']['username']),
('VCS_URL', config['metadata']['vcs-url']),
]
# Replace the contents and write the output to file.
outfile = f'{HOME}/docker/images/Dockerfile.{image.target}'
contents = self.replace(contents, replacements)
self.write_file(outfile, contents, False)
def configure_vcpkg_dockerfile(self, base='ubuntu'):
'''Configure only the vcpkg Dockefile.'''
# This is a base image shared by multiple builds.
contents = []
with open(f'{HOME}/docker/Dockerfile.{base}.in', 'r') as file:
contents.append(file.read())
with open(f'{HOME}/docker/Dockerfile.vcpkg.in', 'r') as file:
contents.append(file.read())
contents = '\n'.join(contents)
# Replace the contents and write the output to file.
replacements = [
('UBUNTU_VERSION', ubuntu_version),
]
outfile = f'{HOME}/docker/pkgimages/Dockerfile.vcpkg'
contents = self.replace(contents, replacements)
self.write_file(outfile, contents, False)
    def configure_package_dockerfile(
        self,
        image,
        compiler=None,
        compiler_version=None,
        conan_system=None,
        meson_system=None,
        vcpkg_system=None,
    ):
        '''Configure a Dockerfile with package managers enabled.

        Defaults: GCC at the pinned major version, with the conan, meson,
        and vcpkg system names derived from the image's OS. Writes
        `docker/pkgimages/Dockerfile.<target>`.
        '''
        if compiler is None:
            compiler = 'gcc'
        if compiler_version is None:
            compiler_version = gcc_major
        if conan_system is None:
            conan_system = image.os.to_conan()
        if meson_system is None:
            meson_system = image.os.to_meson()
        if vcpkg_system is None:
            vcpkg_system = image.os.to_vcpkg()
        template = f'{HOME}/docker/Dockerfile.package.in'
        outfile = f'{HOME}/docker/pkgimages/Dockerfile.{image.target}'
        self.configure(template, outfile, False, [
            ('COMPILER', compiler),
            ('COMPILER_VERSION', f'"{compiler_version}"'),
            ('CONAN_SYSTEM', conan_system),
            ('CPU_FAMILY', image.family),
            ('IMAGE_USER', config['options']['username']),
            ('LINKAGE', image.linkage),
            ('MESON_SYSTEM', meson_system),
            ('PROCESSOR', image.processor),
            ('REPOSITORY', config['metadata']['repository']),
            ('SYSROOT', f'"{config["options"]["sysroot"]}"'),
            ('TARGET', image.target),
            ('TRIPLE', image.triple),
            # NOTE(review): USERNAME reads config['metadata'] here, while
            # every other USERNAME replacement in this file reads
            # config['options'] — confirm this is intentional (e.g. a
            # registry username) and not a typo.
            ('USERNAME', config['metadata']['username']),
            ('VCPKG_SYSTEM', vcpkg_system),
        ])
def configure_cmake(self, image, template, replacements):
'''Configure a CMake template.'''
replacements = replacements + [
('PROCESSOR', image.processor),
('OS', image.os.to_cmake()),
('USERNAME', config["options"]["username"]),
]
contents = []
with open(template, 'r') as file:
contents.append(file.read())
with open(f'{HOME}/cmake/toolchain-include.cmake.in', 'r') as file:
contents.append(file.read())
contents = '\n'.join(contents)
# Replace the contents and write the output to file.
cmake = f'{HOME}/cmake/toolchain/{image.target}.cmake'
contents = self.replace(contents, replacements)
self.write_file(cmake, contents, False)
    def configure_symlinks(self, image, template, replacements):
        '''Configure a symlink template.

        Appends the shared image-derived replacement keys to the
        caller-supplied ones and renders the template to
        `symlink/toolchain/<target>.sh` (third argument True marks the
        output as executable, matching `self.configure` usage elsewhere).
        '''
        # NOTE(review): replacement order may matter when one key is a
        # substring of another (FLAGS vs OPTIONAL_FLAGS) — order preserved.
        replacements = replacements + [
            ('CC_CPU_LIST', image.cc_cpu_list),
            ('FLAGS', image.cflags),
            ('HARDCODED', image.hardcoded_cpulist),
            ('LD_LIBRARY_PATH', image.ld_library_path),
            ('LD_PRELOAD', image.ld_preload),
            ('OPTIONAL_FLAGS', image.optional_cflags),
            ('RUN_CPU_LIST', image.run_cpu_list),
            ('TRIPLE', image.triple),
            ('USERNAME', config["options"]["username"]),
        ]
        symlink = f'{HOME}/symlink/toolchain/{image.target}.sh'
        self.configure(template, symlink, True, replacements)
    def configure_android(self, image):
        '''Configure an Android-SDK image.

        Renders the Android Dockerfile, CMake toolchain file, and symlink
        script, then a derived package-manager Dockerfile using the NDK's
        clang at its major version.
        '''
        # Configure the dockerfile.
        template = f'{HOME}/docker/Dockerfile.android.in'
        self.configure_dockerfile(image, template, [
            ('ARCH', image.arch),
            ('TOOLCHAIN', image.toolchain),
        ])
        # Configure the CMake toolchain.
        cmake_template = f'{HOME}/cmake/android.cmake.in'
        self.configure_cmake(image, cmake_template, [
            ('ABI', image.abi),
            ('NDK_DIRECTORY', config['android']['ndk_directory']),
            ('SDK_VERSION', config['android']['sdk_version']),
        ])
        # Configure the symlinks.
        symlink_template = f'{HOME}/symlink/android.sh.in'
        self.configure_symlinks(image, symlink_template, [
            ('NDK_DIRECTORY', config['android']['ndk_directory']),
            ('PREFIX', f'{image.prefix}-linux-{image.system}'),
            ('SDK_VERSION', config['android']['sdk_version']),
            ('TOOLCHAIN', image.toolchain),
        ])
        # Build derived images with package managers enabled.
        # Only want the major version, Conan fails otherwise.
        compiler_version = config['android']['clang_version']
        # NOTE(review): assumes clang_version begins with digits; re.match
        # returns None otherwise and .group(1) would raise AttributeError.
        major_version = re.match(r'^(\d+).*$', compiler_version).group(1)
        self.configure_package_dockerfile(image, 'clang', major_version)
def configure_buildroot(self, image):
'''Configure a buildroot image.'''
# Get the proper dependent parameters for our image.
if image.symlink_sysroot:
cmake_template = f'{HOME}/cmake/buildroot-qemu.cmake.in'
symlink_template = f'{HOME}/symlink/buildroot-qemu-sysroot.sh.in'
elif image.qemu:
cmake_template = f'{HOME}/cmake/buildroot-qemu.cmake.in'
symlink_template = f'{HOME}/symlink/buildroot-qemu.sh.in'
else:
cmake_template = f'{HOME}/cmake/buildroot.cmake.in'
symlink_template = f'{HOME}/symlink/buildroot.sh.in'
if image.use_32:
template = f'{HOME}/docker/Dockerfile.buildroot32.in'
else:
template = f'{HOME}/docker/Dockerfile.buildroot.in'
self.configure_dockerfile(image, template, [
('ARCH', image.processor),
('CONFIG', image.config),
])
# Configure the CMake toolchain.
self.configure_cmake(image, cmake_template, [
('TRIPLE', image.config),
])
# Configure the symlinks.
self.configure_symlinks(image, symlink_template, [
('ARCH', image.processor),
('TRIPLE', image.triple),
])
# Build derived images with package managers enabled.
if image.os == OperatingSystem.Linux or image.os == OperatingSystem.Windows:
self.configure_package_dockerfile(image)
def configure_crosstool(self, image):
'''Configure a crosstool-NG image.'''
# Configure the dockerfile.
if image.patches:
template = f'{HOME}/docker/Dockerfile.crosstool-patch.in'
files = []
for patch in image.patches:
files += glob.glob(f'diff/{patch}.*')
patches = [f'COPY ["{i}", "/src/diff/"]' for i in files]
patches = '\n'.join(patches)
else:
template = f'{HOME}/docker/Dockerfile.crosstool.in'
patches = ''
self.configure_dockerfile(image, template, [
('ARCH', image.processor),
('CONFIG', image.config),
('PATCH', patches),
])
# Get the proper dependent parameters for our image.
if image.os == OperatingSystem.BareMetal:
cmake_template = f'{HOME}/cmake/crosstool-elf.cmake.in'
symlink_template = f'{HOME}/symlink/crosstool.sh.in'
elif image.qemu:
cmake_template = f'{HOME}/cmake/crosstool-os-qemu.cmake.in'
symlink_template = f'{HOME}/symlink/crosstool-qemu.sh.in'
else:
| |
#!/usr/bin/python
#
# x2z2
# BT Nodes for Testing, ID, Solving
#
# Solve using the x^2 + y^2 method Craig uses
# for puma joint 2 (eqn 4.65 p 118)
#
# BH 2/2/17
#
# BH : Dec-21: SIMPLIFY! After squaring and summing,
# if a one-unk equation is identified, just add
# it to the list of one-unk equations (in a persistent way).
#
# To reflect this change we rename it to x2y2_transform!
# also please forgive occasional references to x2z2 instead of x2y2
# they are the same!!
#
# Copyright 2021 University of Washington
# Developed by <NAME> and <NAME>
# BioRobotics Lab, University of Washington
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sympy as sp
import numpy as np
from sys import exit
from ikbtfunctions.helperfunctions import *
from ikbtbasics.kin_cl import *
from ikbtbasics.ik_classes import * # special classes for Inverse kinematics in sympy
from ikbtfunctions.ik_robots import *
# Declare the sympy symbols used by the tests below (sum-of-angles th_23
# and the end-effector position components Px, Py, Pz).
sp.var('th_23 Px Py Pz')
class test_x2z2(b3.Action): # tester for your ID
    '''BT leaf that stages blackboard data for testing x2z2_transform.

    Two setups, selected by the blackboard key 'test_number':
      1 -- a minimal hand-built matrix equation mimicking the Puma
           x^2 + y^2 structure (Craig eqn 4.65, p. 118);
      2 -- the full Puma 560 kinematics loaded via the pickle cache.
    Both paths leave eqns_1u / eqns_2u / eqns_3pu, unknowns, and Robot on
    the blackboard for the transform leaf to consume.
    '''
    def tick(self, tick):
        test_number = tick.blackboard.get('test_number')
        assert(test_number in [1, 2]), ' BAD TEST NUMBER'
        if(test_number == 1):
            # Simplistic test in which we
            # just set up bb data for testing (not really a test!)
            Td = ik_lhs() # basic LHS template for TEST
            Ts = sp.zeros(4)
            # Hand-built entries matching the Puma squared-sum structure.
            Td[0,3] = sp.cos(th_1)*Px + sp.sin(th_1)*Py
            Ts[0,3] = a_3*sp.cos(th_23) - d_4*sp.sin(th_23) + a_2 * sp.cos(th_2)
            Td[2,3] = -Pz
            Ts[2,3] = a_3*sp.sin(th_23) + d_4*sp.cos(th_23) + a_2 * sp.sin(th_2)
            testm = matrix_equation(Td,Ts)
            R = Robot()
            R.mequation_list = [testm]
            ud1 = unknown(th_1)
            uth2 = unknown(th_2)
            uth3 = unknown(th_3)
            uth23 = unknown(th_23)
            uth4 = unknown(th_4)
            uth5 = unknown(th_5)
            variables = [ud1, uth2, uth23, uth3, uth4, uth5]
            R.generate_solution_nodes(variables) #for the solution graph
            # Mark th_1 as (trivially) solved so the transform can run.
            ud1.solutions.append(a_3) # placeholder
            ud1.nsolutions = 1
            ud1.set_solved(R,variables) # needed for this test
            #uth23.set_solved(R,variables) # needed for this test
            R.sum_of_angles_transform(variables) # should add th_23=th_2+th_3 to list
            [L1, L2, L3p] = R.scan_for_equations(variables) # lists of 1unk and 2unk equations
            tick.blackboard.set('eqns_1u', L1)
            tick.blackboard.set('eqns_2u', L2)
            tick.blackboard.set('eqns_3pu', L3p)
            tick.blackboard.set('unknowns',variables)
            tick.blackboard.set('Robot',R)
            return b3.SUCCESS
        if(test_number == 2):
            # tests in the context of the full Puma solution
            #
            #
            # The famous Puma 560 (solved in Craig)
            #
            robot = 'Puma'
            [dh_Puma560, vv_Puma, params_Puma, pvals_Puma, unk_Puma] = robot_params('Puma')
            dh = dh_Puma560
            vv = vv_Puma
            variables = unk_Puma # variables aka unknowns
            params = params_Puma
            pvals = pvals_Puma
            ################## (all robots) ######################
            ## make sure each unknown knows its position (index)
            i = 0
            for u in variables :
                u.n = i
                i+=1
            print('Testing x2z2transform with Puma Kinematics')
            testflag = False # deprecated but needed(!)
            # read kinematic model from pickle file / or compute it from scratch
            [M, R, variables ] = kinematics_pickle(robot, dh, params, pvals, vv, variables, testflag)
            #def kinematics_pickle(rname, dh, constants, pvals, vv, unks, test):
            ## check the pickle in case DH params were changed in robot_params making the
            # pickle obsolete.
            check_the_pickle(dh_Puma560, M.DH)
            R.name = 'Puma x2z2 Test Robot'
            # set th_1 to solved
            variables [0].solutions.append(a_3)
            variables [0].nsolutions = 1
            variables [0].set_solved(R,variables ) # needed for this test
            print('x2z2 setup for Test 2: generate SOA equations:\n')
            R.sum_of_angles_transform(variables ) # should add th_23=th_2+th_3 to list
            [L1, L2, L3p] = R.scan_for_equations(variables ) # lists of 1unk and 2unk equations
            tick.blackboard.set('eqns_1u', L1)
            tick.blackboard.set('eqns_2u', L2)
            tick.blackboard.set('eqns_3pu', L3p)
            tick.blackboard.set('unknowns', variables )
            tick.blackboard.set('Robot', R)
            return b3.SUCCESS
class x2z2_transform(b3.Action):
    # Eff Dec 2021, x2z2 is NOW a transform which only generates a 1-unk equation
    # for *other* leaves to solve.
    '''Square-and-sum (x^2 + y^2) transform (Craig eqn 4.65, p. 118).

    Looks for two 2-unknown equations containing Px/Py/Pz whose squared
    and summed left-hand sides contain no unknowns; if the squared and
    summed right-hand sides (after sum-of-angles substitution) contain
    exactly one unknown, the new 1-unknown equation is appended to
    Robot.kequation_aux_list for other solver leaves to consume.
    '''
    def __init__(self):
        super().__init__()
        self.SolvedOneFlag = False # turn off this expensive leaf after it has worked once
    def tick(self, tick):
        #if self.SolvedOneFlag: # we will only get lucky with this method once (HACK!)
            #print(' we already used x2z2 method')
            #return b3.FAILURE
        unknowns = tick.blackboard.get('unknowns') # the current list of unknowns
        R = tick.blackboard.get('Robot')
        one_unk = tick.blackboard.get('eqns_1u')
        two_unk = tick.blackboard.get('eqns_2u')
        more_unk = tick.blackboard.get('eqns_3pu')
        u = tick.blackboard.get('curr_unk')
        if(self.BHdebug):
            print("x2z2, running: ", self.Name)
            print('len(3p): ', len(more_unk))
            print('len(2): ', len(two_unk))
            print('len(1): ', len(one_unk))
            print("currently looking at: ", u.symbol)
            #sp.pprint(Tm.Ts)
        solved = False
        f1 = False
        f2 = False
        eqn_ls = []
        # note: x2y2 is somewhat costly,
        # This is a hack exploiting it seems to be needed only for
        # Th 2 or Th_3 on the Puma and Kawasaki robot
        # BUGFIX: the original condition `not u.symbol == th_3 or
        # u.symbol == th_2` parsed as `(not A) or B`, which returned
        # FAILURE for th_2 and only ever admitted th_3. Parenthesize the
        # disjunction so both th_2 and th_3 proceed, as the comment above
        # intends.
        if not (u.symbol == th_3 or u.symbol == th_2):
            return b3.FAILURE
        # Keep only the two-unknown equations mentioning a position term.
        for e in (two_unk): # only two-unk list is enough
            tmp = e.RHS + e.LHS
            if (tmp.has(Py) or tmp.has(Px) or tmp.has(Pz)):
                eqn_ls.append(e)
        found = False
        if (self.BHdebug):
            print("found potential eqn list: ", len(eqn_ls))
            print(eqn_ls)
        # find any two equations and add their squares of each side
        # ( we can't count on just [0,3],[2,3])
        #
        for i in range(len(eqn_ls)):
            eqn1 = eqn_ls[i]
            r1 = eqn1.RHS
            l1 = eqn1.LHS
            for j in range(i+1, len(eqn_ls)):
                eqn2 = eqn_ls[j]
                r2 = eqn2.RHS
                l2 = eqn2.LHS
                if (self.BHdebug):
                    print("currently evaluating: ")
                    print(eqn1)
                    print(eqn2)
                    print("\n")
                # Square and sum the LHS pair; only useful when the result
                # contains no unknowns at all.
                temp_l = l1*l1 + l2*l2
                temp_l = temp_l.simplify()
                if count_unknowns(unknowns, temp_l) == 0:
                    temp_r = r1*r1 + r2*r2
                    temp_r = temp_r.simplify()
                    # Apply sum-of-angles expansions before re-counting.
                    temp_r = temp_r.subs(soa_expansions)
                    temp_r = temp_r.simplify()
                    if count_unknowns(unknowns, temp_r) == 1:
                        print("X2Z2 found a useful eqn!")
                        found = True
                if found:
                    break
            if found:
                break
        if not found:
            print("x2y2 did not find suitable eqns")
            return b3.FAILURE
        # find the current unknown (exactly one symbol remains in temp_r)
        for u in unknowns:
            if temp_r.has(u.symbol):
                unknown = u
                unk = u.symbol
        if self.BHdebug: print('x2y2: The unknown variable is: ', unk)
        if not unknown.solved:
            ######################################### NEW ###############
            ## NEW instead of solving it here, we just put it in the list
            # of one-unknown equations so that some other leaf can solve it
            unknown.solvemethod += 'x2z2 transform and ' # only part of soln.
            R.kequation_aux_list.append(kc.kequation(temp_l,temp_r))
            #############################################################
        tick.blackboard.set('Robot', R)
        tick.blackboard.set('unknowns',unknowns) # the current list of unknowns
        self.SolvedOneFlag = True
        # we have a new 1-unk equation for other leafs to solve
        # but nothing new is solved yet.
        return b3.SUCCESS
#######################################################################
# Test code:
class TestSolver010(unittest.TestCase):
def setUp(self):
self.DB = True # debug flag
print('\n\n============ Setup to Test x2z2 Transform ==================')
return
def runTest(self):
self.test_x2z2()
def test_x2z2(self):
ik_tester = b3.BehaviorTree() # two leaves to 1) setup test 2) carry out test
bb = b3.Blackboard()
### A BT node to set everything up for tests
x2z2_setup = test_x2z2() # fcn can set up test 2 ways
#x2z2_setup.BHdebug = True
x2z2_setup.Name = "Setup"
### BT node for the actual tests
x2z2_work = x2z2_transform() # same node on two different setups
x2z2_work.Name = "x2z2 ID/Transform"
#x2z2_work.BHdebug | |
> 0) and (len(abbreviation) > 0):
result = lookup_abbreviation(abbreviation, line, search_end, position)
if not result:
## print(2,start,search_end)
result = abbreviation_match(abbreviation, previous_words, line, search_end, position
# @semanticbeeng @todo not used , False, False
)
if not result and alt_abbreviation:
result = lookup_abbreviation(abbreviation, line, search_end, position)
if not result:
result = abbreviation_match(alt_abbreviation, previous_words, line, search_end, position
# @semanticbeeng @todo not used , False, False
)
if result:
abbreviation = alt_abbreviation
## print(result)
if result and ((not OK_jargon(result[2])) or (not OK_abbrev_antec(result[2]))):
result = None # @semanticbeeng static type
if result:
if result[4]: ## if the abbreviation is one off
ARG2_begin = pattern.start() + position + 2
ARG2_string = abbreviation[1:]
ARG2_end = ARG2_begin + len(abbreviation) - 1
else:
ARG2_begin = pattern.start() + position + 1
ARG2_string = abbreviation
ARG2_end = ARG2_begin + len(abbreviation)
# @semanticbeeng @todo use Abbr explicitly
ARG1_begin = result[0]
ARG1_end = result[1]
ARG1_string = result[2]
output_type = result[3]
elif len(abbreviation) == 1:
## single capital letters divided by spaces, are an alternative match, e.g., (J R A)
## Abbreviations can also contain periods
## -- we will try removing up to 7 spaces/periods
abbreviation = re.sub('[. ]', '', pattern.group(2), 7) ## remove upto 7 spaces/periods from abbreviation
if (start == 0) and (previous_line != '') and (len(previous_words) < len(abbreviation)):
more_words = get_more_words(previous_line, (1 + len(abbreviation) - len(previous_words)))
if more_words and (len(more_words) > 0):
offset_adjustment = len(previous_line)
more_words.extend(previous_words)
result = lookup_abbreviation(abbreviation, previous_line + line, search_end + offset_adjustment, position - offset_adjustment)
if not result:
## print(3)
result = abbreviation_match(abbreviation, more_words, previous_line + line, search_end + offset_adjustment, position - offset_adjustment
# @semanticbeeng @todo not used , False, False
)
else:
result = lookup_abbreviation(abbreviation, line, search_end, position)
## print(4)
if not result:
result = abbreviation_match(abbreviation, previous_words, line, search_end, position,
# @semanticbeeng @todo not used previous_line, more_words
)
if result:
if result[4]:
ARG2_begin = pattern.start() + position + 2
ARG2_string = abbreviation[1:]
else:
ARG2_begin = pattern.start() + position + 1
ARG2_string = pattern.start(2)
ARG2_end = start + pattern.end() - 1
ARG1_begin = result[0]
ARG1_end = result[1]
ARG1_string = result[2]
output_type = result[3]
elif ' ' in pattern.group(0):
## possibility of a multi-word item in parentheses (antecedent) matching the word right before
## the parentheses (abbreviation), i.e., the backwards case
# @semanticbeeng static type @todo previous_word: Optional[Match] = None
if pattern.end() > 3:
previous_word: Optional[Match[str]] = re.search('([a-zA-ZΑ-ϖ][a-zA-Z0-9-/Α-ϖ]*[a-zA-Z0-9Α-ϖ])[^a-z0-9]$', line[:pattern.start()])
else:
previous_word = None # @semanticbeeng static type @todo
if previous_word:
abbreviation = previous_word.group(1)
antecedent_string = pattern.group(0)[1:-1]
result = lookup_abbreviation(abbreviation, antecedent_string, len(pattern.group(0)) - 2, position,
backwards_borders=[previous_word.start(), pattern.end()])
if not result:
forward_words = remove_empties(word_split_pattern.split(antecedent_string.rstrip(' ')))
line_offset = len(pattern.group(0)) - 2
## line_offset effects begin and end
## *** 57 ***
result = abbreviation_match(abbreviation, forward_words, antecedent_string, line_offset, position
# @semanticbeeng @todo not used , previous_line, more_words
)
if result:
ARG1_string = result[2]
ARG2_string = abbreviation
ARG1_begin = pattern.start() + position + 1
ARG1_end = ARG1_begin + len(ARG1_string) ## result[1]-1
ARG2_begin = previous_word.start() + position ## result[0] ## correct for lookup, but not for calculated
ARG2_end = ARG2_begin + len(ARG2_string)
output_type = result[3]
## must adjust offsets for backwards situation
## perhaps provide offsets explicitly (start position = start of first word + offset)
## end position = end position of pattern + offset
if result:
if not invalid_abbreviation(ARG2_string) and not invalid_abbrev_of(ARG2_string, ARG1_string):
#
# @semanticbeeng @todo extract @data entity definitions
#
ARG2: Dict[str, str] = make_nyu_entity(output_type, ARG2_string, ARG2_begin, ARG2_end)
ARG1: Dict[str, str] = make_nyu_entity(output_type, ARG1_string, ARG1_begin, ARG1_end)
relation_start: int = min(ARG1_begin, ARG2_begin)
relation_end: int = max(ARG1_end, ARG2_end)
output.extend([ARG1, ARG2,
{'CLASS': 'RELATION', 'TYPE': 'ABBREVIATE', 'ID': make_nyu_id('RELATION'),
'START': relation_start, 'END': relation_end,
'ARG1': ARG1['ID'], 'ARG2': ARG2['ID'],
'ARG1_TEXT': ARG1_string, 'ARG2_TEXT': ARG2_string, 'GRAM_SIGNAL': 'PARENTHESES'}])
## not currently using context_string or context
if not result:
start = pattern.start() + 1
else:
last_start = start
start = pattern.end()
if extend_abbreviation_context(pattern, line):
extend_antecedent = True
else:
extend_antecedent = False
pattern = parentheses_pattern_match(line, start, 2)
## pattern = parentheses_pattern2.search(line,start)
# import json
# print("############ ")
# print("get_next_abbreviate_relations ======>>>>>>>> " + json.dumps(output))
# print("############ ")
return (output)
#
#
#
def record_abbreviate_dictionary(fulltext: str, abbreviation: str) -> None:
    '''Record an abbreviation and its regularized full form in both
    module-level lookup dictionaries (abbr -> full forms, full -> abbrs),
    avoiding duplicate entries.
    '''
    ## use naturally occuring form of abbreviations (otherwise causes
    ## problems, e.g., if abbreviation is OR)
    key = abbreviation
    value = regularize_match_string1(fulltext)
    # abbr -> full forms  # @semanticbeeng @todo @arch global state mutation
    full_forms = Abbreviate.abbr_to_full_dict.setdefault(key, [])
    if value not in full_forms:
        full_forms.append(value)
    # full form -> abbrs  # @semanticbeeng @todo @arch global state mutation
    ### also need to update full_to_abbr_dict *** 57 ***
    abbreviations = Abbreviate.full_to_abbr_dict.setdefault(value, [])
    if key not in abbreviations:
        abbreviations.append(key)
#
# @semanticbeeng other than `ABBREVIATE`, none of these strings is referenced in code
# @semanticbeeng @todo encapsulate
#
# Role name of ARG1 for each relation TYPE/SUBTYPE (used when serializing
# relations in write_fact_file).
ARG1_NAME_TABLE: Dict[str, str] = {'EXEMPLIFY': 'SUBCLASS', 'DISCOVER': 'INVENTOR', 'MANUFACTURE': 'MAKER', 'SUPPLY': 'SUPPLIER',
                                   'ORIGINATE': 'INVENTOR', 'ALIAS': 'FULLNAME', 'ABBREVIATE': 'FULLNAME', 'BETTER_THAN': 'BETTER',
                                   'BASED_ON': 'DERIVED', 'CONTRAST': 'THEME', 'CORROBORATION': 'THEME', 'CO-CITATION': 'THEME',
                                   'POSITIVE': 'JUDGE', 'NEGATIVE': 'JUDGE', 'SIGNIFICANT': 'JUDGE', 'PRACTICAL': 'JUDGE', 'STANDARD': 'JUDGE', 'EMPHASIZED_TERM': 'THEME', 'COMPONENT': 'PART',
                                   'FEATURE': 'FEATURE'}
# Role name of ARG2 for each relation TYPE/SUBTYPE.
ARG2_NAME_TABLE: Dict[str, str] = {'EXEMPLIFY': 'SUPERCLASS', 'DISCOVER': 'INVENTION', 'MANUFACTURE': 'PRODUCT', 'SUPPLY': 'PRODUCT',
                                   'ORIGINATE': 'INVENTION', 'ALIAS': 'FULLNAME', 'ABBREVIATE': 'SHORTNAME', 'BETTER_THAN': 'WORSE',
                                   'BASED_ON': 'ORIGINAL', 'CONTRAST': 'THEME', 'CORROBORATION': 'THEME', 'CO-CITATION': 'THEME',
                                   'POSITIVE': 'THEME', 'NEGATIVE': 'THEME', 'SIGNIFICANT': 'THEME', 'PRACTICAL': 'THEME', 'STANDARD': 'THEME', 'EMPHASIZED_TERM': 'THEME', 'COMPONENT': 'WHOLE',
                                   'FEATURE': 'BEARER'}
#
# Serializes and persists a list of Fact entities
# @semanticbeeng @todo define FACT entity: is that different than ABBR?
#
def write_fact_file(output: List[Dict[str, str]], outfile: File[ABBR]) -> None:
    '''Serialize each record in `output` as one FACT-style line to `outfile`.

    For RELATION records, the ARG1/ARG2 role names are looked up by
    SUBTYPE (preferred) or TYPE in the two module-level name tables and
    appended right after the corresponding ARG attribute.
    '''
    global ARG1_NAME_TABLE
    global ARG2_NAME_TABLE
    # global FACT_STYLE @semanticbeeng not used
    # Attribute output order for each serialized record.
    keys = ['ID', 'TYPE', 'SUBTYPE', 'START', 'END', 'ARG1', 'ARG2', 'ARG1_TEXT', 'ARG2_TEXT', 'GRAM_SIGNAL', 'TEXT_SIGNAL', 'TEXT']
    # @semanticbeeng @todo @jep
    # with outfile.openText(mode='w') as outstream:
    outstream = outfile.openText('w')
    for out in output:
        if out['CLASS'] == 'RELATION':
            if 'SUBTYPE' in out:
                look_up = out['SUBTYPE']
            else:
                look_up = out['TYPE']
            # NOTE(review): ARG1_NAME/ARG2_NAME persist across iterations;
            # the locals() asserts below only fire for records carrying an
            # 'ARG1'/'ARG2' key — confirm non-RELATION records never do.
            ARG1_NAME: str = ARG1_NAME_TABLE[look_up]
            ARG2_NAME: str = ARG2_NAME_TABLE[look_up]
        fact: str = out['CLASS']
        for key in keys:
            if key in out:
                value = out[key]
                # Integers are written bare; everything else double-quoted.
                if type(value) == int:
                    value = str(value)
                else:
                    value = '"' + value + '"'
                fact = fact + ' ' + key + '=' + value
                if key == 'ARG1':
                    assert 'ARG1_NAME' in locals()
                    fact = fact + ' ARG1_NAME="' + ARG1_NAME + '"'
                elif key == 'ARG2':
                    assert 'ARG2_NAME' in locals()
                    fact = fact + ' ARG2_NAME="' + ARG2_NAME + '"'
        outstream.write(fact + os.linesep)
#
#
#
def bad_patent_line(line: str) -> bool:
    '''A very long line containing no lowercase letters is treated as junk
    (e.g. OCR/table residue in patent text) and should be skipped.
    '''
    if len(line) <= 5000:
        return (False)
    return (re.search('[a-z]', line) is None)
#
# @semanticbeeng @todo what is this doing?
#
def triplify(inlist: List[Dict[str, str]]) -> List[List[Dict[str, str]]]:
    '''Split a flat list into consecutive 3-element groups.

    The list length must be a multiple of 3 (ARG1/ARG2/relation records);
    otherwise a diagnostic is printed and an empty list is returned.
    '''
    if len(inlist) % 3 != 0:
        print('problem with triplify for:', inlist)
        return [] # [[{}]] # @semanticbeeng @ todo static typing
    return [inlist[i:i + 3] for i in range(0, len(inlist), 3)]
#
# Find abbreviations for the content of a file of @data TXT3
#
def run_abbreviate_on_lines(lines: List[str], abbr_file: File[ABBR], reset_dictionary: bool=False) -> List[Dict[str, str]]:
    '''Find abbreviation relations across `lines`, recording JARGON pairs
    in the module dictionaries and writing any results to `abbr_file`.

    Returns the flat list of entity/relation records produced.
    '''
    # global abbr_to_full_dict
    # global full_to_abbr_dict
    global id_number
    output: List[Dict[str, str]] = []
    # Running character offset of the current line within the whole text.
    start = 0
    previous_line: Optional[str] = None
    if reset_dictionary:
        Abbreviate.abbr_to_full_dict.clear() # @semanticbeeng @todo @arch global state mutation
        Abbreviate.full_to_abbr_dict.clear() # @semanticbeeng @todo @arch global state mutation
        id_number = 0
    # @semanticbeeng @todo not used with open(abbr_file, 'w') as outstream:
    abbr_file.openText('w') # create even empty
    for line in lines:
        line = line.replace(os.linesep, ' ')
        end: int = start + len(line)
        trimmed_line = line.strip(' \t')
        out: List[Dict[str, str]] = []
        # Skip mostly-whitespace lines and junk patent lines.
        if ((trimmed_line.count('\t') + trimmed_line.count(' ')) > (len(trimmed_line) / 3)) or bad_patent_line(trimmed_line):
            pass
        else:
            out = get_next_abbreviate_relations(previous_line, line, start)
        if out:
            # Each result is an (ARG1, ARG2, relation) triple; record
            # JARGON abbreviations in the lookup dictionaries.
            for triple in triplify(out):
                if triple[1]['CLASS'] == 'ENAMEX':
                    argtype = triple[1]['TYPE']
                else:
                    argtype = triple[1]['CLASS']
                if argtype == 'JARGON':
                    record_abbreviate_dictionary(triple[0]['TEXT'], triple[1]['TEXT'])
            output.extend(out)
        start = end
        previous_line = line
    # @semanticbeeng @todo @arch global state control - this fails
    # abbr_to_full_dict = dictionary.freeze_dict(abbr_to_full_dict)
    # full_to_abbr_dict = dictionary.freeze_dict(full_to_abbr_dict)
    if output:
        # @semanticbeeng @todo @dataFlow
        write_fact_file(output, abbr_file)
    return (output)
#
#
#
def save_abbrev_dicts(abbr_to_full_file: File[ABBR], full_to_abbr_file: File[ABBR]) -> None:
with abbr_to_full_file.openText(mode='w') as abbr_full_stream, full_to_abbr_file.openText(mode='w') as full_abbr_stream:
for key in Abbreviate.abbr_to_full_dict:
abbr_full_stream.write(interior_white_space_trim(key))
for value in Abbreviate.abbr_to_full_dict[key]:
value = interior_white_space_trim(value)
abbr_full_stream.write('\t' + value)
abbr_full_stream.write(os.linesep)
for key | |
from datetime import date
# Latest plausible year for recorded dates: next calendar year
# (used below as the exclusive upper bound of year ranges).
MAX_YEAR = date.today().year + 1
# these are the acceptable ranges for answers from the master codebook
RANGE_LIST = {
'g1_01d': range(1, 31 + 1) + [99],
'g1_01m': range(1, 12 + 1) + [99],
'g1_01y': range(1900, MAX_YEAR) + [9999],
'g1_05': [1, 2, 8, 9],
'g1_06d': range(1, 31 + 1) + [99],
'g1_06m': range(1, 12 + 1) + [99],
'g1_06y': range(1900, MAX_YEAR) + [9999],
'g1_07a': range(0, 120 + 1) + [999],
'g1_07b': range(0, 12 + 1) + [99],
'g1_07c': range(0, 31 + 1) + [99],
'g1_08': [1, 2, 3, 4, 5, 8, 9],
'g1_09': [1, 2, 3, 4, 9],
'g1_10': range(0, 99 + 1),
'g2_03ad': range(1, 31 + 1) + [99],
'g2_03am': range(1, 12 + 1) + [99],
'g2_03ay': range(1900, MAX_YEAR) + [9999],
'g2_03bd': range(1, 31 + 1) + [99],
'g2_03bm': range(1, 12 + 1) + [99],
'g2_03by': range(1900, MAX_YEAR) + [9999],
'g2_03cd': range(1, 31 + 1) + [99],
'g2_03cm': range(1, 12 + 1) + [99],
'g2_03cy': range(1900, MAX_YEAR) + [9999],
'g2_03dd': range(1, 31 + 1) + [99],
'g2_03dm': range(1, 12 + 1) + [99],
'g2_03dy': range(1900, MAX_YEAR) + [9999],
'g2_03ed': range(1, 31 + 1) + [99],
'g2_03em': range(1, 12 + 1) + [99],
'g2_03ey': range(1900, MAX_YEAR) + [9999],
'g2_03fd': range(1, 31 + 1) + [99],
'g2_03fm': range(1, 12 + 1) + [99],
'g2_03fy': range(1900, MAX_YEAR) + [9999],
'g3_01': [0, 1, 8, 9],
'g4_02': [1, 2, 8, 9],
'g4_03a': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 99],
'g4_05': [1, 2, 3, 4, 9],
'g4_08': [0, 1, 8, 9],
'g5_01d': range(1, 31 + 1) + [99],
'g5_01m': range(1, 12 + 1) + [99],
'g5_01y': range(1900, MAX_YEAR) + [999, 9999],
'g5_02': [1, 2, 8, 9],
'g5_03d': range(1, 31 + 1) + [99],
'g5_03m': range(1, 12 + 1) + [99],
'g5_03y': range(1900, MAX_YEAR) + [999, 9999],
'g5_04a': range(0, 120 + 1),
'g5_04b': range(0, 12 + 1),
'g5_04c': range(0, 31 + 1),
'g5_05': [1, 2, 3, 4, 5, 8, 9],
'g5_06a': [1, 2, 3, 4, 9],
'g5_06b': range(0, 99 + 1),
'g5_07': [0, 1, 8, 9],
'a1_01_1': [0, 1, 8, 9],
'a1_01_2': [0, 1, 8, 9],
'a1_01_3': [0, 1, 8, 9],
'a1_01_4': [0, 1, 8, 9],
'a1_01_5': [0, 1, 8, 9],
'a1_01_6': [0, 1, 8, 9],
'a1_01_7': [0, 1, 8, 9],
'a1_01_8': [0, 1, 8, 9],
'a1_01_9': [0, 1, 8, 9],
'a1_01_10': [0, 1, 8, 9],
'a1_01_11': [0, 1, 8, 9],
'a1_01_12': [0, 1, 8, 9],
'a1_01_13': [0, 1, 8, 9],
'a1_01_14': [0, 1, 8, 9],
'a2_01a': [1, 2, 3, 4, 5, 6, 8, 9],
'a2_02': [0, 1, 8, 9],
'a2_03a': [1, 2, 3, 4, 5, 6, 8, 9],
'a2_04': [1, 2, 3, 8, 9],
'a2_05': [1, 2, 3, 8, 9],
'a2_06': [0, 1, 8, 9],
'a2_07': [0, 1, 8, 9],
'a2_08a': [1, 2, 3, 4, 5, 6, 8, 9],
'a2_09_1a': [1, 2, 3, 4, 5, 8, 9],
'a2_09_2a': [0, 1, 2, 3, 4, 5, 8, 9],
'a2_10': [0, 1, 8, 9],
'a2_11': [0, 1, 8, 9],
'a2_12': [0, 1, 8, 9],
'a2_13': [0, 1, 8, 9],
'a2_14': [0, 1, 8, 9],
'a2_15a': [1, 2, 3, 4, 5, 6, 8, 9],
'a2_16': [0, 1, 8, 9],
'a2_17': [0, 1, 8, 9],
'a2_18': [0, 1, 8, 9],
'a2_19': [1, 2, 3, 8, 9],
'a2_20': [0, 1, 8, 9],
'a2_21': [0, 1, 8, 9],
'a2_22a': [1, 2, 3, 4, 5, 6, 8, 9],
'a2_23': [0, 1, 8, 9],
'a2_24a': [1, 2, 3, 4, 5, 6, 8, 9],
'a2_25': [0, 1, 8, 9],
'a2_26a': [1, 2, 3, 4, 5, 6, 8, 9],
'a2_27': [0, 1, 8, 9],
'a2_28a': [1, 2, 3, 4, 5, 6, 8, 9],
'a2_29': [0, 1, 8, 9],
'a2_30': [0, 1, 8, 9],
'a2_31': [0, 1, 8, 9],
'a2_32': [0, 1, 8, 9],
'a2_33a': [1, 2, 3, 4, 5, 6, 8, 9],
'a2_34': [0, 1, 8, 9],
'a2_35': [0, 1, 8, 9],
'a2_36': [0, 1, 8, 9],
'a2_37a': [1, 2, 3, 4, 5, 6, 8, 9],
'a2_38': [1, 2, 8, 9],
'a2_39_1': [1, 2, 3, 4, 8, 9],
'a2_39_2': [1, 2, 3, 4, 8, 9],
'a2_40': [0, 1, 8, 9],
'a2_41a': [1, 2, 3, 4, 5, 6, 8, 9],
'a2_42': [0, 1, 8, 9],
'a2_43': [0, 1, 8, 9],
'a2_44': [1, 2, 3, 8, 9],
'a2_45': [0, 1, 8, 9],
'a2_46a': [1, 2, 3, 4, 8, 9],
'a2_47': [0, 1, 8, 9],
'a2_48a': [1, 2, 3, 4, 5, 6, 8, 9],
'a2_49': [0, 1, 8, 9],
'a2_50': [0, 1, 8, 9],
'a2_51': [0, 1, 8, 9],
'a2_52': [0, 1, 8, 9],
'a2_53': [0, 1, 8, 9],
'a2_54a': [1, 2, 3, 4, 5, 6, 8, 9],
'a2_55': [0, 1, 8, 9],
'a2_56': [0, 1, 8, 9],
'a2_57': [0, 1, 8, 9],
'a2_58a': [1, 2, 3, 4, 5, 6, 8, 9],
'a2_59': [1, 2, 3, 8, 9],
'a2_60': [0, 1, 8, 9],
'a2_61': [0, 1, 8, 9],
'a2_62a': [1, 2, 3, 4, 5, 6, 8, 9],
'a2_63_1': [1, 2, 8, 9],
'a2_63_2': [1, 2, 8, 9],
'a2_64': [0, 1, 8, 9],
'a2_65a': [1, 2, 3, 4, 5, 6, 8, 9],
'a2_66': [1, 2, 8, 9],
'a2_67': [0, 1, 8, 9],
'a2_68a': [1, 2, 3, 4, 5, 6, 8, 9],
'a2_69': [0, 1, 8, 9],
'a2_70a': [1, 2, 3, 4, 5, 6, 8, 9],
'a2_71': [1, 2, 8, 9],
'a2_72': [0, 1, 8, 9],
'a2_73a': [1, 2, 3, 4, 5, 6, 8, 9],
'a2_74': [0, 1, 8, 9],
'a2_75': [1, 2, 8, 9],
'a2_76a': [1, 2, 3, 4, 5, 6, 8, 9],
'a2_77': [0, 1, 8, 9],
'a2_78': [0, 1, 8, 9],
'a2_79a': [1, 2, 3, 4, 5, 6, 8, 9],
'a2_80': [1, 2, 8, 9],
'a2_81': [0, 1, 8, 9],
'a2_82': [0, 1, 8, 9],
'a2_83a': [1, 2, 3, 4, 5, 6, 8, 9],
'a2_84': [0, 1, 8, 9],
'a2_85': [0, 1, 8, 9],
'a2_86a': [1, 2, 3, 4, 5, 6, 8, 9],
'a2_87_1': [0, 1, 8, 9],
'a2_87_2': [0, 1, 8, 9],
'a2_87_3': [0, 1, 8, 9],
'a2_87_4': [0, 1, 8, 9],
'a2_87_5': [0, 1, 8, 9],
'a2_87_6': [0, 1, 8, 9],
'a2_87_7': [0, 1, 8, 9],
'a2_87_8': [0, 1, 8, 9],
'a2_87_9': [0, 1, 8, 9],
'a2_87_10a': [0, 1, 8, 9],
'a3_01': [0, 1, 8, 9],
'a3_02': [0, 1, 8, 9],
'a3_03': [0, 1, 8, 9],
'a3_04': [0, 1, 8, 9],
'a3_05': [0, 1, 8, 9],
'a3_06': [0, 1, 8, 9],
'a3_07': [0, 1, 8, 9],
'a3_08a': [1, 2, 3, 4, 5, 6, 8, 9],
'a3_09': [0, 1, 8, 9],
'a3_10': [0, 1, 8, 9],
'a3_11a': [1, 2, 3, 4, 5, 6, 8, 9],
'a3_12': [0, 1, 8, 9],
'a3_13': [0, 1, 8, 9],
'a3_14': [0, 1, 8, 9],
'a3_15': [0, 1, 8, 9],
'a3_16a': [1, 2, 3, 4, 5, 6, 8, 9],
'a3_17': [0, 1, 8, 9],
'a3_18': [0, 1, 8, 9],
'a3_19': [0, 1, 8, 9],
'a3_20': [0, 1, 8, 9],
'a4_01': [0, 1, 8, 9],
'a4_02_1': [0, 1, 8, 9],
'a4_02_2': [0, 1, 8, 9],
'a4_02_3': [0, 1, 8, 9],
'a4_02_4': [0, 1, 8, 9],
'a4_02_5a': [0, 1, 8, 9],
'a4_02_6': [0, 1, 8, 9],
'a4_02_7': [0, 1, 8, 9],
'a4_05': [0, 1, 8, 9],
'a4_06': [1, 2, 3, 8, 9],
'a5_01_1': [0, 1, 8, 9],
'a5_01_2': [0, 1, 8, 9],
'a5_01_3': [0, 1, 8, 9],
'a5_01_4': [0, 1, 8, 9],
'a5_01_5': [0, 1, 8, 9],
'a5_01_6': [0, 1, 8, 9],
'a5_01_7': [0, 1, 8, 9],
'a5_01_8': [0, 1, 8, 9],
'a5_01_9a': [0, 1, 8, 9],
'a5_02': [0, 1, 8, 9],
'a5_03': [0, 1, 8, 9],
'a5_04a': [1, 2, 3, 4, 5, 6, 8, 9],
'a6_01': [0, 1, 8, 9],
'a6_02_1': [0, 1, 8, 9],
'a6_02_2': [0, 1, 8, 9],
'a6_02_3': [0, 1, 8, 9],
'a6_02_4': [0, 1, 8, 9],
'a6_02_5': [0, 1, 8, 9],
'a6_02_6': [0, 1, 8, 9],
'a6_02_7': [0, 1, 8, 9],
| |
# The following source code was originally obtained from:
# https://github.com/rootpy/rootpy/blob/master/rootpy/tree/tree.py
# ==============================================================================
# Copyright (c) 2012-2017, The rootpy developers
# All rights reserved.
#
# Please refer to LICENSE.rootpy for the license terms.
# ==============================================================================
"""This module provides Tree."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fnmatch
from six.moves import range
from collections import OrderedDict
from .defaults import log
from .treebuffer import TreeBuffer
class BaseTree(object):
"""
A base class for Tree.
"""
    def __init__(self, tree,
                 read_branches_on_demand=False,
                 always_read=None):
        """
        Wrap ``tree`` (expected to behave like a ROOT TTree).

        :param tree: the underlying tree object; must be iterable and
            support ``in`` (checked below as a minimal sanity test)
        :param read_branches_on_demand: if True, read branches lazily on
            attribute access during iteration instead of all at once
        :param always_read: list of branch names to read on every entry
            even when reading on demand
        """
        if not hasattr(tree, '__iter__') or not hasattr(tree, '__contains__'):
            raise RuntimeError("unable to initialize Tree")
        self._tree = tree
        # only set _buffer if it does not exist
        if not hasattr(self, '_buffer'):
            self._buffer = TreeBuffer()
        self._read_branches_on_demand = read_branches_on_demand
        if always_read is None:
            self._always_read = []
        else:
            self._always_read = always_read
        #self._branch_cache = {}
        # NOTE(review): _branch_cache above is commented out but __iter__
        # still uses it -- confirm where it is expected to be created.
        # Setting _inited last is essential: it switches __setattr__ and
        # __getattr__ into their delegating behavior.
        self._inited = True # affects __setattr__ and __getattr__ behaviors
@classmethod
def branch_type(cls, branch):
"""
Return the string representation for the type of a branch
"""
typename = branch.GetClassName()
if not typename:
leaf = branch.GetListOfLeaves()[0]
typename = leaf.GetTypeName()
# check if leaf has multiple elements
leaf_count = leaf.GetLeafCount()
if leaf_count:
length = leaf_count.GetMaximum()
else:
length = leaf.GetLen()
if length > 1:
typename = '{0}[{1:d}]'.format(typename, length)
return typename
@classmethod
def branch_is_supported(cls, branch):
"""
Currently the branch must only have one leaf but the leaf may have one
or multiple elements
"""
return branch.GetNleaves() == 1
def create_buffer(self, ignore_unsupported=False):
"""
Create this tree's TreeBuffer
"""
bufferdict = OrderedDict()
for branch in self.iterbranches():
# only include activated branches
if not self.GetBranchStatus(branch.GetName()):
continue
if BaseTree.branch_is_supported(branch):
bufferdict[branch.GetName()] = BaseTree.branch_type(branch)
elif not ignore_unsupported:
raise TypeError(
"branch `{0}` is unsupported".format(branch.GetName()))
else:
log.warning(
"ignore unsupported branch `{0}`".format(branch.GetName()))
self.set_buffer(TreeBuffer(
bufferdict,
ignore_unsupported=ignore_unsupported))
    def update_buffer(self, treebuffer, transfer_objects=False):
        """
        Merge items from a TreeBuffer into this Tree's TreeBuffer.

        Parameters
        ----------
        treebuffer : rootpy.tree.buffer.TreeBuffer
            The TreeBuffer to merge into this Tree's buffer
        transfer_objects : bool, optional (default=False)
            If True then all objects and collections on the input buffer will
            be transferred to this Tree's buffer.
        """
        # thin wrapper: delegate with set_buffer's default (non-creating,
        # non-ignoring) behavior
        self.set_buffer(treebuffer, transfer_objects=transfer_objects)
    def set_buffer(self, treebuffer,
                   branches=None,
                   ignore_branches=None,
                   create_branches=False,
                   visible=True,
                   ignore_missing=False,
                   ignore_duplicates=False,
                   transfer_objects=False):
        """
        Set the Tree buffer

        Parameters
        ----------
        treebuffer : rootpy.tree.buffer.TreeBuffer
            a TreeBuffer
        branches : list, optional (default=None)
            only include these branches from the TreeBuffer
        ignore_branches : list, optional (default=None)
            ignore these branches from the TreeBuffer
        create_branches : bool, optional (default=False)
            NOTE(review): accepted but unused in this implementation -- the
            body only sets addresses of existing branches; confirm against
            the full rootpy implementation.
        visible : bool, optional (default=True)
            NOTE(review): accepted but unused in this implementation.
        ignore_missing : bool, optional (default=False)
            If True then any branches in this buffer that do not exist in the
            Tree will be ignored (with a warning), otherwise a ValueError
            will be raised.
        ignore_duplicates : bool, optional (default=False)
            NOTE(review): accepted but unused in this implementation.
        transfer_objects : bool, optional (default=False)
            If True, all tree objects and collections will be transferred from
            the buffer into this Tree's buffer.
        """
        # determine branches to keep while preserving branch order
        if branches is None:
            branches = treebuffer.keys()
        if ignore_branches is not None:
            branches = [b for b in branches if b not in ignore_branches]
        for name in branches:
            value = treebuffer[name]
            if self.has_branch(name):
                # point the existing branch at this buffer's storage
                self.SetBranchAddress(name, value)
            elif not ignore_missing:
                raise ValueError(
                    "Attempting to set address for "
                    "branch `{0}` which does not exist".format(name))
            else:
                log.warning(
                    "Skipping entry in buffer for which no "
                    "corresponding branch in the "
                    "tree exists: `{0}`".format(name))
        self._buffer.update(treebuffer)
        if transfer_objects:
            self._buffer.set_objects(treebuffer)
def activate(self, branches, exclusive=False):
"""
Activate branches
Parameters
----------
branches : str or list
branch or list of branches to activate
exclusive : bool, optional (default=False)
if True deactivate the remaining branches
"""
if exclusive:
self.SetBranchStatus('*', 0)
if isinstance(branches, str):
branches = [branches]
for branch in branches:
if '*' in branch:
matched_branches = self._glob(branch)
for b in matched_branches:
self.SetBranchStatus(b, 1)
elif self.has_branch(branch):
self.SetBranchStatus(branch, 1)
def deactivate(self, branches, exclusive=False):
"""
Deactivate branches
Parameters
----------
branches : str or list
branch or list of branches to deactivate
exclusive : bool, optional (default=False)
if True activate the remaining branches
"""
if exclusive:
self.SetBranchStatus('*', 1)
if isinstance(branches, str):
branches = [branches]
for branch in branches:
if '*' in branch:
matched_branches = self._glob(branch)
for b in matched_branches:
self.SetBranchStatus(b, 0)
elif self.has_branch(branch):
self.SetBranchStatus(branch, 0)
@property
def branches(self):
"""
List of the branches
"""
return [branch for branch in self.GetListOfBranches()]
def iterbranches(self):
"""
Iterator over the branches
"""
for branch in self.GetListOfBranches():
yield branch
@property
def branchnames(self):
"""
List of branch names
"""
return [branch.GetName() for branch in self.GetListOfBranches()]
def iterbranchnames(self):
"""
Iterator over the branch names
"""
for branch in self.iterbranches():
yield branch.GetName()
def _glob(self, patterns, exclude=None):
"""
Return a list of branch names that match ``pattern``.
Exclude all matched branch names which also match a pattern in
``exclude``. ``exclude`` may be a string or list of strings.
Parameters
----------
patterns: str or list
branches are matched against this pattern or list of patterns where
globbing is performed with '*'.
exclude : str or list, optional (default=None)
branches matching this pattern or list of patterns are excluded
even if they match a pattern in ``patterns``.
Returns
-------
matches : list
List of matching branch names
"""
if isinstance(patterns, str):
patterns = [patterns]
if isinstance(exclude, str):
exclude = [exclude]
matches = []
for pattern in patterns:
matches += fnmatch.filter(self.iterbranchnames(), pattern)
if exclude is not None:
for exclude_pattern in exclude:
matches = [match for match in matches
if not fnmatch.fnmatch(match, exclude_pattern)]
return matches
def __iter__(self):
"""
Iterator over the entries in the Tree.
"""
if not self._buffer:
log.warning("buffer does not exist or is empty")
self.create_buffer()
if self._read_branches_on_demand:
self._buffer.set_tree(self)
# drop all branches from the cache
self.DropBranchFromCache('*')
for attr in self._always_read:
try:
branch = self._branch_cache[attr]
except KeyError: # one-time hit
branch = self.GetBranch(attr)
if not branch:
raise AttributeError(
"branch `{0}` specified in "
"`always_read` does not exist".format(attr))
self._branch_cache[attr] = branch
# add branches that we should always read to cache
self.AddBranchToCache(branch)
for i in range(self.GetEntries()):
# Only increment current entry.
# getattr on a branch will then GetEntry on only that branch
# see ``TreeBuffer.get_with_read_if_cached``.
self.LoadTree(i)
for attr in self._always_read:
# Always read branched in ``self._always_read`` since
# these branches may never be getattr'd but the TreeBuffer
# should always be updated to reflect their current values.
# This is useful if you are iterating over an input tree
# and writing to an output tree that shares the same
# TreeBuffer but you don't getattr on all branches of the
# input tree in the logic that determines which entries
# to keep.
self._branch_cache[attr].GetEntry(i)
self._buffer._entry.set(i)
yield self._buffer
self._buffer.next_entry()
self._buffer.reset_collections()
else:
for i in range(self.GetEntries()):
# Read all activated branches (can be slow!).
self.GetEntry(i)
self._buffer._entry.set(i)
yield self._buffer
self._buffer.reset_collections()
    def __setattr__(self, attr, value):
        # Until ``_inited`` is set (the very last statement of __init__)
        # attributes are stored normally on the instance; afterwards only
        # attributes that already exist on the instance are set directly.
        # Anything else is assumed to be a branch value and is forwarded
        # to the TreeBuffer.
        if '_inited' not in self.__dict__ or attr in self.__dict__:
            super(BaseTree, self).__setattr__(attr, value)
            return
        try:
            setattr(self._buffer, attr, value)
        except AttributeError:
            # the buffer rejected it: re-raise against this class so the
            # error message names the Tree rather than the buffer
            raise AttributeError(
                "`{0}` instance has no attribute `{1}`".format(
                    self.__class__.__name__, attr))
    def __getattr__(self, attr):
        # Only called when normal attribute lookup fails.  Before __init__
        # completes (``_inited`` unset) fail fast to avoid infinite
        # recursion through ``self._buffer`` / ``self._tree`` below.
        if '_inited' not in self.__dict__:
            raise AttributeError(
                "`{0}` instance has no attribute `{1}`".format(
                    self.__class__.__name__, attr))
        # branch values live on the buffer; anything else is delegated to
        # the wrapped tree object
        try:
            return getattr(self._buffer, attr)
        except AttributeError:
            try:
                return getattr(self._tree, attr)
            except AttributeError:
                raise AttributeError(
                    "`{0}` instance has no attribute `{1}`".format(
                        self.__class__.__name__, attr))
    def __len__(self):
        """
        Number of entries in the tree (same as GetEntries).
        """
        return self.GetEntries()
    def __contains__(self, branch):
        """
        Whether a branch with this name exists (same as has_branch).
        """
        return self.has_branch(branch)
def has_branch(self, branch):
"""
Determine | |
# Source repo: edwarnicke/bomsh
# File: scripts/bomsh_hook2.py
#! /bin/env python3
# Copyright (c) 2022 Cisco and/or its affiliates.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Bomsh hookup script to record raw info of input/output file checksums during software build.
Use by Bomsh or Bomtrace.
December 2021, <NAME>
"""
import argparse
import sys
import os
import subprocess
# for special filename handling with shell
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote
TOOL_VERSION = '0.0.1'
VERSION = '%(prog)s ' + TOOL_VERSION
# verbosity levels used with verbose()
LEVEL_0 = 0
LEVEL_1 = 1
LEVEL_2 = 2
LEVEL_3 = 3
LEVEL_4 = 4
# parsed command-line arguments; presumably populated by argparse in a
# main routine outside this chunk -- TODO confirm
args = None
g_tmpdir = "/tmp"
g_create_bom_script = "/tmp/bomsh_create_bom.py"
# log files written by this hook
g_raw_logfile = "/tmp/bomsh_hook_raw_logfile"
g_trace_logfile = "/tmp/bomsh_hook_trace_logfile"
g_logfile = "/tmp/bomsh_hook_logfile"
# known compiler/linker binaries (matched by exact absolute path)
g_cc_compilers = ["/usr/bin/gcc", "/usr/bin/clang", "/usr/bin/cc", "/usr/bin/g++"]
g_cc_linkers = ["/usr/bin/ld", "/usr/bin/ld.bfd", "/usr/bin/gold"]
# list of binary converting programs of the same file
g_samefile_converters = ["/usr/bin/strip", "/usr/bin/ranlib", "/usr/bin/eu-strip", "./tools/objtool/objtool", "/usr/lib/rpm/debugedit", "/usr/lib/rpm/sepdebugcrcfix", "./scripts/sortextable"]
# commands after which a BOM id is embedded into the output file
g_embed_bom_after_commands = g_cc_compilers + g_cc_linkers + ["/usr/bin/eu-strip",]
# checksum of the last output file that had a BOM id embedded
g_last_embed_outfile_checksum = ''
# a flag to skip bom-id-embedding for a shell command
g_not_embed_bom_flag = False
#
# Helper routines
#########################
def verbose(string, level=1, logfile=None):
    """
    Log ``string`` when the global verbosity is at least ``level``.
    :param string: String to be printed
    :param level: Unsigned Integer, listing the verbose level
    :param logfile: file to write, if not provided, g_logfile is used
    """
    if args.verbose < level:
        return
    target = logfile if logfile else g_logfile
    if target:
        append_text_file(target, string + "\n")
    # stdout is deliberately not used: it may be closed when running
    # under the BOMSH hook, so messages go to the logfile only
def write_text_file(afile, text):
    """
    Overwrite a text file with a string.
    :param afile: the text file to write
    :param text: the string content to write
    :returns the number of characters written
    """
    with open(afile, 'w') as handle:
        return handle.write(text)
def append_text_file(afile, text):
    """
    Append a string to a text file, creating it when absent.
    :param afile: the text file to write
    :param text: the string content to append
    :returns the number of characters written
    """
    with open(afile, 'a+') as handle:
        return handle.write(text)
def read_text_file(afile):
    """
    Return the full content of a text file as a string.
    :param afile: the text file to read
    """
    with open(afile, 'r') as handle:
        return handle.read()
def get_shell_cmd_output(cmd):
    """
    Run the shell command ``cmd`` and return its stdout as a string.
    :param cmd: the shell command to execute
    """
    return subprocess.check_output(cmd, shell=True, universal_newlines=True)
# def find_specific_file_in_modification_time_order(builddir, filename):
def find_specific_file(builddir, filename):
    """
    Find all regular files named ``filename`` under the build dir,
    excluding symbolic links, by running the shell's find command.
    // Ordering by modification time is planned but not implemented.
    :param builddir: String, build dir of the workspace
    :param filename: String, a specific filename, like libosc.so/lib4arg.so
    :returns a list that contains all the matching file paths.
    """
    findcmd = "find " + cmd_quote(builddir) + " -type f -name '" + filename + "' -print || true "
    out = subprocess.check_output(findcmd, shell=True, universal_newlines=True)
    found = out.splitlines()
    if len(found) > 1:
        # multiple matches are suspicious but not fatal; record them
        verbose("Warning: filename: " + filename + " multiple files found: " + str(found), LEVEL_2)
    return found
############################################################
#### Start of shell command read/parse routines ####
############################################################
'''
Format of /tmp/bomsh_cmd file, which records shell command info:
pid: 75627 ppid: 75591 pgid: 73910
/home/OpenOSC/src
/usr/bin/gcc
gcc -DHAVE_CONFIG_H -I. -I.. -Wsign-compare -U_FORTIFY_SOURCE -fno-stack-protector -g -O2 -MT libopenosc_la-openosc_fortify_map.lo -MD -MP -MF .deps/libopenosc_la-openosc_fortify_map.Tpo -c openosc_fortify_map.c -fPIC -DPIC -o .libs/libopenosc_la-openosc_fortify_map.o
'''
def read_shell_command(shell_cmd_file):
    """
    Read a recorded shell command and return (pid, pwd, prog, argv_str).
    :param shell_cmd_file: the file that contains the shell command
    """
    contents = read_text_file(shell_cmd_file)
    lines = contents.splitlines()
    pid = ''
    # the "pid: ..." header line is optional; consume it when present
    if lines and contents.startswith("pid: "):
        pid = lines[0]
        lines = lines[1:]
    pwd = lines[0] if lines else ''
    prog = lines[1] if len(lines) > 1 else ''
    result = (pid, pwd, prog, '\n'.join(lines[2:]))
    verbose("cmd_file: " + shell_cmd_file + " return tuple: " + str(result), LEVEL_2)
    return result
############################################################
#### End of shell command read/parse routines ####
############################################################
def is_cc_compiler(prog):
    """
    Whether a program (absolute path) is a known C compiler.
    Matched by exact path against the g_cc_compilers list.
    """
    return prog in g_cc_compilers
def is_cc_linker(prog):
    """
    Whether a program (absolute path) is a known C linker.
    Matched by exact path against the g_cc_linkers list.
    """
    return prog in g_cc_linkers
def is_golang_prog(prog):
    """
    Whether a program (absolute path) is the golang compiler or linker.
    /usr/lib/go-1.13/pkg/tool/linux_amd64/compile, /usr/lib/go-1.13/pkg/tool/linux_amd64/link
    /usr/lib/golang/pkg/tool/linux_amd64/compile, /usr/lib/golang/pkg/tool/linux_amd64/link
    """
    # golang toolchain binaries live under .../lib/go*/pkg/tool/...
    if "lib/go" not in prog or "pkg/tool" not in prog:
        return False
    basename = os.path.basename(prog)
    return basename in ("compile", "link")
def get_input_files_from_subfiles(subfiles, outfile):
    """
    Return the input files only, filtering the output file out.
    :param subfiles: the list of all files, including the outfile
    :param outfile: the output file to exclude from the result
    """
    return [afile for afile in subfiles if afile != outfile]
'''
root@<PASSWORD>:/home/linux-kernel-gitdir# more arch/x86/boot/compressed/piggy.S
.section ".rodata..compressed","a",@progbits
.globl z_input_len
z_input_len = 8076046
.globl z_output_len
z_output_len = 30524908
.globl input_data, input_data_end
input_data:
.incbin "arch/x86/boot/compressed/vmlinux.bin.gz"
input_data_end:
root@<PASSWORD>:/home/linux-kernel-gitdir#
'''
def handle_linux_kernel_piggy_object(outfile, infiles, pwd):
    """
    Special handling on Linux kernel piggy.o build, which piggybacks the
    compressed vmlinux in its data section via an .incbin directive in
    piggy.S.
    :param outfile: the output file
    :param infiles: the list of input files
    :param pwd: the present working directory for this gcc command
    :returns the input list, possibly extended with the vmlinux binary
    """
    if not infiles or not outfile.endswith("piggy.o"):
        return infiles
    piggy_source = ''
    for afile in infiles:
        if afile.endswith("piggy.S"):
            piggy_source = os.path.abspath(os.path.join(pwd, afile))
            break
    if not piggy_source or not os.path.isfile(piggy_source):
        return infiles
    vmlinux_bin = ''
    for line in read_text_file(piggy_source).splitlines():
        if line.startswith('.incbin "'):
            # strip the directive prefix and the trailing '.gz"' so we
            # reference vmlinux.bin rather than vmlinux.bin.gz
            vmlinux_bin = line[9: len(line) - 4]
            vmlinux_bin = os.path.abspath(os.path.join(pwd, vmlinux_bin))
            break
    if vmlinux_bin and os.path.isfile(vmlinux_bin):
        # add vmlinux.bin file to the list of input files
        return infiles + [vmlinux_bin, ]
    return infiles
'''
root@<KEY>:/home/gohello# more /tmp/go-build426453512/b001/importcfg
# import config
packagefile fmt=/tmp/go-build426453512/b002/_pkg_.a
packagefile runtime=/tmp/go-build426453512/b005/_pkg_.a
root@<KEY>:/home/gohello# more /tmp/go-build819882048/b001/importcfg.link
packagefile _/home/gohello=/tmp/go-build819882048/b001/_pkg_.a
packagefile errors=/tmp/go-build819882048/b003/_pkg_.a
packagefile internal/fmtsort=/tmp/go-build819882048/b012/_pkg_.a
root@a<PASSWORD>:/home/gohello#
'''
def handle_golang_importcfg(outfile, infiles, pwd):
    """
    Special handling on golang link commands: importcfg.link lists
    "packagefile name=/path/_pkg_.a" entries whose archives are real
    inputs of the link step.
    :param outfile: the output file
    :param infiles: the list of input files (extended in place)
    :param pwd: the present working directory for this golang command
    :returns infiles, extended with package archives from importcfg.link
    """
    if not infiles:
        return infiles
    importcfg_file = ''
    for afile in infiles:
        # only add it for link, not for compile, otherwise, the search_cve
        # result is too redundant
        if afile.endswith("importcfg.link"):
            if not afile.startswith('/'):
                afile = os.path.join(pwd, afile)
            importcfg_file = os.path.abspath(afile)
            break
    if not importcfg_file or not os.path.isfile(importcfg_file):
        return infiles
    packages = []
    for line in read_text_file(importcfg_file).splitlines():
        if line.startswith('packagefile '):
            # take the path on the right of "name=/path"
            packages.append(line.split("=")[1])
    # add packages to the list of infiles
    infiles.extend(packages)
    return infiles
def get_all_subfiles_in_gcc_cmdline(gccline, pwd, prog):
"""
Returns the input/output files of the gcc shell command line.
:param gccline: the gcc command line
:param pwd: the present working directory for this gcc command
:param prog: the program binary
"""
if " -o " not in gccline and " -c " not in gccline:
verbose("Warning: no output file for gcc line: " + gccline)
return ('', [])
tokens = gccline.split()
if " -o " in gccline:
oindex = tokens.index("-o")
output_file = tokens[oindex + 1]
else: # must have " -c " in gcc_line
compile_file = tokens[-1] # let's use the last token as compile file
tokens2 = compile_file.split(".")
tokens2[-1] = "o"
output_file = ".".join(tokens2)
if output_file[0] != '/':
output_file = os.path.join(pwd, output_file)
output_file = os.path.abspath(output_file)
skip_token_list = ("-MT", "-MF", "-x", "-I", "-B", "-L", "-isystem", "-iquote", "-idirafter", "-iprefix", "-isysroot", "-iwithprefix", "-iwithprefixbefore", "-imultilib", "-include")
subfiles = []
skip_token = False # flag for skipping one single token
for token in tokens[1:]:
# | |
g.dinner_choice == 'none':
no_dinners += 1
if s.num_dinners:
guests = ndb.get_multi(s.dinner_keys[:s.num_dinners])
for g in guests:
if g.first_name and g.last_name and g.dinner_choice:
ndinners += 1
s.total_golfers = total_golfers
s.adjusted_dinners = total_golfers - no_dinners + s.num_dinners
s.flag_dinners = True if no_dinners != s.num_golfers_no_dinner else False
s.net_due = s.payment_due - s.payment_made
if s.discount:
s.net_due -= s.discount
s.net_due = max(0, s.net_due)
if (s.net_due == 0 and
(golfers_complete < s.num_golfers or ndinners < s.num_golfers + s.num_dinners)):
sponsors.append(s)
nav = []
template_values = {
'sponsors': sponsors,
'incomplete': 'incomplete',
'nav': nav,
'capabilities': caps
}
self.response.out.write(render_to_string('viewsponsors.html', template_values))
class ViewUnpaid(webapp2.RequestHandler):
    # Admin view: list confirmed sponsors with an outstanding balance.
    def get(self):
        t = tournament.get_tournament()
        caps = capabilities.get_current_user_caps()
        # viewing registrations requires a signed-in user with permission
        if caps is None or not caps.can_view_registrations:
            show_login_page(self.response.out, self.request.uri)
            return
        q = Sponsor.query(ancestor = t.key)
        q = q.filter(Sponsor.confirmed == True)
        q = q.order(Sponsor.sort_name)
        sponsors = []
        for s in q:
            # count golfers who explicitly opted out of dinner
            no_dinners = 0
            total_golfers = s.num_golfers + s.num_golfers_no_dinner
            if total_golfers:
                golfers = ndb.get_multi(s.golfer_keys[:total_golfers])
                for g in golfers:
                    if g.dinner_choice == 'none':
                        no_dinners += 1
            s.total_golfers = total_golfers
            s.adjusted_dinners = total_golfers - no_dinners + s.num_dinners
            # flag a mismatch between actual opt-outs and the registered
            # "no dinner" golfer count
            s.flag_dinners = True if no_dinners != s.num_golfers_no_dinner else False
            s.net_due = s.payment_due - s.payment_made
            if s.discount:
                s.net_due -= s.discount
            # never display a negative balance
            s.net_due = max(0, s.net_due)
            if s.net_due > 0:
                sponsors.append(s)
        nav = []
        template_values = {
            'sponsors': sponsors,
            'incomplete': 'unpaid',
            'nav': nav,
            'capabilities': caps
        }
        self.response.out.write(render_to_string('viewsponsors.html', template_values))
class ViewDinnerSurvey(webapp2.RequestHandler):
    # Admin view: dinner survey for confirmed sponsors that have golfers
    # and a contact email address.
    def get(self):
        t = tournament.get_tournament()
        caps = capabilities.get_current_user_caps()
        if caps is None or not caps.can_view_registrations:
            show_login_page(self.response.out, self.request.uri)
            return
        query = Sponsor.query(ancestor = t.key)
        query = query.filter(Sponsor.confirmed == True)
        query = query.order(Sponsor.sort_name)
        # only sponsors with at least one golfer and an email to contact
        sponsors = [s for s in query if s.num_golfers > 0 and s.email]
        nav = []
        template_values = {
            'sponsors': sponsors,
            'nav': nav,
            'capabilities': caps
        }
        self.response.out.write(render_to_string('dinnersurvey.html', template_values))
class ViewUnconfirmed(webapp2.RequestHandler):
    # Admin view: list sponsors whose registration is not yet confirmed.
    def get(self):
        t = tournament.get_tournament()
        caps = capabilities.get_current_user_caps()
        if caps is None or not caps.can_view_registrations:
            show_login_page(self.response.out, self.request.uri)
            return
        q = Sponsor.query(ancestor = t.key)
        q = q.filter(Sponsor.confirmed == False)
        q = q.order(Sponsor.timestamp)
        sponsors = q.fetch(100)
        for s in sponsors:
            s.total_golfers = s.num_golfers + s.num_golfers_no_dinner
            # NOTE(review): sibling handlers compute adjusted_dinners as
            # total_golfers - no_dinners + num_dinners; kept as-is here to
            # preserve the original display -- confirm which is intended.
            s.adjusted_dinners = s.num_golfers + s.num_dinners
            # BUG FIX: the original referenced an undefined ``no_dinners``
            # here, raising NameError at runtime.  Count the golfers who
            # opted out of dinner the same way ViewUnpaid does.
            no_dinners = 0
            if s.total_golfers:
                golfers = ndb.get_multi(s.golfer_keys[:s.total_golfers])
                for g in golfers:
                    if g.dinner_choice == 'none':
                        no_dinners += 1
            s.flag_dinners = True if no_dinners != s.num_golfers_no_dinner else False
            s.net_due = s.payment_due - s.payment_made
            if s.discount:
                s.net_due -= s.discount
            # never display a negative balance
            s.net_due = max(0, s.net_due)
        nav = []
        template_values = {
            'sponsors': sponsors,
            'incomplete': 'unconfirmed',
            'nav': nav,
            'capabilities': caps
        }
        self.response.out.write(render_to_string('viewsponsors.html', template_values))
class ViewGolfers(webapp2.RequestHandler):
    # Admin view: list every golfer of every confirmed sponsor, grouped by
    # sponsor, plus a tally of shirt sizes.
    def get(self):
        t = tournament.get_tournament()
        caps = capabilities.get_current_user_caps()
        if caps is None or not caps.can_view_registrations:
            show_login_page(self.response.out, self.request.uri)
            return
        # page caching is currently disabled:
        # html = memcache.get('%s/admin/view/golfers' % t.name)
        # if html:
        #     self.response.out.write(html)
        #     return
        all_golfers = []
        counter = 1
        q = Sponsor.query(ancestor = t.key)
        q = q.filter(Sponsor.confirmed == True)
        q = q.order(Sponsor.sort_name)
        for s in q:
            total_golfers = s.num_golfers + s.num_golfers_no_dinner
            if total_golfers == 0:
                continue
            golfers = ndb.get_multi(s.golfer_keys[:total_golfers])
            for g in golfers:
                all_golfers.append(ViewGolfer(t, s, g, counter))
                counter += 1
            # pad with blank placeholder golfers when the sponsor registered
            # more golfers than have been entered so far
            for i in range(len(golfers) + 1, total_golfers + 1):
                g = Golfer(tournament = t.key, sponsor = s.key, sequence = i,
                           sort_name = '', first_name = '', last_name = '', gender = '',
                           company = '', address = '', city = '', phone = '', email = '',
                           handicap_index = 0.0, average_score = '', ghin_number = '',
                           shirt_size = '', dinner_choice = '')
                all_golfers.append(ViewGolfer(t, s, g, counter))
                counter += 1
        # tally shirt sizes across all golfers (empty size -> 'unspecified')
        shirt_sizes = { }
        for g in all_golfers:
            key = g.golfer.shirt_size if g.golfer.shirt_size else 'unspecified'
            if not key in shirt_sizes:
                shirt_sizes[key] = 0
            shirt_sizes[key] += 1
        template_values = {
            'golfers': all_golfers,
            'shirt_sizes': shirt_sizes,
            'capabilities': caps
        }
        html = render_to_string('viewgolfers.html', template_values)
        self.response.out.write(html)
class ViewGolfersByName(webapp2.RequestHandler):
    """Renders every active golfer of the tournament, ordered by sort name."""
    def get(self):
        tourney = tournament.get_tournament()
        user_caps = capabilities.get_current_user_caps()
        if user_caps is None or not user_caps.can_view_registrations:
            show_login_page(self.response.out, self.request.uri)
            return
        query = (Golfer.query()
                 .filter(Golfer.tournament == tourney.key)
                 .filter(Golfer.active == True)
                 .order(Golfer.sort_name))
        all_golfers = []
        for position, g in enumerate(query, 1):
            # Each row needs its sponsor record for display next to the golfer.
            all_golfers.append(ViewGolfer(tourney, g.sponsor.get(), g, position))
        template_values = {
            'golfers': all_golfers,
            'capabilities': user_caps
        }
        self.response.out.write(render_to_string('viewgolfersbyname.html', template_values))
class UpdateHandicap(webapp2.RequestHandler):
def get(self):
t = tournament.get_tournament()
caps = capabilities.get_current_user_caps()
if caps is None or not caps.can_view_registrations:
show_login_page(self.response.out, self.request.uri)
return
start = 1
if self.request.get('start'):
start = int(self.request.get('start'))
all_golfers = []
q = Golfer.query()
q = q.filter(Golfer.tournament == t.key)
q = q.filter(Golfer.active == True)
q = q.order(Golfer.sort_name)
golfer_keys = q.fetch(offset = start - 1, limit = 21, keys_only = True)
prev_page_offset = 0 if start == 1 else max(1, start - 20)
next_page_offset = 0
if len(golfer_keys) == 21:
next_page_offset = start + 20
golfer_keys = golfer_keys[:20]
counter = start
golfers = ndb.get_multi(golfer_keys)
for g in golfers:
s = g.sponsor.get()
vg = ViewGolfer(t, s, g, counter)
h = hashlib.md5()
h.update(g.ghin_number or '-')
h.update(vg.handicap_index_str or '-')
h.update(g.average_score or '-')
h.update('t' if g.index_info_modified else 'f')
vg.index_hash = h.hexdigest()
all_golfers.append(vg)
counter += 1
template_values = {
'golfers': all_golfers,
'prev_page_offset': prev_page_offset,
'this_page_offset': start,
'next_page_offset': next_page_offset,
'count': len(golfers),
'capabilities': caps
}
html = render_to_string('viewgolfers-index.html', template_values)
self.response.out.write(html)
def post(self):
t = tournament.get_tournament()
caps = capabilities.get_current_user_caps()
if caps is None or not caps.can_view_registrations:
show_login_page(self.response.out, '/admin/view/golfers/handicap')
return
prev_page_offset = int(self.request.get('prev_page_offset'))
this_page_offset = int(self.request.get('this_page_offset'))
next_page_offset = int(self.request.get('next_page_offset'))
count = int(self.request.get('count'))
golfers_to_update = []
for i in range(this_page_offset, this_page_offset + count):
key = ndb.Key(urlsafe = self.request.get('key_%d' % i))
index_hash = self.request.get('hash_%d' % i)
ghin_number = self.request.get('ghin_%d' % i)
handicap_index = self.request.get('index_%d' % i)
average_score = self.request.get('avg_score_%d' % i)
modified_checkbox = self.request.get('modified_%d' % i)
h = hashlib.md5()
h.update(ghin_number or '-')
h.update(handicap_index or '-')
h.update(average_score or '-')
h.update('t' if modified_checkbox else 'f')
if h.hexdigest() == index_hash:
continue
try:
g = key.get()
except:
logging.error("Invalid key for golfer #%d" % i)
continue
logging.info("Updating handicap info for golfer #%d" % i)
g.ghin_number = ghin_number
if handicap_index:
try:
val = float(handicap_index)
if not g.has_index or val != g.handicap_index:
g.handicap_index = val
g.has_index = True
except ValueError:
logging.error("Invalid handicap index '%s'" % handicap_index)
else:
g.handicap_index = 0.0
g.has_index = False
g.average_score = average_score
g.index_info_modified = False
golfers_to_update.append(g)
if golfers_to_update:
ndb.put_multi(golfers_to_update)
if self.request.get('prevpage'):
self.redirect('/admin/view/golfers/handicap?start=%d' % prev_page_offset)
elif self.request.get('nextpage'):
self.redirect('/admin/view/golfers/handicap?start=%d' % next_page_offset)
else:
self.redirect('/admin/view/golfers/handicap?start=%d' % this_page_offset)
class JsonBuilder:
def __init__(self, t):
    """Build the JSON-ready teams/golfers/groups tables for tournament t,
    then cross-check the forward and reverse team-membership links."""
    self.t = t
    self.build_teams()
    self.build_golfers_and_groups()
    self.check_consistency()
def build_teams(self):
    """
    Build the list of teams, along with side data structures mapping golfers
    to teams and teams to golfers.

    golfers_by_team is an array indexed by (team_num - 1), where each element
    is an array of golfer IDs.
    teams_by_golfer_id_fwd is a table that maps each golfer id to a team,
    based on the forward links from the Team entity to Golfer entities.
    """
    self.teams = []
    self.teams_by_id = {}
    self.golfers_by_team = []
    self.golfer_nums_by_id = {}
    self.teams_by_golfer_id_fwd = {}
    q = Team.query(ancestor = self.t.key).order(Team.name)
    for t in q:
        t_id = t.key.id()
        team_num = len(self.teams) + 1
        team = {
            'team_num': team_num,
            'key': t_id,
            'name': t.name,
            'golfer_nums': [],
            'pairing_prefs': t.pairing or ''
        }
        self.teams.append(team)
        self.teams_by_id[t_id] = team_num
        golfer_ids = []
        if t.golfers:
            for g_key in t.golfers:
                g_id = g_key.id()
                golfer_ids.append(g_id)
                if g_id in self.teams_by_golfer_id_fwd:
                    # A golfer may only belong to one team; keep the first
                    # mapping and log the conflict.
                    other_team_num = self.teams_by_golfer_id_fwd[g_id]
                    # Fix: the original referenced the undefined name `teams`
                    # here (NameError); the team dicts live on self.teams.
                    other_team = self.teams[other_team_num - 1]
                    logging.warning("Golfer %d is contained by teams \"%s\" and \"%s\"" % (g_id, other_team['name'], t.name))
                else:
                    self.teams_by_golfer_id_fwd[g_id] = team_num
        self.golfers_by_team.append(golfer_ids)
def build_golfers_and_groups(self):
    """
    Build the list of golfers and the list of groups, along with a side data
    structure mapping golfers to teams.

    teams_by_golfer_id_rev is a table that maps each golfer id to a team,
    based on the reverse links from each Golfer entity to a Team entity.
    When there is disagreement, we use this mapping as the "truth".
    """
    self.golfers = []
    self.substitutes = []
    self.groups = []
    self.teams_by_golfer_id_rev = {}
    q = Sponsor.query(ancestor = self.t.key).filter(Sponsor.confirmed == True).order(Sponsor.sort_name)
    for s in q:
        total_golfers = s.num_golfers + s.num_golfers_no_dinner
        if total_golfers == 0:
            continue
        group_golfer_nums = []
        for g in ndb.get_multi(s.golfer_keys[:total_golfers]):
            g_id = g.key.id()
            golfer_num = len(self.golfers) + 1
            team = None
            team_num = 0
            if g.team:
                try:
                    team = g.team.get()
                except:
                    # Repair the golfer record when its team was deleted.
                    logging.warning("Dangling reference from golfer %d (sponsor id %d) to deleted team" % (g_id, s.sponsor_id))
                    g.team = None
                    g.put()
            if team:
                t_id = team.key.id()
                team_num = self.teams_by_id[t_id]
                # Fix: both warnings below referenced the undefined name `t`
                # (NameError); the team entity in scope is `team`.
                if not g_id in self.teams_by_golfer_id_fwd:
                    logging.warning("Golfer %d (sponsor id %d) refers to team \"%s\", but no team contains golfer" % (g_id, s.sponsor_id, team.name))
                elif self.teams_by_golfer_id_fwd[g_id] != team_num:
                    other_team_num = self.teams_by_golfer_id_fwd[g_id]
                    other_team = self.teams[other_team_num - 1]
                    logging.warning("Golfer %d (sponsor id %d) refers to team \"%s\", but is contained by team \"%s\"" % (g_id, s.sponsor_id, team.name, other_team['name']))
            else:
                t_id = -1
            self.teams_by_golfer_id_rev[g_id] = team_num
            # For the golfer record itself, prefer the team that actually
            # contains the golfer (forward links), falling back to no team.
            if g_id in self.teams_by_golfer_id_fwd:
                team_num = self.teams_by_golfer_id_fwd[g_id]
            else:
                team_num = 0
            if g.substitute:
                g_sub = g.substitute.get()
                hdcp_index = get_handicap_index(g_sub)
                if hdcp_index is not None:
                    hdcp_index = "%.1f" % hdcp_index
                else:
                    hdcp_index = "-"
                substitute = {
                    'key': g.substitute.id(),
                    'first_name': g_sub.first_name,
                    'last_name': g_sub.last_name,
                    'gender': g_sub.gender,
                    'ghin': g_sub.ghin_number,
                    'avg': g_sub.average_score,
                    'index': hdcp_index
                }
                self.substitutes.append(substitute)
                # 1-based index into self.substitutes; 0 means no substitute.
                substitute_index = len(self.substitutes)
            else:
                substitute_index = 0
            hdcp_index = get_handicap_index(g)
            if hdcp_index is not None:
                hdcp_index = "%.1f" % hdcp_index
            else:
                hdcp_index = "-"
            # Fingerprint of the team assignment and cart flag; the update
            # pass compares it to detect modified golfers.
            h = hashlib.md5()
            h.update(','.join(str(x) for x in [t_id, g.cart]))
            golfer = {
                'golfer_num': golfer_num,
                'group_num': len(self.groups) + 1,
                'team_num': team_num,
                'key': g_id,
                'first_name': g.first_name,
                'last_name': g.last_name,
                'gender': g.gender,
                'index': hdcp_index,
                'cart': g.cart,
                'substitute': substitute_index,
                'md5': h.hexdigest()
            }
            if team_num:
                team = self.teams[team_num - 1]
                team['golfer_nums'].append(golfer_num)
            else:
                group_golfer_nums.append(golfer_num)
            self.golfers.append(golfer)
        group = {
            'group_num': len(self.groups) + 1,
            'key': s.key.id(),
            'id': str(s.sponsor_id),
            'first_name': s.first_name,
            'last_name': s.last_name,
            'golfer_nums': group_golfer_nums,
            'pairing_prefs': s.pairing
        }
        self.groups.append(group)
def check_consistency(self):
for t in range(1, len(self.teams) + 1):
team = self.teams[t - 1]
for g_id in self.golfers_by_team[t - 1]:
if not g_id in self.teams_by_golfer_id_rev:
logging.warning("Team %d \"%s\" (%d) contains golfer %d, but golfer does not exist" % (t, team['name'], team['key'], g_id))
elif self.teams_by_golfer_id_rev[g_id] != t:
logging.warning("Team %d \"%s\" (%d) contains golfer %d, but golfer does not refer to team" % (t, team['name'], team['key'], g_id))
if not team['golfer_nums']:
logging.warning("Empty team \"%s\" (%d)" % (team['name'], team['key']))
def groups_json(self):
    # JSON-serialized sponsor groups for the pairing page.
    return json.dumps(self.groups)
def teams_json(self):
    # JSON-serialized team records.
    return json.dumps(self.teams)
def golfers_json(self):
    # JSON-serialized golfer records.
    return json.dumps(self.golfers)
def substitutes_json(self):
    # JSON-serialized substitute-golfer records.
    return json.dumps(self.substitutes)
class TeamsUpdater:
def __init__(self, t, golfers_json, substitutes_json, groups_json, teams_json):
    """Parse the JSON payloads posted back by the pairing page.

    t is the tournament entity; the remaining arguments are JSON strings
    (lists of dicts, same shapes as the JsonBuilder output tables).
    """
    self.t = t
    self.golfers = json.loads(golfers_json)
    self.substitutes = json.loads(substitutes_json)
    self.groups = json.loads(groups_json)
    self.teams = json.loads(teams_json)
def update_teams_pass1(self):
    """First pass over the posted teams: load (or create) each Team entity,
    refresh its name/pairing fields, and seed golfers_by_id with an empty
    (None, False) slot for every golfer referenced by some team."""
    self.team_entities = []
    self.golfers_by_id = {}
    for posted in self.teams:
        team_id = posted['key']
        if not team_id:
            # No datastore key yet: this team was just created in the UI.
            entity = Team(parent = self.t.key)
        else:
            entity = Team.get_by_id(int(team_id), parent = self.t.key)
            if not entity:
                logging.error("no team with id %s" % team_id)
                continue
        entity.name = posted['name']
        entity.pairing = posted['pairing_prefs']
        self.team_entities.append((posted['team_num'], entity))
        for golfer_num in posted['golfer_nums']:
            g_id = self.golfers[golfer_num - 1]['key']
            self.golfers_by_id[g_id] = (None, False)
def update_golfers_pass1(self):
for g in self.golfers:
g_id = g['key']
team_num = g['team_num']
if team_num:
t_id = self.teams[team_num - 1]['key']
if not t_id:
t_id = 0
else:
t_id = -1
h = hashlib.md5()
h.update(','.join(str(x) for x in [t_id, g['cart']]))
modified = h.hexdigest() != g['md5']
# logging.debug("Update golfers pass 1: team %d golfer %d (%s)" % (g['team_num'], g_id, "modified" if modified else "not modified"))
if modified or g_id in self.golfers_by_id:
group_num = g['group_num']
group = self.groups[group_num - 1]
s_id = group['key']
s = Sponsor.get_by_id(s_id, parent = self.t.key)
if not s:
logging.error("no sponsor with key %d" % s_id)
continue
golfer = Golfer.get_by_id(g_id)
if not golfer:
logging.error("no golfer with key %d" % g_id)
continue
if | |
"""
PINNACLE.
Publications from an Institute: Numbers, Networks, Authors and
Citations Looking for Excellence.
Copyright (c) 2020, <NAME>
MIT License:
https://github.com/IATE-CONICET-UNC/pinnacle/blob/master/LICENSE
"""
import ads
import pandas as pd
import pickle
import numpy as np
class inst_adsentries:
"""
inst_adsentries (class): Statistics of publications in an Institute.
Several methods to download and analyze bibliographic data.
"""
def __init__(self, config):
    """
    Initialize an inst_adsentries object.

    Parameters
    ----------
    config:
        Configuration holder; its ``config`` attribute is stored as
        ``self.config`` and must expose the directory/file names and the
        experiment id used by the load/save methods.

    Notes
    -----
    The containers start empty and are filled by the load/download methods:
    staff: list of author names.
    history: per-year staff counts (a DataFrame once loaded).
    pub_auth_all / pub_auth_top: publications by author (all / top journals).
    pub_inst_all / pub_inst_top: publications by institute (all / top journals).
    """
    self.config = config.config
    self.staff = []
    self.history = {}
    self.pub_auth_all = {}
    self.pub_auth_top = {}
    self.pub_inst_all = {}
    self.pub_inst_top = {}
def sanity_check(self):
    """
    Check if config parameters are OK.

    Intended to verify that directories and files exist and that required
    parameters are defined; the check itself is still unimplemented
    (COMPLETAR) and unconditionally reports success.
    """
    return True
def data_loaded_check(self):
    """
    Check if data has been loaded.

    Still unimplemented (COMPLETAR); unconditionally reports success.
    """
    return True
def load_history(self, n_list, year_start):
"""
Load history for the number of staff members for an institute.
if interactive (bool) the list is returned.
"""
self.sanity_check()
year_end = year_start + len(n_list)
a = pd.to_datetime(range(year_start, year_end), format='%Y').year
df = pd.DataFrame()
df['pop'] = n_list
df.index = a
self.history = df
def load_staff(self, interactive=True):
"""
Load staff members for an institute.
if interactive (bool) the list is returned.
"""
self.sanity_check()
fname_staff = ''.join([self.config.dir_data, '/',
self.config.fname_staff])
with open(fname_staff) as f:
auth_names = f.read()
auth_names = auth_names.split('\n')
auth_names = auth_names[:-1]
self.staff = auth_names
if interactive:
return auth_names
def load_inst(self):
"""
Load bibliographic data from a pickle file.
Pickle file must be of the type writen by save_inst().
"""
self.sanity_check()
fname_pub_auth_all = ''.join([self.config.dir_data, '/',
self.config.fname_pub_auth_all, '_',
self.config.experiment_id, '.pk'])
fname_pub_auth_top = ''.join([self.config.dir_data, '/',
self.config.fname_pub_auth_top, '_',
self.config.experiment_id, '.pk'])
fname_pub_inst_all = ''.join([self.config.dir_data, '/',
self.config.fname_pub_inst_all, '_',
self.config.experiment_id, '.pk'])
fname_pub_inst_top = ''.join([self.config.dir_data, '/',
self.config.fname_pub_inst_top, '_',
self.config.experiment_id, '.pk'])
self.pub_auth_all = pickle.load(open(fname_pub_auth_all, 'rb'))
self.pub_auth_top = pickle.load(open(fname_pub_auth_top, 'rb'))
self.pub_inst_all = pickle.load(open(fname_pub_inst_all, 'rb'))
self.pub_inst_top = pickle.load(open(fname_pub_inst_top, 'rb'))
fname_pub_history = ''.join([self.config.dir_data, '/history_',
self.config.experiment_id, '.pk'])
self.history = pickle.load(open(fname_pub_history, 'rb'))
fname_pub_staff = ''.join([self.config.dir_data, '/staff_',
self.config.experiment_id, '.pk'])
self.staff = pickle.load(open(fname_pub_staff, 'rb'))
def download_inst(self, authors_list=[], rows_max=200):
    """
    Query the ADSABS service for the papers of a list of authors.

    Parameters
    ----------
    authors_list: list or string
        A list containing the names of the authors; it is also stored
        as self.staff.
    rows_max: int
        Maximum number of rows requested per author query.

    Returns
    -------
    byauth: dataframe
        One row per author with the list of retrieved "Article"
        instances and the number of papers.
    """
    self.staff = authors_list
    fields = ['id', 'bibcode', 'title', 'citation_count',
              'aff', 'author', 'citation', 'pub', 'reference',
              'metrics', 'year', 'read_count', 'pubdate']
    papers_per_author = []
    for name in authors_list:
        print(f"searching ADS for author: {name}")
        found = list(ads.SearchQuery(author=name, rows=rows_max, fl=fields))
        papers_per_author.append(found)
    byauth = pd.DataFrame()
    byauth['authors'] = authors_list
    byauth['ppr_list'] = papers_per_author
    byauth['n_papers'] = [len(plist) for plist in papers_per_author]
    return byauth
def save_inst(self):
"""
Write bibliographic data to a pickle file.
The name of the file is taken from self.config.
"""
self.sanity_check()
self.data_loaded_check()
fname_pub_auth_all = ''.join([self.config.dir_data, '/',
self.config.fname_pub_auth_all, '_',
self.config.experiment_id, '.pk'])
fname_pub_auth_top = ''.join([self.config.dir_data, '/',
self.config.fname_pub_auth_top, '_',
self.config.experiment_id, '.pk'])
fname_pub_inst_all = ''.join([self.config.dir_data, '/',
self.config.fname_pub_inst_all, '_',
self.config.experiment_id, '.pk'])
fname_pub_inst_top = ''.join([self.config.dir_data, '/',
self.config.fname_pub_inst_top, '_',
self.config.experiment_id, '.pk'])
pickle.dump(self.pub_auth_all, open(fname_pub_auth_all, 'wb'))
pickle.dump(self.pub_auth_top, open(fname_pub_auth_top, 'wb'))
pickle.dump(self.pub_inst_all, open(fname_pub_inst_all, 'wb'))
pickle.dump(self.pub_inst_top, open(fname_pub_inst_top, 'wb'))
fname_pub_history = ''.join([self.config.dir_data, '/history_',
self.config.experiment_id, '.pk'])
pickle.dump(self.history, open(fname_pub_history, 'wb'))
fname_pub_staff = ''.join([self.config.dir_data, '/staff_',
self.config.experiment_id, '.pk'])
pickle.dump(self.staff, open(fname_pub_staff, 'wb'))
def save_table(self, table_name=None,
               year_start=None, year_end=None):
    """
    Write per-author publication counts per year to an XLSX file.

    Parameters
    ----------
    table_name: str, optional
        Output file name; defaults to
        <dir_data>/table_<experiment_id>.xlsx.
    year_start, year_end: int, optional
        First and last year (inclusive) to tabulate.  When omitted, the
        range is taken from self.history's index.

    Two sheets are written: 'all' (from pub_auth_all) and 'top' (from
    pub_auth_top).  The yearly totals are also stored in self.history
    as 'npapers_all' / 'npapers_top'.
    """
    self.sanity_check()
    self.data_loaded_check()
    if table_name is None:
        table_name = (f"{self.config.dir_data}/"
                      f"table_{self.config.experiment_id}.xlsx")
    print(table_name)
    writer = pd.ExcelWriter(table_name)
    if (year_start is not None) and (year_end is not None):
        # Fix: the original ignored the arguments and hard-coded 2000-2020.
        tedges = np.arange(year_start - 0.5, year_end + 1.5, 1)
        years = np.arange(year_start, year_end + 1, 1)
    else:
        tedges = np.arange(self.history.index[0] - 0.5,
                           self.history.index[-1] + 1.5, 1)
        years = np.arange(self.history.index[0],
                          self.history.index[-1] + 1, 1)

    def tabulate(pubs):
        # One column per author with paper counts per year, plus the
        # yearly total summed over all authors.
        dfa = pd.DataFrame()
        dfa['year'] = years
        total = np.zeros(len(years))
        for author in list(pubs.author1.unique()):
            df = pubs[pubs['author1'].isin([author])]
            y = [int(i) for i in df.year.values]
            if len(y) == 0:
                H = [[0] * (len(tedges) - 1), None]
            else:
                H = np.histogram(np.array(y), bins=tedges)
            dfa[author] = H[0]
            total = total + H[0]
        return dfa, total

    # Fix: the original wrote both tables to a sheet named 'top' (so the
    # all-journals sheet was overwritten) and reused the same DataFrame
    # for both loops, leaking 'all' columns into the 'top' sheet.
    dfa_all, total_all = tabulate(self.pub_auth_all)
    self.history['npapers_all'] = total_all
    dfa_all.to_excel(writer, sheet_name='all')
    dfa_top, total_top = tabulate(self.pub_auth_top)
    self.history['npapers_top'] = total_top
    dfa_top.to_excel(writer, sheet_name='top')
    writer.save()
def journal_quality(self, custom_list=False):
"""
Filter non-indexed journals and proceedings.
This is based on a data file with a journals name list.
"""
# Obtain the list of all journals and write it to a file
# df_index = df_papers_inst[~df_papers_inst.duplicated(subset='pub')]
# filename = '../dat/journals.txt'
# with open(filename, 'w') as f:
# for item in df_index.pub:
# f.write("%s\n" % item)
# edit the file to leave only top indexed journals
# (delete proceedings, BAAA, etc.)
if custom_list:
filename = '../dat/journals_top.txt'
with open(filename) as f:
jnames = f.read()
jnames = jnames.split('\n')
jnames.pop()
filt_top_journals = self.pub_auth_all.pub.isin(jnames)
self.pub_auth_top = self.pub_auth_all[filt_top_journals]
filt_top_journals = self.pub_inst_all.pub.isin(jnames)
self.pub_inst_top = self.pub_inst_all[filt_top_journals]
def reduce_article_list(self, dfi, institution_keys=None):
    """
    Flatten an author-based table into a paper-based table.

    Parameters
    ----------
    dfi: DataFrame
        Author-based table as returned by download_inst(), with columns
        'authors' (names) and 'ppr_list' (lists of ads.Article objects).
    institution_keys: list of str, optional
        Keywords identifying the institute inside affiliation strings;
        defaults to self.config.inst_strings.

    The result — one row per paper with at least one affiliation matching
    the institute — is stored in self.pub_auth_all; nothing is returned.
    """
    self.sanity_check()
    if institution_keys is None:
        institution_keys = self.config.inst_strings
    Ps = []
    for a, x in zip(dfi.authors, dfi.ppr_list):
        for p in x:
            isinst = False
            for aff in p.aff:
                # NOTE(review): this requires MORE THAN ONE keyword to match
                # a single affiliation string (ncoinc > 1); confirm this is
                # intended rather than ncoinc > 0.
                ncoinc = sum([word in aff for word in institution_keys])
                thisis = ncoinc > 1
                isinst = isinst or thisis
            if isinst:
                t = [a, p.id, p.bibcode, p.title, p.aff, p.author,
                     p.citation, p.pub, p.reference, p.year, p.pubdate,
                     p.citation_count, p.read_count]
                Ps.append(t)
    names = ['author1', 'id', 'bibcode', 'title', 'aff', 'authors',
             'citation', 'pub', 'reference', 'year', 'pubdate',
             'citation_count', 'read_count']
    dfo = pd.DataFrame(Ps, columns=names)
    self.pub_auth_all = dfo
def get_pub_scores(self, subset='auth_all'):
    """
    Add journal-quality (quartile) information to a publication table.

    Parameters
    ----------
    subset: str
        Which table to annotate: 'auth_all' (default), 'auth_top',
        'inst_all' or 'inst_top'.  The matching DataFrame gains a 'Q'
        column with the journal quartile (1-4, or 0 when the journal
        could not be matched against the local scimagojr.csv catalogue).
    """
    from nltk.corpus import stopwords
    from nltk.tokenize import word_tokenize
    import csv
    from difflib import SequenceMatcher
    import jellyfish
    # self.sanity_check()
    # NOTE(review): an unrecognized subset leaves `pubs` unbound and the
    # loop below raises NameError; consider validating the argument.
    if subset == 'auth_top':
        pubs = self.pub_auth_top['pub']
    elif subset == 'auth_all':
        pubs = self.pub_auth_all['pub']
    elif subset == 'inst_top':
        pubs = self.pub_inst_top['pub']
    elif subset == 'inst_all':
        pubs = self.pub_inst_all['pub']
    # load publication metrics
    # download stowords the first time
    def similar(a, b):
        # Ratio of matching characters (0..1) between two strings.
        return SequenceMatcher(None, a, b).ratio()
    def get_q(s):
        # Extract the quartile number from a "Q1".."Q4" tag; 0 = unknown.
        q = 0
        if "Q4" in s:
            q = 4
        if "Q3" in s:
            q = 3
        if "Q2" in s:
            q = 2
        if "Q1" in s:
            q = 1
        return q
    stop_words = set(stopwords.words('english'))
    journals = []
    with open('scimagojr.csv', newline='') as csvfile:
        s = csv.reader(csvfile, delimiter=';')
        for row in s:
            # Normalize the catalogue journal name (column 2): lowercase,
            # drop English stop words and slashes.
            jname = row[2].lower()
            word_tokens = word_tokenize(jname)
            fname = [w for w in word_tokens if w not in stop_words]
            sent1 = ' '.join(fname)
            sent1 = sent1.replace('/', '')
            row[2] = sent1
            journals.append(row)
    Q = []
    for p in pubs:
        # Normalize each publication's journal name the same way.
        jname = p.lower()
        word_tokens = word_tokenize(jname)
        fname = [w for w in word_tokens if w not in stop_words]
        sent1 = ' '.join(fname)
        sent1 = sent1.replace('/', '')
        match = 0
        J = ""
        for Journal in journals:
            journal = Journal[2]
            s1 = similar(sent1, journal)
            s2 = jellyfish.jaro_winkler(sent1, journal)
            # Accept only when both similarity measures are confident;
            # the last matching row wins.
            if s1 > 0.9 and s2 > 0.9:
                match += 1
                J = Journal[-1]
        Q.append(get_q(J))
    if subset == 'auth_top':
        self.pub_auth_top['Q'] = Q
    elif subset == 'auth_all':
        self.pub_auth_all['Q'] = Q
    elif subset == 'inst_top':
        self.pub_inst_top['Q'] = Q
    elif subset == 'inst_all':
        self.pub_inst_all['Q'] = Q
def get_papers_by_authors(authors_list, rows_max=999):
"""
get_papers_by_authors, function.
Make a fresh load of bibliographic data from ADS.
Given a list of author names, return a pandas dataframe
with the list of papers retrieved by the ADSABS service.
Parameters
----------
authors_list: list or string
A list containing the names of the authors.
Returns
-------
byauth: dataframe
A data frame containing the list of authors, number of papers
and the list of papers as "Article" instances.
"""
fl = ['id', | |
self.ms_band_rngsig = np.divide(self.ms_band_rng,self.ms_band_std)
self.ms_band_rngmean = np.divide(self.ms_band_rng,self.ms_band_mean)
if(mode):
self.ms_band_mode = sp.stats.mode(self.ms_pixels,axis=0)[0][0]
self.ms_band_deciles = np.percentile(
self.ms_pixels,np.linspace(10,90,9),axis=0)
self.ms_band_quartiles = np.percentile(self.ms_pixels,[25,75],axis=0)
self.ms_band_iqr = self.ms_band_quartiles[1,:]-self.ms_band_quartiles[0,:]
self.ms_band_iqrsig = np.divide(self.ms_band_iqr,self.ms_band_std)
self.ms_band_iqrmean = np.divide(self.ms_band_iqr,self.ms_band_mean)
self.ms_band_ratio = self.ms_band_mean[:4:]/np.sum(self.ms_band_mean[:4:])
def createMSGLCMfeats(self, distance=1):
    """Compute Haralick (GLCM) texture features for the four MS bands at
    the given pixel distance and append them, plus a band-average row,
    to the accumulated glcm_ms_vals / glcm_ms_dist attributes."""
    # NOTE(review): the guard checks the RGB attribute before using the MS
    # image -- presumably both are created together by __createGLCMimgs;
    # confirm.
    if not hasattr(self, 'glcm_rgb_img'):
        self.__createGLCMimgs()
    per_band = [mht.haralick(self.glcm_ms_img[:, :, band], ignore_zeros=True,
                             return_mean_ptp=True, distance=distance)
                for band in range(4)]
    stacked = np.vstack(per_band)
    # Append the mean over bands as a fifth row, then flatten row-major.
    feats = np.vstack((stacked, stacked.mean(axis=0))).flatten('C')
    if hasattr(self, 'glcm_ms_vals'):
        self.glcm_ms_vals = np.concatenate((self.glcm_ms_vals, feats))
    else:
        self.glcm_ms_vals = feats
    if hasattr(self, 'glcm_ms_dist'):
        self.glcm_ms_dist.append(distance)
    else:
        self.glcm_ms_dist = [distance]
def createMSautoCorFeats(self, distance=1):
    """Compute image-autocorrelation features (mean and range over the
    N/NE/E/SE offsets) for each MS band at the given distance and append
    them, plus a band-average row, to acor_ms_vals / acor_ms_dist."""
    if not hasattr(self, 'glcm_rgb_img'):
        self.__createGLCMimgs()
    per_band = np.empty([4, 2])
    for band in range(4):
        img = self.glcm_ms_img[:, :, band]
        # Offsets: north, north-east, east, south-east.
        samples = np.array([
            self.__imgAutocorrelate(img, 0, distance),
            self.__imgAutocorrelate(img, distance, distance),
            self.__imgAutocorrelate(img, distance, 0),
            self.__imgAutocorrelate(img, distance, -distance),
        ])
        per_band[band, :] = np.array([samples.mean(),
                                      samples.max() - samples.min()])
    feats = np.vstack((per_band, per_band.mean(axis=0))).flatten('C')
    if hasattr(self, 'acor_ms_vals'):
        self.acor_ms_vals = np.concatenate((self.acor_ms_vals, feats))
    else:
        self.acor_ms_vals = feats
    if hasattr(self, 'acor_ms_dist'):
        self.acor_ms_dist.append(distance)
    else:
        self.acor_ms_dist = [distance]
def createMSLBPFeats(self, distance=1):
    """Compute normalized uniform local-binary-pattern histograms for each
    MS band at the given radius (1-3) and append them, plus a band-average
    row, to lbp_ms_vals / lbp_ms_dist."""
    if distance not in [1, 2, 3]:
        raise ValueError('distance can only be 1,2 or 3')
    n_bins = 2 + distance * 8
    histograms = np.zeros([4, n_bins])
    for band in range(4):
        codes = local_binary_pattern(self.ms_img_clip[:, :, band],
                                     8 * distance, distance,
                                     method='uniform')
        masked = codes[self.ms_mask_clip]
        unique, counts = np.unique(masked, return_counts = True)
        hist = np.zeros([n_bins])
        hist[unique.astype('int')] = counts
        # Normalize to a probability distribution over the LBP codes.
        histograms[band, :] = hist / hist.sum()
    feats = np.vstack((histograms, histograms.mean(axis=0))).flatten('C')
    if hasattr(self, 'lbp_ms_vals'):
        self.lbp_ms_vals = np.concatenate((self.lbp_ms_vals, feats))
    else:
        self.lbp_ms_vals = feats
    if hasattr(self, 'lbp_ms_dist'):
        self.lbp_ms_dist.append(distance)
    else:
        self.lbp_ms_dist = [distance]
def createMSLawsFeats(self):
    """Compute Laws texture-energy features for the four MS bands.

    Each band is locally normalized by subtracting a 15x15 mean, filtered
    with the 14 distinct 5x5 Laws kernels (asymmetric pairs averaged), and
    the absolute responses are smoothed with a 15x15 window.  The mean and
    std of the smoothed energy over the mask give 28 features per band,
    stored in laws_ms_feats with a band-average row appended.
    """
    # Construct filter bank
    L5 = np.array([1,4,6,4,1])      # level
    E5 = np.array([-1,-2,0,2,1])    # edge
    S5 = np.array([-1,0,2,0,-1])    # spot
    R5 = np.array([1,-4,6,-4,1])    # ripple
    W5 = np.array([-1,2,0,-2,1])    # wave
    filtbank = [L5,E5,S5,R5,W5]
    del L5, E5, S5, R5, W5
    # The 2-D kernels are outer products of the 1-D filters.
    filtgrid = np.zeros([5,5,5,5])
    for i in range(5):
        for j in range(5):
            filtgrid[i,j,:,:]=(np.outer(filtbank[i],filtbank[j]))
    del filtbank
    # compute features
    lawsFeat = np.zeros([4,28])
    for band in range(4):
        # Remove local illumination with a 15x15 moving average (225 = 15*15).
        mean_15 = convolve(self.ms_img_clip[:,:,band],np.ones([15,15])/225,mode='reflect')
        norm_gray = self.ms_img_clip[:,:,band]-mean_15
        del mean_15
        count_i = 0;
        for i in range(5):
            for j in range(5):
                # Skip the lower triangle (the (j,i) mirror is handled with
                # its (i,j) partner) and the L5L5 kernel.
                if j < i or (i==0 and j ==0):
                    continue
                if j==i:
                    convimg = convolve(norm_gray,filtgrid[i,j],mode='reflect')
                    lawsimg = convolve(np.absolute(convimg),np.ones([15,15]),mode='reflect')
                    lawsFeat[band,count_i] = lawsimg[self.ms_mask_clip].mean()
                    lawsFeat[band,count_i+14] = lawsimg[self.ms_mask_clip].std()
                    count_i += 1
                else:
                    # Average the two orientations of an asymmetric pair.
                    convimg1 = np.absolute(convolve(norm_gray,filtgrid[i,j],mode='reflect'))
                    convimg2 = np.absolute(convolve(norm_gray,filtgrid[j,i],mode='reflect'))
                    lawsimg = convolve(convimg1+convimg2,np.ones([15,15])/2,mode='reflect')
                    lawsFeat[band,count_i] = lawsimg[self.ms_mask_clip].mean()
                    lawsFeat[band,count_i+14] = lawsimg[self.ms_mask_clip].std()
                    count_i += 1
    self.laws_ms_feats = np.vstack((lawsFeat,lawsFeat.mean(axis=0)))
def createSpecIndices(self):
    """Compute per-pixel spectral vegetation indices.

    Fills rgb_indices (16 indices from rgb_pixels) and ms_indices (8
    indices from ms_pixels), both shaped (n_pixels, n_indices), together
    with the matching name tuples rgbindex_list / msindex_list.  The
    1e-15 terms guard the divisions against zero denominators.

    NOTE(review): the formulas assume rgb_pixels columns are (R, G, B)
    and ms_pixels columns are (green, red, red-edge, NIR) -- confirm
    against the band ordering of the source imagery.
    """
    # --- RGB-based indices ---
    GRVI_pixels = np.divide(self.rgb_pixels[:,1]-self.rgb_pixels[:,0],
                            self.rgb_pixels[:,1]+self.rgb_pixels[:,0]+1e-15)
    VARI_pixels = np.divide(self.rgb_pixels[:,1]-self.rgb_pixels[:,0],
                            self.rgb_pixels[:,1]+self.rgb_pixels[:,0]\
                            -self.rgb_pixels[:,2]+1e-15)
    GLIr_pixels = np.divide(2*self.rgb_pixels[:,0] - self.rgb_pixels[:,1]\
                            -self.rgb_pixels[:,2],
                            2*self.rgb_pixels[:,0]+self.rgb_pixels[:,1]\
                            +self.rgb_pixels[:,2]+1e-15)
    GLIg_pixels = np.divide(2*self.rgb_pixels[:,1] - self.rgb_pixels[:,0]\
                            -self.rgb_pixels[:,2],
                            2*self.rgb_pixels[:,1]+self.rgb_pixels[:,0]\
                            +self.rgb_pixels[:,2]+1e-15)
    GLIb_pixels = np.divide(2*self.rgb_pixels[:,2] - self.rgb_pixels[:,1]\
                            -self.rgb_pixels[:,0],
                            2*self.rgb_pixels[:,2]+self.rgb_pixels[:,1]\
                            +self.rgb_pixels[:,0]+1e-15)
    ExG_pixels = np.divide(2*self.rgb_pixels[:,1] - self.rgb_pixels[:,0]\
                           -self.rgb_pixels[:,2],
                           self.rgb_pixels[:,2]+self.rgb_pixels[:,1]\
                           +self.rgb_pixels[:,0]+1e-15)
    ExR_pixels = np.divide(2*self.rgb_pixels[:,0] - self.rgb_pixels[:,1]\
                           -self.rgb_pixels[:,2],
                           self.rgb_pixels[:,2]+self.rgb_pixels[:,1]\
                           +self.rgb_pixels[:,0]+1e-15)
    ExB_pixels = np.divide(2*self.rgb_pixels[:,2] - self.rgb_pixels[:,0]\
                           -self.rgb_pixels[:,1],
                           self.rgb_pixels[:,2]+self.rgb_pixels[:,1]\
                           +self.rgb_pixels[:,0]+1e-15)
    ExGveg_pixels = 2*self.rgb_pixels[:,1]- self.rgb_pixels[:,0]\
                    -self.rgb_pixels[:,2]+50
    NegExR_pixels = self.rgb_pixels[:,1]- 1.4*self.rgb_pixels[:,0]
    ExRveg_pixels = np.divide(1.4*self.rgb_pixels[:,1] -\
                              self.rgb_pixels[:,0],
                              self.rgb_pixels[:,2]+self.rgb_pixels[:,1]\
                              +self.rgb_pixels[:,0]+1e-15)
    ExBveg_pixels = np.divide(1.4*self.rgb_pixels[:,2] -\
                              self.rgb_pixels[:,0],
                              self.rgb_pixels[:,2]+self.rgb_pixels[:,1]\
                              +self.rgb_pixels[:,0]+1e-15)
    TGI_pixels = self.rgb_pixels[:,1] -0.39*self.rgb_pixels[:,0]\
                 -0.61*self.rgb_pixels[:,2]
    mGRVI_pixels = np.divide(self.rgb_pixels[:,1]*self.rgb_pixels[:,1] -\
                             self.rgb_pixels[:,0]*self.rgb_pixels[:,0],
                             self.rgb_pixels[:,1]*self.rgb_pixels[:,1] +\
                             self.rgb_pixels[:,0]*self.rgb_pixels[:,0]+\
                             1e-15)
    RGBVI_pixels = np.divide(self.rgb_pixels[:,1]*self.rgb_pixels[:,1] -\
                             self.rgb_pixels[:,0]*self.rgb_pixels[:,2],
                             self.rgb_pixels[:,1]*self.rgb_pixels[:,1] +\
                             self.rgb_pixels[:,0]*self.rgb_pixels[:,2]+\
                             1e-15)
    IKAW_pixels = np.divide(self.rgb_pixels[:,0]-self.rgb_pixels[:,2],
                            self.rgb_pixels[:,0]+self.rgb_pixels[:,2]+1e-15)
    # --- Multispectral indices ---
    NDVI_pixels = np.divide(self.ms_pixels[:,3]-self.ms_pixels[:,1],
                            self.ms_pixels[:,3]+self.ms_pixels[:,1]+1e-15)
    NDVIg_pixels = np.divide(self.ms_pixels[:,3]-self.ms_pixels[:,0],
                             self.ms_pixels[:,3]+self.ms_pixels[:,0]+1e-15)
    NDVIre_pixels = np.divide(self.ms_pixels[:,3]-self.ms_pixels[:,2],
                              self.ms_pixels[:,3]+self.ms_pixels[:,2]+1e-15)
    CIG_pixels = np.divide(self.ms_pixels[:,3],self.ms_pixels[:,0]+1e-15)-1
    CVI_pixels = np.divide(
        np.multiply(
            np.multiply(self.ms_pixels[:,3],self.ms_pixels[:,1]),
            self.ms_pixels[:,1]
        ),
        self.ms_pixels[:,0]+1e-15
    )
    GRVIms_pixels = np.divide(self.ms_pixels[:,0]-self.ms_pixels[:,1],
                              self.ms_pixels[:,0]+self.ms_pixels[:,1]+1e-15)
    mGRVIms_pixels = np.divide(self.ms_pixels[:,0]*self.ms_pixels[:,0]-\
                               self.ms_pixels[:,1]*self.ms_pixels[:,1],
                               self.ms_pixels[:,0]*self.ms_pixels[:,0]+\
                               self.ms_pixels[:,1]*self.ms_pixels[:,1]
                               +1e-15)
    NegExRms_pixels = self.ms_pixels[:,0] - 1.4* self.ms_pixels[:,1]
    # Name tuples: columns of the stacked index arrays below, in order.
    self.rgbindex_list = ('GRVI','VARI','GLIr','GLIg','GLIb','ExG','ExR',
                          'ExB','ExGveg','NegExR','ExRveg','ExBveg','TGI',
                          'mGRVI','RGBVI','IKAW')
    self.msindex_list = ('NDVI','NDVIg','NDVIre','CIG','CVI','GRVI',
                         'mGRVI','NegExR')
    self.rgb_indices = np.stack((
        GRVI_pixels, VARI_pixels, GLIr_pixels, GLIg_pixels,
        GLIb_pixels, ExG_pixels, ExR_pixels, ExB_pixels,
        ExGveg_pixels, NegExR_pixels, ExRveg_pixels, ExBveg_pixels,
        TGI_pixels, mGRVI_pixels, RGBVI_pixels, IKAW_pixels
    ),axis=1)
    self.ms_indices = np.stack((
        NDVI_pixels, NDVIg_pixels, NDVIre_pixels, CIG_pixels,
        CVI_pixels, GRVIms_pixels, mGRVIms_pixels, NegExRms_pixels
    ),axis=1)
def createRGBIndFeats(self,mode=False):
if not hasattr(self,'rgb_indices'):
self.createSpecIndices()
self.rgb_ind_max = self.rgb_indices.max(axis=0)
self.rgb_ind_min = self.rgb_indices.min(axis=0)
self.rgb_ind_mean = self.rgb_indices.mean(axis=0)
self.rgb_ind_std = self.rgb_indices.std(axis=0)
self.rgb_ind_median = np.median(self.rgb_indices,axis=0)
self.rgb_ind_cov = np.divide(self.rgb_ind_std,self.rgb_ind_mean)
self.rgb_ind_skew = sp.stats.skew(self.rgb_indices,axis=0)
self.rgb_ind_kurt = sp.stats.kurtosis(self.rgb_indices,axis=0)
self.rgb_ind_sum = self.rgb_indices.sum(axis=0)
self.rgb_ind_rng = self.rgb_ind_max-self.rgb_ind_min
self.rgb_ind_rngsig = np.divide(self.rgb_ind_rng,self.rgb_ind_std)
self.rgb_ind_rngmean = np.divide(self.rgb_ind_rng,self.rgb_ind_mean)
if(mode):
self.rgb_ind_mode = sp.stats.mode(self.rgb_indices,axis=0)[0][0]
self.rgb_ind_deciles = np.percentile(self.rgb_indices,
np.linspace(10,90,9),axis=0)
self.rgb_ind_quartiles = np.percentile(self.rgb_indices,[25,75],axis=0)
self.rgb_ind_iqr = self.rgb_ind_quartiles[1,:]-self.rgb_ind_quartiles[0,:]
self.rgb_ind_iqrsig = np.divide(self.rgb_ind_iqr,self.rgb_ind_std)
self.rgb_ind_iqrmean = np.divide(self.rgb_ind_iqr,self.rgb_ind_mean)
def createMSIndFeats(self,mode=False):
if not hasattr(self,'ms_indices'):
self.createSpecIndices()
self.ms_ind_max = self.ms_indices.max(axis=0)
self.ms_ind_min = self.ms_indices.min(axis=0)
self.ms_ind_mean = self.ms_indices.mean(axis=0)
self.ms_ind_std = self.ms_indices.std(axis=0)
self.ms_ind_median = np.median(self.ms_indices,axis=0)
self.ms_ind_cov = np.divide(self.ms_ind_std,self.ms_ind_mean)
self.ms_ind_skew = sp.stats.skew(self.ms_indices,axis=0)
self.ms_ind_kurt = sp.stats.kurtosis(self.ms_indices,axis=0)
self.ms_ind_sum = self.ms_indices.sum(axis=0)
self.ms_ind_rng = self.ms_ind_max-self.ms_ind_min
self.ms_ind_rngsig = np.divide(self.ms_ind_rng,self.ms_ind_std)
self.ms_ind_rngmean = np.divide(self.ms_ind_rng,self.ms_ind_mean)
if(mode):
self.ms_ind_mode = sp.stats.mode(self.ms_indices,axis=0)[0][0]
self.ms_ind_deciles = np.percentile(self.ms_indices,
np.linspace(10,90,9),axis=0)
self.ms_ind_quartiles = np.percentile(self.ms_indices,[25,75],axis=0)
self.ms_ind_iqr = self.ms_ind_quartiles[1,:]-self.ms_ind_quartiles[0,:]
self.ms_ind_iqrsig = np.divide(self.ms_ind_iqr,self.ms_ind_std)
self.ms_ind_iqrmean = np.divide(self.ms_ind_iqr,self.ms_ind_mean)
def createDSMRawFeats(self,mode=False):
self.dsm_raw_max = np.array([self.dsm_pixels.max(axis=0)])
self.dsm_raw_min = np.array([self.dsm_pixels.min(axis=0)])
self.dsm_raw_mean = np.array([self.dsm_pixels.mean(axis=0)])
self.dsm_raw_std = np.array([self.dsm_pixels.std(axis=0)])
self.dsm_raw_median = np.array([np.median(self.dsm_pixels,axis=0)])
self.dsm_raw_cov = np.divide(self.dsm_raw_std,self.dsm_raw_mean)
self.dsm_raw_skew = np.array([sp.stats.skew(self.dsm_pixels,axis=0)])
self.dsm_raw_kurt = np.array([sp.stats.kurtosis(self.dsm_pixels,axis=0)])
self.dsm_raw_sum = np.array([self.dsm_pixels.sum(axis=0)])
self.dsm_raw_rng = self.dsm_raw_max-self.dsm_raw_min
self.dsm_raw_rngsig = np.divide(self.dsm_raw_rng,self.dsm_raw_std)
self.dsm_raw_rngmean = np.divide(self.dsm_raw_rng,self.dsm_raw_mean)
if(mode):
self.dsm_raw_mode = sp.stats.mode(self.dsm_pixels,axis=0)[0][0]
self.dsm_raw_deciles = np.percentile(
self.dsm_pixels,np.linspace(10,90,9),axis=0)
self.dsm_raw_quartiles = np.percentile(self.dsm_pixels,[25,75],axis=0)
self.dsm_raw_iqr = np.array([self.dsm_raw_quartiles[1]-self.dsm_raw_quartiles[0]])
self.dsm_raw_iqrsig = np.divide(self.dsm_raw_iqr,self.dsm_raw_std)
self.dsm_raw_iqrmean = np.divide(self.dsm_raw_iqr,self.dsm_raw_mean)
self.dsm_raw_mad = np.array([np.median(np.absolute(self.dsm_pixels - np.median(self.dsm_pixels)))])
self.dsm_raw_maxmed = self.dsm_raw_max - self.dsm_raw_median
self.dsm_raw_minmed = self.dsm_raw_min - self.dsm_raw_median
self.dsm_raw_summed = np.array([(self.dsm_pixels-self.dsm_raw_median).sum(axis=0)])
self.dsm_raw_decilesmed = self.dsm_raw_deciles - self.dsm_raw_median
self.dsm_raw_quartilesmed = self.dsm_raw_quartiles - self.dsm_raw_median
def __createDSMGLCMImg(self,levels=32):
# clamp levels number of height bands, spread uniformly so that
# 0 means below 5% percentile of H, levels-1 means above top 95%-ile
# and all else are spread out linearly in this range
# clamp minimum of 1 in region of interest to avoid issue of mostly zeroes
if(levels>255):
raise ValueError('max number of levels is 255')
lims = np.percentile(self.dsm_pixels,[5,95],axis=0)
scaleimg = rescale_intensity(self.dsm_img_clip,in_range = (lims[0],lims[1]))
self.dsm_glcm_img = (scaleimg*(levels-1)).astype('uint8')
local_img=np.zeros(self.dsm_glcm_img.shape,dtype='uint8')
local_img[self.dsm_mask_clip[0],self.dsm_mask_clip[1]]=np.maximum(self.dsm_glcm_img[self.dsm_mask_clip[0],self.dsm_mask_clip[1]],np.ones((self.dsm_mask[0].__len__())))
local_img = local_img[~np.all(local_img==0,axis=1),:]
local_img = local_img[:,~np.all(local_img==0,axis=0)]
self.dsm_glcm_img_masked = local_img
def createDSMGLCMfeats(self,distance=1):
if not hasattr(self,'dsm_glcm_img_masked'):
self.__createDSMGLCMImg()
glcm_dsm_vals = mht.haralick(self.dsm_glcm_img_masked,ignore_zeros=True,
return_mean_ptp=True,distance=distance)
if not hasattr(self,'glcm_dsm_vals'):
self.glcm_dsm_vals = glcm_dsm_vals
else:
self.glcm_dsm_vals = np.concatenate((self.glcm_dsm_vals,
glcm_dsm_vals))
if not hasattr(self,'glcm_dsm_dist'):
self.glcm_dsm_dist = [distance]
else:
self.glcm_dsm_dist.append(distance)
def createDSMautoCorFeats(self,distance=1):
local_img = np.zeros(self.dsm_img_clip.shape)
local_img[self.dsm_mask_clip[0],self.dsm_mask_clip[1]]=self.dsm_img_clip[self.dsm_mask_clip[0],self.dsm_mask_clip[1]]
N = self.__imgAutocorrelate(local_img,0,distance)
NE = self.__imgAutocorrelate(local_img,distance,distance)
E = self.__imgAutocorrelate(local_img,distance,0)
SE = self.__imgAutocorrelate(local_img,distance,-distance)
acors = np.array([N,NE,E,SE])
acfeats = np.array([acors.mean(),acors.max()-acors.min()])
if not hasattr(self,'acor_dsm_vals'):
self.acor_dsm_vals = acfeats
else:
self.acor_dsm_vals = np.concatenate((self.acor_dsm_vals,
acfeats))
if not hasattr(self,'acor_dsm_dist'):
self.acor_dsm_dist = [distance]
else:
self.acor_dsm_dist.append(distance)
def createDSMLBPFeats(self,distance=1):
if not distance in [1,2,3]:
raise ValueError('distance can only be 1,2 or 3')
if not hasattr(self,'dsm_glcm_img'):
self.__createDSMGLCMImg()
lbp_img = local_binary_pattern(self.dsm_glcm_img,8*distance,distance,method='uniform')
lbp_pix = lbp_img[self.dsm_mask_clip]
unique, counts = np.unique(lbp_pix, return_counts = True)
count_table = np.zeros([2+distance*8])
count_table[unique.astype('int')]=counts
count_table = count_table/count_table.sum()
if not hasattr(self,'lbp_dsm_vals'):
self.lbp_dsm_vals = count_table
else:
self.lbp_dsm_vals = np.concatenate((self.lbp_dsm_vals,count_table))
if not hasattr(self,'lbp_dsm_dist'):
self.lbp_dsm_dist = [distance]
else:
self.lbp_dsm_dist.append(distance)
    def createDSMLawsFeats(self):
        """Compute Laws texture-energy features of the clipped DSM.

        The image is first normalised by subtracting a 15x15 local mean.
        The five 1-D Laws kernels (L5 level, E5 edge, S5 spot, R5 ripple,
        W5 wave) are outer-producted into a 5x5 grid of 2-D filters;
        symmetric pairs (i,j)/(j,i) are averaged and the pure L5L5 filter
        (i==j==0) is skipped, giving 14 energy maps.  The mean and std of
        each map over the DSM mask are stored in self.laws_dsm_feats
        (shape 14x2).
        """
        # subtract the 15x15 neighbourhood mean to remove illumination/level
        mean_15 = convolve(self.dsm_img_clip,np.ones([15,15])/225,mode='reflect')
        norm_gray = self.dsm_img_clip-mean_15
        del mean_15
        # Construct filter bank (five standard 1-D Laws kernels)
        L5 = np.array([1,4,6,4,1])
        E5 = np.array([-1,-2,0,2,1])
        S5 = np.array([-1,0,2,0,-1])
        R5 = np.array([1,-4,6,-4,1])
        W5 = np.array([-1,2,0,-2,1])
        filtbank = [L5,E5,S5,R5,W5]
        del L5, E5, S5, R5, W5
        # filtgrid[i,j] is the 2-D filter outer(kernel_i, kernel_j)
        filtgrid = np.zeros([5,5,5,5])
        for i in range(5):
            for j in range(5):
                filtgrid[i,j,:,:]=(np.outer(filtbank[i],filtbank[j]))
        del filtbank
        # compute features: iterate the upper triangle (j >= i), skipping
        # L5L5 which carries no texture information
        lawsFeat = np.zeros([14,2])
        count_i = 0;
        for i in range(5):
            for j in range(5):
                if j < i or (i==0 and j ==0):
                    continue
                if j==i:
                    # diagonal filter: single energy map
                    convimg = convolve(norm_gray,filtgrid[i,j],mode='reflect')
                    lawsimg = convolve(np.absolute(convimg),np.ones([15,15]),mode='reflect')
                    lawsFeat[count_i,0] = lawsimg[self.dsm_mask_clip].mean()
                    lawsFeat[count_i,1] = lawsimg[self.dsm_mask_clip].std()
                    count_i += 1
                else:
                    # off-diagonal: average the (i,j) and (j,i) responses
                    convimg1 = np.absolute(convolve(norm_gray,filtgrid[i,j],mode='reflect'))
                    convimg2 = np.absolute(convolve(norm_gray,filtgrid[j,i],mode='reflect'))
                    lawsimg = convolve(convimg1+convimg2,np.ones([15,15])/2,mode='reflect')
                    lawsFeat[count_i,0] = lawsimg[self.dsm_mask_clip].mean()
                    lawsFeat[count_i,1] = lawsimg[self.dsm_mask_clip].std()
                    count_i += 1
        self.laws_dsm_feats = lawsFeat
def stackFeats(self):
featStack = np.array([])
featList = []
featClass = []
featSizeInvar = []
featHeightInvar=[]
featScale = []
if hasattr(self,'rgb_band_max'):
featList.extend(['rgb_band_max_R','rgb_band_max_G','rgb_band_max_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_max
else:
featStack = np.concatenate((featStack,self.rgb_band_max))
if hasattr(self,'rgb_band_min'):
featList.extend(['rgb_band_min_R','rgb_band_min_G','rgb_band_min_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_min
else:
featStack = np.concatenate((featStack,self.rgb_band_min))
if hasattr(self,'rgb_band_mean'):
featList.extend(['rgb_band_mean_R','rgb_band_mean_G','rgb_band_mean_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_mean
else:
featStack = np.concatenate((featStack,self.rgb_band_mean))
if hasattr(self,'rgb_band_std'):
featList.extend(['rgb_band_std_R','rgb_band_std_G','rgb_band_std_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_std
else:
featStack = np.concatenate((featStack,self.rgb_band_std))
if hasattr(self,'rgb_band_median'):
featList.extend(['rgb_band_median_R','rgb_band_median_G','rgb_band_median_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_median
else:
featStack = np.concatenate((featStack,self.rgb_band_median))
if hasattr(self,'rgb_band_cov'):
featList.extend(['rgb_band_cov_R','rgb_band_cov_G','rgb_band_cov_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_cov
else:
featStack = np.concatenate((featStack,self.rgb_band_cov))
if hasattr(self,'rgb_band_skew'):
featList.extend(['rgb_band_skew_R','rgb_band_skew_G','rgb_band_skew_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_skew
else:
featStack = np.concatenate((featStack,self.rgb_band_skew))
if hasattr(self,'rgb_band_kurt'):
featList.extend(['rgb_band_kurt_R','rgb_band_kurt_G','rgb_band_kurt_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_kurt
else:
featStack = np.concatenate((featStack,self.rgb_band_kurt))
if hasattr(self,'rgb_band_sum'):
featList.extend(['rgb_band_sum_R','rgb_band_sum_G','rgb_band_sum_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([False]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_sum
else:
featStack = np.concatenate((featStack,self.rgb_band_sum))
if hasattr(self,'rgb_band_rng'):
featList.extend(['rgb_band_rng_R','rgb_band_rng_G','rgb_band_rng_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.band_rng
else:
featStack = np.concatenate((featStack,self.rgb_band_rng))
if hasattr(self,'rgb_band_rngsig'):
featList.extend(['rgb_band_rngsig_R','rgb_band_rngsig_G','rgb_band_rngsig_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_rngsig
else:
featStack = np.concatenate((featStack,self.rgb_band_rngsig))
if hasattr(self,'rgb_band_rngmean'):
featList.extend(['rgb_band_rngmean_R','rgb_band_rngmean_G','rgb_band_rngmean_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_rngmean
else:
featStack = np.concatenate((featStack,self.rgb_band_rngmean))
if hasattr(self,'rgb_band_mode'):
featList.extend(['rgb_band_mode_R','rgb_band_mode_G','rgb_band_mode_B'])
featClass.extend(['rgb_band']*3)
featSizeInvar.extend([True]*3)
featHeightInvar.extend([True]*3)
featScale.extend([0]*3)
if featStack.size==0:
featStack = self.rgb_band_mode
else:
featStack = np.concatenate((featStack,self.rgb_band_mode))
if | |
yyyymmddHHMM
example: 2018-08-26 14:18:40 ->> 201808261418
"""
return dateString.replace("-", "").replace(" ", "").replace(":",
"").replace(
"/", "").replace("T", "")[:12]
# convert rule3 (ctime-style) matches to yyyymmddHHMM
def e_timeToString(dateString):
    """
    input: string
    output: string
    description: format a ctime-style dateString to yyyymmddHHMM
    example: Wed Aug 29 07:23:03 CST 2018 ->> 201808290723
    """
    # month abbreviations map position -> month number
    months = ("Jan", "Feb", "Mar", "Apr", "May", "Jun",
              "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")
    month_num = months.index(dateString[4:7]) + 1
    # year + zero-padded month + day + HHMM (colon stripped)
    return "%s%02d%s%s" % (dateString[-4:],
                           month_num,
                           dateString[8:10],
                           dateString[11:16].replace(":", ""))
def getCtimeOfFile(fileName):
    """
    input: string
    output: string
    description: Read the first line of the file and search it for a date
                 in one of the known formats; when found, return it
                 normalised to yyyymmddHHMM, otherwise return an empty
                 string.
    """
    if not os.path.exists(fileName):
        raise Exception(ErrorCode.GAUSS_502["GAUSS_50201"] % str(fileName))
    # each regex is paired with the converter that normalises its match;
    # checked in order, first match wins
    rules = [
        # 2018-08-26 14:18:40
        (r'\d{4}-[0-1]\d-[0-3]\d [0-2]\d:[0-6]\d:[0-6]\d', d_timeToString),
        # 2018/08/25 20:40:16
        (r'\d{4}/[0-1]\d/[0-3]\d [0-2]\d:[0-6]\d:[0-6]\d', d_timeToString),
        # Wed Aug 29 00:00:03 CST 2018
        (r'(Mon|Tue|Wed|Thu|Fri|Sat|Sun)\b ('
         r'Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\b [0-3]\d ['
         r'0-2]\d:[0-6]\d:[0-6]\d CST \d{4}', e_timeToString),
        # 2018-08-25T20:49:05+08:00
        (r'\d{4}-[0-1]\d-[0-3]\dT[0-2]\d:[0-6]\d:[0-6]\d', d_timeToString),
    ]
    # only the first line of the file is inspected
    with open(fileName, "r") as f:
        firstLine = f.readline().strip()
    for pattern, convert in rules:
        result = re.search(pattern, firstLine)
        if result:
            # change to the specified date format and return
            return convert(result.group())
    return ""
def log_copy_for_zenith():
    """
    function: collect log files under $GAUSSLOG whose timestamps fall in
              the requested [begin, end] window, copy them (preserving the
              directory layout) into the result directory, and optionally
              grep them for a keyword
    output: Successfully collected log files
    """
    g_logger.debug("Collecting log files.")
    g_jobInfo.jobName = "Collecting zenith log information"
    # BUG FIX: define the paths BEFORE the try block - the except handler
    # references logfilePath, which previously raised NameError (masking
    # the real error) when the failure happened before its assignment
    logfilePath = "%s/logfiles/" % g_resultdir
    keyword_result = "keyword_result.txt"
    try:
        # get envPath $GAUSSLOG
        gausslogPath = DefaultValue.getPathFileOfENV("GAUSSLOG")
        # match the log files that meet the time requirements
        # and add them to the archive
        g_logger.debug("Start matching log file.")
        for root, dirs, files in os.walk(gausslogPath):
            for f in files:
                logfile = os.path.join(root, f)
                # get matched files in the list
                if filterFile(f):
                    # modification time of the file as "%Y%m%d%H%M"
                    statInfo = os.stat(logfile)
                    mtime = time.strftime("%Y%m%d%H%M",
                                          time.localtime(statInfo.st_mtime))
                    # prefer the creation time parsed from the file's own
                    # first line; fall back to mtime when absent
                    ctime = getCtimeOfFile(logfile)
                    if not ctime:
                        ctime = mtime
                    timeList = [mtime, ctime]
                    # compare file time
                    if matchFile(g_opts.begin, g_opts.end, timeList):
                        # mirror the directory layout below gausslogPath
                        childDir = ''.join(root.split(gausslogPath)[1:])
                        childDir = childDir.lstrip("/")
                        targetDir = os.path.join(logfilePath, childDir)
                        if not os.path.exists(targetDir):
                            dir_permission = 0o700
                            os.makedirs(targetDir, mode=dir_permission)
                        g_file.cpFile(logfile, targetDir)
        g_logger.debug("Match log file completion.")
        g_jobInfo.successTask.append("Match log file")
    except Exception as e:
        if os.path.exists(logfilePath):
            g_file.cleanDirectoryContent(logfilePath)
        g_logger.debug("Failed to filter log files. Error:\n%s" % str(e))
        g_jobInfo.failedTask["Failed to filter log files"] = str(e)
        g_logger.log(json.dumps(g_jobInfo.__dict__))
        raise Exception("")
    if (g_opts.key):
        # Look for keyword matching in the dir and write to the specified
        # file.
        # NOTE(review): g_opts.key is interpolated into a shell command;
        # confirm it is sanitised upstream, otherwise this is vulnerable
        # to shell injection - a list-based subprocess call would be safer
        cmd = "echo \"\" > %s/logfiles/%s; for f in `find %s -type f`;" \
              " do grep -ai '%s' $f >> %s/logfiles/%s; done" % (
            g_resultdir, keyword_result, logfilePath, g_opts.key, g_resultdir,
            keyword_result)
        (status, output) = subprocess.getstatusoutput(cmd)
    g_logger.log(json.dumps(g_jobInfo.__dict__))
    g_logger.debug("Successfully collected log files.")
def log_check(logFileName):
    """
    function: log check
    input : logFileName
    output: 1 when the file name contains any configured keyword
            (whitespace-stripped, case-insensitive), 0 otherwise
    """
    lowered = logFileName.lower()
    for keyword in g_opts.content:
        keyword = keyword.replace(" ", "").lower()
        if len(keyword) > 0 and keyword in lowered:
            return 1
    return 0
def log_copy():
"""
function: collected log files
input : NA
output: NA
"""
g_logger.debug("Starting collect log.")
g_jobInfo.jobName = "Collecting pg_log information"
logfiletar = "log_%s.tar.gz" % datetime.datetime.now().strftime(
"%Y%m%d_%H%M%S%f")
keyword_result = "keyword_result.txt"
deleteCmd = "cd $GAUSSLOG && if [ -d tmp_gs_collector ];" \
"then rm -rf tmp_gs_collector; fi"
if (g_opts.key is not None and g_opts.key != ""):
g_logger.debug(
"Keyword for collecting log in base64 encode [%s]." % g_opts.key)
g_opts.key = base64.b64decode(g_opts.key)
g_logger.debug(
"Keyword for collecting log in plain text [%s]." % g_opts.key)
g_logger.debug(
"Speed limit to copy log files is %d KB/s." % g_opts.speedLimitKBs)
# Filter the log files, if has keyword, do not collect prf file
if (g_opts.key is not None and g_opts.key != ""):
cmd = "cd $GAUSSLOG && if [ -d tmp_gs_collector ];" \
"then rm -rf tmp_gs_collector; " \
"fi && (find . -type f -iname '*.log' -print)" \
" | xargs ls --time-style='+ %Y%m%d%H%M' -ll"
else:
cmd = "cd $GAUSSLOG && if [ -d tmp_gs_collector ];" \
"then rm -rf tmp_gs_collector; " \
"fi && (find . -type f -iname '*.log' -print && " \
"find . -type f -iname '*.prf' -print) " \
"| xargs ls --time-style='+ %Y%m%d%H%M' -ll"
(status, output) = subprocess.getstatusoutput(cmd)
logFiles = output.split("\n")
logs = []
Directorys = []
findFiles = 0
# If there is a log file filtered by time
if len(logFiles[0].split()) != 2:
for logFile in logFiles:
logFileName = logFile.split()[6]
logStartTime = formatTime(logFileName)
# If the log file name does not meet the format requirements,skip
if not logStartTime.isdigit() or len(logStartTime) != 12:
continue
logStartTime = int(logStartTime)
logEndTime = int(logFile.split()[5])
# Filter out the log we need
if (logEndTime > int(g_opts.begin) and logStartTime < int(
g_opts.end) and log_check(logFileName)):
logs.append(logFileName)
findFiles = 1
if findFiles == 1:
g_jobInfo.successTask.append("find log files")
else:
g_jobInfo.failedTask["find log files"] = ErrorCode.GAUSS_535[
"GAUSS_53504"] % 'log'
g_logger.debug("Successfully find log files.")
else:
g_jobInfo.failedTask["find log files"] = ErrorCode.GAUSS_535[
"GAUSS_53505"]
g_logger.debug("There is no log files.")
# Make temporary directory and copy
cmd = "cd $GAUSSLOG && mkdir -p -m %s tmp_gs_collector" % \
DefaultValue.DIRECTORY_MODE
(status, output) = subprocess.getstatusoutput(cmd)
for log in logs:
Directorys.append(os.path.dirname(log))
for directory in Directorys:
cmd = "cd $GAUSSLOG && mkdir -p -m %s tmp_gs_collector/'%s'" % (
DefaultValue.DIRECTORY_MODE, directory)
(status, output) = subprocess.getstatusoutput(cmd)
if status != 0:
(status1, output1) = subprocess.getstatusoutput(deleteCmd)
g_jobInfo.failedTask["mkdir"] = ErrorCode.GAUSS_535["GAUSS_53506"]
g_logger.log(json.dumps(g_jobInfo.__dict__))
g_logger.debug("Failed to mkdir. Error:\n%s." % output)
raise Exception("")
for log in logs:
if int(g_opts.speedLimitFlag) == 1:
cmd = "cd $GAUSSLOG && rsync --bwlimit=%d '%s' " \
"tmp_gs_collector/'%s'" % (
g_opts.speedLimitKBs, log, log)
else:
cmd = "cd $GAUSSLOG && cp '%s' tmp_gs_collector/'%s'" % (log, log)
(status, output) = subprocess.getstatusoutput(cmd)
if status != 0:
(status1, output1) = subprocess.getstatusoutput(deleteCmd)
g_jobInfo.failedTask["copy log files"] = replaceInvalidStr(output)
g_logger.log(json.dumps(g_jobInfo.__dict__))
g_logger.debug("Failed to copy logFiles. Error:\n%s." % output)
raise Exception("")
g_jobInfo.successTask.append("copy log files")
g_logger.debug("Successful to copy logFiles.")
# Filter zip files
cmd = "cd $GAUSSLOG && find . -type f -iname '*.zip' -print" \
" | xargs ls --time-style='+ %Y%m%d%H%M' -ll"
(status, output) = subprocess.getstatusoutput(cmd)
zipFiles = output.split("\n")
# If there is a zip file filtered by time
if len(zipFiles[0].split()) != 2:
for zipFile in zipFiles:
zipFileName = zipFile.split()[6]
logStartTime = formatTime(zipFileName)
# If the zip file name does not meet the format requirements,skip
if not logStartTime.isdigit() or len(logStartTime) != 12:
continue
logStartTime = int(logStartTime)
logEndTime = int(zipFile.split()[5])
# Filter out the log we need
if (logEndTime > int(g_opts.begin) and logStartTime < int(
g_opts.end)):
zipdir = os.path.dirname(zipFileName)
g_jobInfo.successTask.append(
"find log zip files: %s" % zipFileName)
cmd = "cd $GAUSSLOG && mkdir -p -m %s tmp_gs_collector/%s " \
"&& unzip -o %s -d tmp_gs_collector/%s " % \
(DefaultValue.DIRECTORY_MODE, zipdir,
zipFileName, zipdir)
(status, output) = subprocess.getstatusoutput(cmd)
if (status != 0):
g_jobInfo.failedTask[
"find log zip files"] = replaceInvalidStr(output)
g_logger.log(json.dumps(g_jobInfo.__dict__))
g_logger.debug(("Failed to filter zip files. Error:\n%s."
% output) + ("The cmd is %s " % cmd))
raise Exception("")
g_logger.debug("Successfully filter zip files.")
else:
g_logger.debug("There is no zip files.")
# Filter keywords
if (g_opts.key is not None and g_opts.key != ""):
if (len(logs) != 0):
g_opts.key = g_opts.key.replace('$', '\$')
g_opts.key = g_opts.key.replace('\"', '\\\"')
cmd = "cd $GAUSSLOG/tmp_gs_collector && "
cmd = "%s grep \"%s\" -r * > %s/logfiles/%s" % (
cmd, g_opts.key, g_resultdir, keyword_result)
(status, output) = subprocess.getstatusoutput(cmd)
if (status != 0 and output != ""):
cmd = "rm -rf $GAUSSLOG/tmp_gs_collector"
(status1, output1) = DefaultValue.retryGetstatusoutput(cmd)
g_jobInfo.failedTask[
"filter keyword"] = "keywords: %s, Error: %s" % (
g_opts.key, output)
g_logger.log(json.dumps(g_jobInfo.__dict__))
g_logger.debug(
"Failed to filter keyword. Error:\n%s." % output)
raise Exception("")
else:
cmd = "rm -rf $GAUSSLOG/tmp_gs_collector"
(status, output) = DefaultValue.retryGetstatusoutput(cmd)
g_logger.debug("Successfully filter keyword.")
g_jobInfo.successTask.append("filter keyword: %s" % g_opts.key)
else:
cmd = "touch %s/logfiles/%s && " % (g_resultdir, keyword_result)
cmd = "%s rm -rf $GAUSSLOG/tmp_gs_collector" % cmd
(status, output) = DefaultValue.retryGetstatusoutput(cmd)
if (status | |
})
def edit_score(request, code):
    """Set the score of one question on a quiz form (AJAX endpoint).

    Expects a POST whose JSON body is {"question_id": ..., "score": ...}.
    Only the form creator may edit; non-quiz forms, unknown questions and
    non-POST requests redirect back to the form editor.
    """
    if not request.user.is_authenticated:
        return HttpResponseRedirect(reverse("login"))
    formInfo = Form.objects.filter(code = code)
    #Checking if form exists
    if formInfo.count() == 0:
        return HttpResponseRedirect(reverse('404'))
    else: formInfo = formInfo[0]
    #Checking if form creator is user
    if formInfo.creator != request.user:
        return HttpResponseRedirect(reverse("403"))
    if not formInfo.is_quiz:
        return HttpResponseRedirect(reverse("edit_form", args = [code]))
    if request.method == "POST":
        data = json.loads(request.body)
        question_id = data["question_id"]
        question = formInfo.questions.filter(id = question_id)
        if question.count() == 0:
            return HttpResponseRedirect(reverse("edit_form", args = [code]))
        else: question = question[0]
        score = data["score"]
        # treat an empty score field as zero
        if score == "": score = 0
        question.score = score
        question.save()
        return JsonResponse({"message": "Success"})
    # BUG FIX: a non-POST request previously fell through and returned
    # None (a 500 in Django); redirect to the editor instead
    return HttpResponseRedirect(reverse("edit_form", args = [code]))
def answer_key(request, code):
    """Set the answer key of one question on a quiz form (AJAX endpoint).

    Expects a POST whose JSON body is {"question_id": ..., "answer_key": ...}.
    For short/paragraph questions the key is stored directly on the
    question; for choice questions the is_answer flags on the choices are
    rewritten ("answer_key" is a single choice pk for multiple choice and
    a list of pks for checkboxes).  Only the form creator may edit.
    """
    if not request.user.is_authenticated:
        return HttpResponseRedirect(reverse("login"))
    formInfo = Form.objects.filter(code = code)
    #Checking if form exists
    if formInfo.count() == 0:
        return HttpResponseRedirect(reverse('404'))
    else: formInfo = formInfo[0]
    #Checking if form creator is user
    if formInfo.creator != request.user:
        return HttpResponseRedirect(reverse("403"))
    if not formInfo.is_quiz:
        return HttpResponseRedirect(reverse("edit_form", args = [code]))
    if request.method == "POST":
        data = json.loads(request.body)
        question = Questions.objects.filter(id = data["question_id"])
        if question.count() == 0: return HttpResponseRedirect(reverse("edit_form", args = [code]))
        else: question = question[0]
        if question.question_type == "short" or question.question_type == "paragraph":
            question.answer_key = data["answer_key"]
            question.save()
        else:
            # clear all previous answer flags before applying the new key
            for i in question.choices.all():
                i.is_answer = False
                i.save()
            if question.question_type == "multiple choice":
                choice = question.choices.get(pk = data["answer_key"])
                choice.is_answer = True
                choice.save()
            else:
                for i in data["answer_key"]:
                    choice = question.choices.get(id = i)
                    choice.is_answer = True
                    choice.save()
            question.save()
        return JsonResponse({'message': "Success"})
    # BUG FIX: a non-POST request previously fell through and returned
    # None (a 500 in Django); redirect to the editor instead
    return HttpResponseRedirect(reverse("edit_form", args = [code]))
def feedback(request, code):
    """Set the feedback text of one question on a quiz form (AJAX).

    Expects a POST whose JSON body is {"question_id": ..., "feedback": ...}.
    Only the form creator may edit; non-quiz forms, unknown questions and
    non-POST requests redirect back to the form editor.
    """
    if not request.user.is_authenticated:
        return HttpResponseRedirect(reverse("login"))
    formInfo = Form.objects.filter(code = code)
    #Checking if form exists
    if formInfo.count() == 0:
        return HttpResponseRedirect(reverse('404'))
    else: formInfo = formInfo[0]
    #Checking if form creator is user
    if formInfo.creator != request.user:
        return HttpResponseRedirect(reverse("403"))
    if not formInfo.is_quiz:
        return HttpResponseRedirect(reverse("edit_form", args = [code]))
    if request.method == "POST":
        data = json.loads(request.body)
        # filter + count guard (consistent with edit_score) instead of
        # .get(), which raised an unhandled DoesNotExist (500) for bad ids
        question = formInfo.questions.filter(id = data["question_id"])
        if question.count() == 0:
            return HttpResponseRedirect(reverse("edit_form", args = [code]))
        question = question[0]
        question.feedback = data["feedback"]
        question.save()
        return JsonResponse({'message': "Success"})
    # BUG FIX: a non-POST request previously fell through and returned
    # None (a 500 in Django); redirect to the editor instead
    return HttpResponseRedirect(reverse("edit_form", args = [code]))
def view_form(request, code):
    """Public view of a form; forces login first when the form requires
    authenticated responders."""
    matches = Form.objects.filter(code = code)
    # unknown code -> 404 page
    if matches.count() == 0:
        return HttpResponseRedirect(reverse('404'))
    formInfo = matches[0]
    if formInfo.authenticated_responder and not request.user.is_authenticated:
        return HttpResponseRedirect(reverse("login"))
    return render(request, "index/view_form.html", {
        "form": formInfo
    })
def get_client_ip(request):
    """Best-effort client IP: the first hop of X-Forwarded-For when the
    header is present and non-empty, otherwise the direct REMOTE_ADDR."""
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        return forwarded.split(',')[0]
    return request.META.get('REMOTE_ADDR')
def submit_form(request, code):
    """Record a response to a form.

    POST creates a Responses row with a fresh 20-character code -
    attributed to the logged-in user, to the submitted e-mail address, or
    anonymously, depending on the form's settings - plus one Answer row
    per submitted value.  The confirmation template is rendered in every
    case (for GET, `code` is still the form's own code).
    """
    matches = Form.objects.filter(code = code)
    #Checking if form exists
    if matches.count() == 0:
        return HttpResponseRedirect(reverse('404'))
    formInfo = matches[0]
    if formInfo.authenticated_responder and not request.user.is_authenticated:
        return HttpResponseRedirect(reverse("login"))
    if request.method == "POST":
        code = ''.join(random.choice(string.ascii_letters + string.digits) for x in range(20))
        # attribute the response according to the form's settings
        extra = {}
        if formInfo.authenticated_responder:
            extra["responder"] = request.user
        elif formInfo.collect_email:
            extra["responder_email"] = request.POST["email-address"]
        response = Responses(response_code = code, response_to = formInfo,
                             responder_ip = get_client_ip(request), **extra)
        response.save()
        for field in request.POST:
            #Excluding csrf token and the email field
            if field == "csrfmiddlewaretoken" or field == "email-address":
                continue
            question = formInfo.questions.get(id = field)
            for value in request.POST.getlist(field):
                answer = Answer(answer = value, answer_to = question)
                answer.save()
                response.response.add(answer)
        response.save()
    return render(request, "index/form_response.html", {
        "form": formInfo,
        "code": code
    })
def responses(request, code):
    """Creator-only summary page of all responses to a form.

    Builds, per question, the list of Answer rows, and for choice-type
    questions a {question: {choice: count}} tally used by the template's
    charts.
    """
    if not request.user.is_authenticated:
        return HttpResponseRedirect(reverse("login"))
    formInfo = Form.objects.filter(code = code)
    #Checking if form exists
    if formInfo.count() == 0:
        return HttpResponseRedirect(reverse('404'))
    else: formInfo = formInfo[0]
    # FIX: authorise FIRST - only the creator may see responses.  This
    # check previously ran after all summaries were computed, doing the
    # whole tally for requests that end in a 403.
    if formInfo.creator != request.user:
        return HttpResponseRedirect(reverse("403"))
    responsesSummary = []
    choiceAnswered = {}
    filteredResponsesSummary = {}
    for question in formInfo.questions.all():
        answers = Answer.objects.filter(answer_to = question.id)
        if question.question_type == "multiple choice" or question.question_type == "checkbox":
            # count how many times each choice was picked
            choiceAnswered[question.question] = choiceAnswered.get(question.question, {})
            for answer in answers:
                choice = answer.answer_to.choices.get(id = answer.answer).choice
                choiceAnswered[question.question][choice] = choiceAnswered.get(question.question, {}).get(choice, 0) + 1
        responsesSummary.append({"question": question, "answers": answers})
    # copy the tallies into the structure the template expects
    # (removed the unused `keys` local that the old code computed here)
    for answr in choiceAnswered:
        filteredResponsesSummary[answr] = {}
        for choice in choiceAnswered[answr]:
            filteredResponsesSummary[answr][choice] = choiceAnswered[answr][choice]
    return render(request, "index/responses.html", {
        "form": formInfo,
        "responses": Responses.objects.filter(response_to = formInfo),
        "responsesSummary": responsesSummary,
        "filteredResponsesSummary": filteredResponsesSummary
    })
def response(request, code, response_code):
    """Show one response; computes the score when the form is a quiz.

    Visible to anyone when the form allows viewing scores, otherwise to
    the creator only.  Scoring rules:
      - short/paragraph: exact string match against the answer key
      - multiple choice: the selected choice pk must be the flagged one
      - checkbox: all (and only) the flagged choices must be selected;
        each checkbox question is scored once
    """
    formInfo = Form.objects.filter(code = code)
    #Checking if form exists
    if formInfo.count() == 0:
        return HttpResponseRedirect(reverse('404'))
    else: formInfo = formInfo[0]
    #Checking if form creator is user
    if not formInfo.allow_view_score:
        if formInfo.creator != request.user:
            return HttpResponseRedirect(reverse("403"))
    total_score = 0
    score = 0
    responseInfo = Responses.objects.filter(response_code = response_code)
    if responseInfo.count() == 0:
        return HttpResponseRedirect(reverse('404'))
    else: responseInfo = responseInfo[0]
    if formInfo.is_quiz:
        for i in formInfo.questions.all():
            total_score += i.score
        for i in responseInfo.response.all():
            if i.answer_to.question_type == "short" or i.answer_to.question_type == "paragraph":
                if i.answer == i.answer_to.answer_key: score += i.answer_to.score
            elif i.answer_to.question_type == "multiple choice":
                answerKey = None
                for j in i.answer_to.choices.all():
                    if j.is_answer: answerKey = j.id
                if answerKey is not None and int(answerKey) == int(i.answer):
                    score += i.answer_to.score
        _temp = []
        for i in responseInfo.response.all():
            if i.answer_to.question_type == "checkbox" and i.answer_to.pk not in _temp:
                answers = []
                answer_keys = []
                for j in responseInfo.response.filter(answer_to__pk = i.answer_to.pk):
                    answers.append(int(j.answer))
                    for k in j.answer_to.choices.all():
                        if k.is_answer and k.pk not in answer_keys: answer_keys.append(k.pk)
                _temp.append(i.answer_to.pk)
                # BUG FIX: compare as sets - the previous ordered-list
                # equality scored a fully correct selection as wrong
                # whenever the DB returned the rows in a different order
                # than the choices
                if set(answers) == set(answer_keys): score += i.answer_to.score
    return render(request, "index/response.html", {
        "form": formInfo,
        "response": responseInfo,
        "score": score,
        "total_score": total_score
    })
def edit_response(request, code, response_code):
    """Let a responder edit a previously submitted response.

    GET renders the edit form pre-filled with the existing response.
    POST replaces ALL existing Answer rows of the response with the newly
    submitted values, then redirects to the score page (quizzes) or the
    confirmation page (plain forms).  When the form requires
    authenticated responders, only the original responder may edit.
    """
    formInfo = Form.objects.filter(code = code)
    #Checking if form exists
    if formInfo.count() == 0:
        return HttpResponseRedirect(reverse('404'))
    else: formInfo = formInfo[0]
    # the response must belong to this form
    response = Responses.objects.filter(response_code = response_code, response_to = formInfo)
    if response.count() == 0:
        return HttpResponseRedirect(reverse('404'))
    else: response = response[0]
    if formInfo.authenticated_responder:
        if not request.user.is_authenticated:
            return HttpResponseRedirect(reverse("login"))
        if response.responder != request.user:
            return HttpResponseRedirect(reverse('403'))
    if request.method == "POST":
        # claim ownership of a previously anonymous response
        if formInfo.authenticated_responder and not response.responder:
            response.responder = request.user
            response.save()
        if formInfo.collect_email:
            # NOTE(review): KeyError (500) if the template omits the
            # email-address field - confirm it is always present when
            # collect_email is enabled
            response.responder_email = request.POST["email-address"]
            response.save()
        #Deleting all existing answers before re-adding the submitted ones
        for i in response.response.all():
            i.delete()
        for i in request.POST:
            #Excluding csrf token and email address
            if i == "csrfmiddlewaretoken" or i == "email-address":
                continue
            question = formInfo.questions.get(id = i)
            for j in request.POST.getlist(i):
                answer = Answer(answer=j, answer_to = question)
                answer.save()
                response.response.add(answer)
        response.save()
        if formInfo.is_quiz:
            return HttpResponseRedirect(reverse("response", args = [formInfo.code, response.response_code]))
        else:
            return render(request, "index/form_response.html", {
                "form": formInfo,
                "code": response.response_code
            })
    return render(request, "index/edit_response.html", {
        "form": formInfo,
        "response": response
    })
def contact_form_template(request):
    """Create a pre-built "Contact information" form for the signed-in user.

    POST only; returns the new form's share code as JSON.
    """
    # Creator must be authenticated
    if not request.user.is_authenticated:
        return HttpResponseRedirect(reverse("login"))
    # Create a blank form API
    if request.method == "POST":
        code = ''.join(random.choice(string.ascii_letters + string.digits) for x in range(30))
        # Canned questions for a contact form, in display order
        questions = []
        for q_type, text, required in (
            ("short", "Name", True),
            ("short", "Email", True),
            ("paragraph", "Address", True),
            ("short", "Phone number", False),
            ("paragraph", "Comments", False),
        ):
            question = Questions(question_type = q_type, question = text, required = required)
            question.save()
            questions.append(question)
        form = Form(code = code, title = "Contact information", creator=request.user, background_color="#e2eee0", allow_view_score = False, edit_after_submit = True)
        form.save()
        for question in questions:
            form.questions.add(question)
        # typo fix: message previously read "Sucess"
        return JsonResponse({"message": "Success", "code": code})
    # previously fell off the end (implicit None -> server error) on non-POST
    return JsonResponse({"message": "POST required"}, status=405)
def customer_feedback_template(request):
    """Create a pre-built "Customer Feedback" form for the signed-in user.

    POST only; returns the new form's share code as JSON.
    """
    # Creator must be authenticated
    if not request.user.is_authenticated:
        return HttpResponseRedirect(reverse("login"))
    # Create a blank form API
    if request.method == "POST":
        code = ''.join(random.choice(string.ascii_letters + string.digits) for x in range(30))
        # Multiple-choice feedback categories
        feedback_type = Questions(question = "Feedback Type", question_type="multiple choice", required=False)
        feedback_type.save()
        for choice_text in ("Comments", "Questions", "Bug Reports", "Feature Request"):
            choice = Choices(choice = choice_text)
            choice.save()
            feedback_type.choices.add(choice)
        feedback_type.save()
        feedback = Questions(question = "Feedback", question_type="paragraph", required=True)
        feedback.save()
        suggestion = Questions(question = "Suggestions for improvement", question_type="paragraph", required=False)
        suggestion.save()
        name = Questions(question = "Name", question_type="short", required=False)
        name.save()
        email = Questions(question= "Email", question_type="short", required=False)
        email.save()
        form = Form(code = code, title = "Customer Feedback", creator=request.user, background_color="#e2eee0", confirmation_message="Thanks so much for giving us feedback!",
            description = "We would love to hear your thoughts or feedback on how we can improve your experience!", allow_view_score = False, edit_after_submit = True)
        form.save()
        for question in (feedback_type, feedback, suggestion, name, email):
            form.questions.add(question)
        # typo fix: message previously read "Sucess"
        return JsonResponse({"message": "Success", "code": code})
    # previously fell off the end (implicit None -> server error) on non-POST
    return JsonResponse({"message": "POST required"}, status=405)
def event_registration_template(request):
# Creator must be authenticated
if not request.user.is_authenticated:
return HttpResponseRedirect(reverse("login"))
# Create a blank form API
if request.method == "POST":
code = ''.join(random.choice(string.ascii_letters + string.digits) for x in range(30))
name = Questions(question="Name", question_type= "short", required=False)
name.save()
email = Questions(question = "email", question_type="short", required=True)
email.save()
organization = Questions(question = "Organization", question_type= "short", required=True)
organization.save()
day1 = Choices(choice="Day 1")
day1.save()
day2 = Choices(choice= "Day 2")
day2.save()
day3 = Choices(choice= "Day 3")
day3.save()
day = Questions(question="What days will you attend?", question_type="checkbox", required=True)
day.save()
day.choices.add(day1)
day.choices.add(day2)
day.choices.add(day3)
day.save()
dietary_none = Choices(choice="None")
dietary_none.save()
dietary_vegetarian = Choices(choice="Vegetarian")
dietary_vegetarian.save()
dietary_kosher = Choices(choice="Kosher")
dietary_kosher.save()
dietary_gluten = Choices(choice = "Gluten-free")
dietary_gluten.save()
dietary = Questions(question = | |
self.check_increasing_bool(node1.corner_lower,node1.corner_upper,node2.corner_lower,node2.corner_upper,self.in_feats,self.de_feats,self.nmt_feats):
return np.int(node1.predicted_class>node2.predicted_class)
elif self.check_increasing_bool(node2.corner_lower,node2.corner_upper,node1.corner_lower,node1.corner_upper,self.in_feats,self.de_feats,self.nmt_feats):
return np.int(node2.predicted_class>node1.predicted_class)
else:
return 0.
def get_split_indxs(self,X,ifeat,split_values,data_type):
if data_type=='ordinal':
if len(X.shape)==2:
indx_left=np.arange(X.shape[0])[X[:,ifeat-1]<=split_values]
indx_right=np.arange(X.shape[0])[X[:,ifeat-1]>split_values]
if len(indx_left)>0 and len(indx_right)>0:
split_pt_act=(np.max(X[indx_left,ifeat-1])+np.min(X[indx_right,ifeat-1]))/2.
else:
split_pt_act=split_values
else:
indx_left=np.arange(1) if X[ifeat-1]<=split_values else np.arange(0)
indx_right=np.arange(1) if X[ifeat-1]>split_values else np.arange(0)
split_pt_act=split_values #(np.max(X[indx_left,ifeat-1])+np.min((X[indx_right,ifeat-1]))/2.
else:
if len(X.shape)==2:
indx_bool_left=np.asarray([( X[i,ifeat-1] in split_values ) for i in np.arange(X.shape[0])],dtype='bool')
indx_bool_right= indx_bool_left==False
indx_left=np.arange(X.shape[0])[indx_bool_left]
indx_right=np.arange(X.shape[0])[indx_bool_right]
else: # treat as one row
indx_left=np.arange(1) if X[ifeat-1] in split_values else np.arange(0)# X.shape[0])[[(X[i,ifeat-1] in split_values) for i in np.arange(X.shape[0])]]
indx_right=np.arange(1) if X[ifeat-1] not in split_values else np.arange(0)
split_pt_act=split_values
return [indx_left,indx_right,split_pt_act]
    def printtree(self, node=None, indent=''):
        """Pretty-print the (sub)tree rooted at `node` (defaults to the root).

        Leaves show their leaf index, probabilities and tally; branch nodes
        show the split test, then the True/False subtrees, each indented a
        further two spaces.
        """
        if node is None:
            node = self.root_node
        # Is this a leaf node?
        if node.is_leaf():
            print('Leaf ' + str(node.index_leaf) + ': ' + str(node._probabilities) + str(node.tally))  # + ' ' + str(node.corner_lower) + ' ' + str(node.corner_upper))
        else:
            # branch: describe the split ('<=' for ordinal, ' in ' for nominal)
            print('feat_' + str(node.decision_feat) + '' + ('<=' if node.decision_data_type == 'ordinal' else ' in ') + str(node.decision_values) + '? ' + str(node.tally))
            # Print the branches (T = test satisfied, F = not satisfied)
            print(indent + 'T->', end=" ")
            self.printtree(node.left, indent + '  ')
            print(indent + 'F->', end=" ")
            self.printtree(node.right, indent + '  ')
@property
def feat_vals(self):
if self._feat_vals is None:
if not self.train_X is None: # attempt to extract feat data types
fv=[]
for ifeat in np.arange(self.train_X.shape[1])+1:
feat_vals=np.sort(np.unique(self.train_X[:,ifeat-1]))
if np.sum(np.abs(np.asarray(feat_vals,dtype='int')-feat_vals))<1e-9:
feat_vals=np.asarray(feat_vals,dtype='int')
fv.append(tuple( feat_vals))
self._feat_vals=fv
return self._feat_vals
@property
def feat_data_types(self):
if self._feat_data_types=='auto':
if not self.train_X is None: # attempt to extract feat data types
feat_data_types=[]
for ifeat in np.arange(self.train_X.shape[1]): # all treated as ordinal
feat_data_types.append('ordinal')
# feat_vals=np.unique(np.ravel(self.train_X[:,ifeat]))
# if False and np.sum(np.abs(np.asarray(feat_vals,dtype='int')-feat_vals))<1e-9: # ELIMINATE NOMINAL FEAT DETECTION FOR NOW
# #print(str(feat_vals) +str(np.sum(np.abs(np.arange(np.min(feat_vals),np.min(feat_vals)+len(feat_vals),dtype='int')-feat_vals))<1e-9))
# if np.sum(np.abs(np.arange(np.min(feat_vals),np.min(feat_vals)+len(feat_vals),dtype='int')-feat_vals))<1e-9:
# if len(feat_vals)<=2:
# feat_data_types[ifeat]='nominal'
self._feat_data_types=feat_data_types
return self._feat_data_types
@property
def feat_labels(self):
if self._feat_labels=='auto':
self._feat_labels=[str(i) for i in np.arange(self.n_features)+1 ]
return self._feat_labels
    @timeit  # about 30% faster than the simple technique
    def get_increasing_leaf_node_pairs_new_old(self):
        """Build the monotone-ordering edge set over leaf pairs.

        For each leaf, only leaves under the node its hypercube 'drops down'
        to are compared (smart filter), and transitive edges already implied
        by the growing DiGraph (ancestors/descendants) are skipped.
        Returns the edges (i, j) meaning leaf i must not exceed leaf j.
        """
        probs, lowers, uppers = self.get_corner_matrices()
        G = nx.DiGraph()
        in_feats = np.asarray(self.in_feats)
        de_feats = np.asarray(self.de_feats)
        mt_feats = list(in_feats).copy()
        for i in np.arange(len(de_feats)): mt_feats.append(de_feats[i])
        nmt_feats = [f for f in np.arange(self.n_features) if f not in mt_feats]
        # initialise graph with comparison of leaf 0 with all other leaves:
        n_leaves = len(self.leaf_nodes)
        for i in np.arange(n_leaves):
            # get master list of comparable leaves
            smart_filter = True  # ~10% faster than comparing against all leaves
            if smart_filter:
                # restrict to leaves under the deepest node containing this hypercube
                last_comparible_node = self.drop_down_hypercube(lowers[i, :], uppers[i, :])
                leaves_to_check = set(self.get_leaf_ids_under(last_comparible_node))
            else:
                leaves_to_check = set(np.arange(n_leaves))
            leaves_to_check.remove(i)
            # check outgoing edges (i -> j)
            check = leaves_to_check.copy()
            if i in G.nodes():
                # edges to ancestors/descendants are already implied transitively
                check.difference_update(nx.ancestors(G, i))
                check.difference_update(nx.descendants(G, i))
            while len(check) > 0:
                j = check.pop()
                if self.check_increasing_bool(lowers[i, :], uppers[i, :], lowers[j, :], uppers[j, :], in_feats, de_feats, nmt_feats):
                    # G.add_node(j)
                    G.add_edge(i, j)
                    # anything below j is now reachable from i; skip it
                    check.difference_update(nx.descendants(G, j))
            # check incoming edges (j -> i)
            check = leaves_to_check.copy()
            if i in G.nodes():
                check.difference_update(nx.ancestors(G, i))
                check.difference_update(nx.descendants(G, i))
            while len(check) > 0:
                j = check.pop()
                if self.check_increasing_bool(lowers[j, :], uppers[j, :], lowers[i, :], uppers[i, :], in_feats, de_feats, nmt_feats):
                    G.add_edge(j, i)
                    # anything above j already reaches i transitively; skip it
                    check.difference_update(nx.ancestors(G, j))
        return G.edges()
def get_increasing_leaf_node_pairs_new(self):
probs,lowers,uppers=self.get_corner_matrices()
max_pairs=int(np.round(lowers.shape[0]*lowers.shape[0]))
incr_pairs=np.zeros([max_pairs,2],dtype=np.int32)
n_pairs_new=isoensemble.get_increasing_leaf_node_pairs(lowers,uppers,self.mt_feat_types,incr_pairs)
#incr_pairs_old=self.get_increasing_leaf_node_pairs_simple()
return incr_pairs[0:n_pairs_new,:]
@timeit
def get_increasing_leaf_node_pairs_simple (self):
probs,lowers,uppers=self.get_corner_matrices()
G=nx.DiGraph()
in_feats=self.in_feats
de_feats=self.de_feats #np.asarray(self.decr_feats)-1
mt_feats=list(in_feats).copy()
for i in np.arange(len(de_feats)): mt_feats.append(de_feats[i])
nmt_feats=[f for f in np.arange(self.n_features) if f not in mt_feats]
# initialise graph with comparison of leaf 0 with all other leaves:
n_leaves=len(self.leaf_nodes)
for i in np.arange(n_leaves):
for j in np.arange(n_leaves):
if i!=j:
if self.check_increasing_bool(lowers[i,:], uppers[i,:],lowers[j,:], uppers[j,:],in_feats,de_feats,nmt_feats):
G.add_edge(i,j)
if self.check_increasing_bool(lowers[j,:], uppers[j,:],lowers[i,:], uppers[i,:],in_feats,de_feats,nmt_feats):#(,uppers[i,:]):
G.add_edge(j,i)
return G.edges()
# def get_increasing_leaf_node_pairs (self):
# pairs=[]
# for ileaf in np.arange(len(self.leaf_nodes)):
# leaf1=self.leaf_nodes[ileaf]
# for jleaf in np.arange(ileaf+1,len(self.leaf_nodes)):
# if ileaf !=jleaf:
# leaf2=self.leaf_nodes[jleaf]
# if self.check_if_increasing(leaf1, leaf2): # if true leaf1<leaf2
# pairs.append([leaf1.index_leaf,leaf2.index_leaf])
# elif self.check_if_increasing(leaf2, leaf1): # if true leaf1<leaf2
# pairs.append([leaf2.index_leaf,leaf1.index_leaf])
# return pairs
def check_increasing_bool(self,n1_lower, n1_upper,n2_lower, n2_upper,mt_incr_feats,mt_decr_feats,nmt_feats):
if len(mt_incr_feats)>0:
mt_incr=np.all(np.asarray(n1_lower)[mt_incr_feats]<np.asarray(n2_upper)[mt_incr_feats])
else:
mt_incr=True
if mt_incr:
if len(mt_decr_feats)>0:
mt_decr=np.all(np.asarray(n1_upper)[mt_decr_feats]>np.asarray(n2_lower)[mt_decr_feats])
else:
mt_decr=True
if mt_decr:
if len(nmt_feats)==0:
return True
elif np.all(np.asarray(n1_upper)[nmt_feats]>np.asarray(n2_lower)[nmt_feats]):
return np.all(np.asarray(n2_upper)[nmt_feats]>np.asarray(n1_lower)[nmt_feats])
else:
return False
else:
return False
else:
return False
# def check_if_increasing(self,node1, node2):
# mt_feats=self.incr_feats.copy()
# for i in np.arange(len(self.decr_feats)): mt_feats.append(self.decr_feats[i])
# nmt_feats=[f for f in np.arange(self.n_features)+1 if f not in mt_feats]
# if len(mt_feats)==0:
# return False
# else:
# n1_lower=np.asarray(node1.corner_lower.copy(),dtype='float')
# n2_upper=np.asarray(node2.corner_upper.copy(),dtype='float')
# n2_lower=np.asarray(node2.corner_lower.copy(),dtype='float')
# n1_upper=np.asarray(node1.corner_upper.copy(),dtype='float')
# # test incr feats:
# if len(self.incr_feats)>0:
# mt_increasing_feats=np.sum(n1_lower[np.asarray(self.incr_feats)-1]<n2_upper[np.asarray(self.incr_feats)-1])==len(self.incr_feats)
# if not mt_increasing_feats:
# return False
# # test decr feats:
# if len(self.decr_feats)>0:
# mt_decreasing_feats=np.sum(n1_upper[np.asarray(self.decr_feats)-1]>n2_lower[np.asarray(self.decr_feats)-1])==len(self.decr_feats)
# if not mt_decreasing_feats:
# return False
# # check partial monotonicity overap in unconstrained features
# if len(nmt_feats)==0:
# nmt_overlap=True
# else:
# nmt_overlap=np.sum(n1_upper[np.asarray(nmt_feats)-1]>n2_lower[np.asarray(nmt_feats)-1])==len(nmt_feats)
# nmt_overlap=nmt_overlap and np.sum(n1_lower[np.asarray(nmt_feats)-1]<n2_upper[np.asarray(nmt_feats)-1])==len(nmt_feats)
# return nmt_overlap
def get_corner_matrices(self):
n_leaves=len(self.leaf_nodes)
lower=np.zeros([n_leaves, self.n_features],dtype=np.float64)
upper=np.zeros([n_leaves, self.n_features],dtype=np.float64)
probabilities=np.zeros([n_leaves, self.n_classes])
for i in np.arange(n_leaves):
lower[i,:]=self.leaf_nodes[i].corner_lower.copy()
upper[i,:]=self.leaf_nodes[i].corner_upper.copy()
if len(self.leaf_nodes[i].probabilities)==0:
print('guh!!')
probabilities[i,:]=self.leaf_nodes[i].probabilities.copy()
return [probabilities,lower,upper]
def drop_down_hypercube(self,lower, upper,node=None):
if node is None: node=self.root_node
if node.is_leaf():
return node
elif upper[node.decision_feat-1]<=node.decision_values:
return self.drop_down_hypercube(lower, upper,node.left)
elif lower[node.decision_feat-1]>node.decision_values:
return self.drop_down_hypercube(lower, upper,node.right)
else:
return node
def get_leaf_ids_under(self,node):#,leaf_ids=[]):
if node.index==self.root_node.index:
return np.arange(len(self.leaf_nodes))
elif node.is_leaf():
return np.asarray([node.index_leaf])
else:
leaves=[]
for leaf in self.leaf_nodes:
if node.index in leaf.path:
leaves.append(leaf.index_leaf)
return np.asarray(leaves)
# if node.is_leaf():
# #leaf_ids.append(node.index_leaf)
# return [node.index_leaf] #leaf_ids.copy()
# else:
# leaf_ids_left= self.get_leaf_ids_under(node.left,leaf_ids).copy()
# leaf_ids_right=self.get_leaf_ids_under(node.right,leaf_ids).copy()
# return leaf_ids_left+leaf_ids_right
    def number_nodes(self, calc_resubs_err=False):
        """Renumber all nodes breadth-first and rebuild leaf/branch lists.

        Assigns node.index in BFS order (root = 0), node.index_leaf in leaf
        discovery order, and node.path as the list of ancestor indices.
        When calc_resubs_err is True, each leaf's resubstitution error is
        also accumulated up through its ancestors. Tracks the peak leaf
        count seen across calls in self.peak_leaves.
        """
        if not self.root_node is None:
            self.leaf_nodes = []
            self.branch_nodes = []
            queue = deque()
            queue.append(self.root_node)
            index = 0
            index_leaf = 0
            while len(queue) > 0:
                node = queue.popleft()
                # reset branch accumulators; repopulated below if requested
                node.resubst_err_branch = 0
                node.num_leaves_branch = 0
                node.index = index
                if node.index != self.root_node.index:
                    # path = ancestor indices from root down to parent
                    node.path = node.parent.path + [node.parent.index]
                index = index + 1
                if node.is_leaf():
                    node.index_leaf = index_leaf
                    self.leaf_nodes.append(node)
                    index_leaf = index_leaf + 1
                    if calc_resubs_err:
                        if node.parent != None:  # not root node
                            self.propagate_leaf_data_to_parents(node, node.parent)
                else:
                    self.branch_nodes.append(node)
                    queue.append(node.left)
                    queue.append(node.right)
            if len(self.leaf_nodes) > self.peak_leaves:
                self.peak_leaves = len(self.leaf_nodes)
def propagate_leaf_data_to_parents(self,node,parent):
parent.resubst_err_branch=parent.resubst_err_branch+node.resubst_err_node
parent.num_leaves_branch=parent.num_leaves_branch+1
if parent.parent!=None:# notroot node:
self.propagate_leaf_data_to_parents(node,parent.parent)
    def drop_down(self, node, Xi, path):
        """Route a single sample Xi from `node` to a leaf, recording the path.

        Returns [leaf_index, full-length probability list, visited node
        indices]. Leaf probabilities are expanded to cover all classes in
        self.classes, inserting 0. for classes the leaf never saw.
        Note: `path` is mutated in place by the recursion.
        """
        path.append(node.index)
        if node.is_leaf():
            probs = []
            probs_raw = node.probabilities
            i_raw_prob = 0
            for i in np.arange(len(self.classes)):
                if i in node.classes:  # class observed at this leaf
                    probs.append(probs_raw[i_raw_prob])
                    i_raw_prob = i_raw_prob + 1
                else:
                    probs.append(0.)
            return [node.index_leaf, probs, path]
        else:  # not a leaf node, evaluate and send on way
            [indx_left, indx_right, split_pt_act] = self.get_split_indxs(Xi, node.decision_feat, node.decision_values, node.decision_data_type)
            # NOTE(review): indx_left is a numpy array (arange(1) or arange(0));
            # `indx_left==[0]` relies on numpy truthiness of the comparison
            # result (non-empty [True] vs empty array) — fragile but intended
            # to mean "sample goes left". Verify before refactoring.
            return self.drop_down(node.left if indx_left == [0] else node.right, Xi, path)
        # unreachable: both branches above return
        return
def apply_base(self,X):
probs=np.zeros([X.shape[0],len(self.classes)])
paths=[]
ileaf_indexes=np.zeros(X.shape[0])
for i in np.arange(X.shape[0]):
if i==26:
pass
[ileaf,probs_,path]=self.drop_down(self.root_node,X[i,:],[])
probs[i,:]=probs_
paths.append(path)
ileaf_indexes[i]=ileaf
return [ileaf_indexes,probs,paths]
def predict_proba(self,X):
[ileaf,probs_,paths]=self.apply_base(X)
return probs_
def apply(self,X):
[ileaf,probs_,paths]=self.apply_base(X)
return ileaf
def get_pairs_to_split(self,pm_pairs_clean,nmt_pairs,normalise_nmt_nodes):
if normalise_nmt_nodes==0:
pairs_to_split=[] #if normalise_nmt_nodes==1 else pm_pairs_clean
elif normalise_nmt_nodes==1:
pairs_to_split=nmt_pairs
elif normalise_nmt_nodes==2:
pairs_to_split=pm_pairs_clean
elif normalise_nmt_nodes==3:
pairs_to_split=self.get_non_monotone_pairs_extended(nmt_pairs,pm_pairs_clean)
return pairs_to_split
# def set_leaf_sizes(self,sizes):
# i=0
# for leaf in self.leaf_nodes:
# leaf.size=sizes[0]
# i=i+1
# returns 0: no changes required to monotonise
# 1: changes made to monotonise
    @timeit
    def monotonise(self, incr_feats, decr_feats, sample_reweights=None, normalise_nmt_nodes=0, split_criterion=None, split_class=None, split_weight=None, min_split_weight=0.5, univariate_distns=None):
        """Make the tree's leaf probabilities monotone in the given features.

        Finds the ordered leaf pairs implied by incr_feats/decr_feats,
        optionally grows/segregates non-monotone leaves first
        (normalise_nmt_nodes modes 1-3), then solves a generalised isotonic
        regression per cumulative class probability and writes the adjusted
        pdfs back onto the leaves.

        Returns 0 if no changes were required, 1 if probabilities changed.
        """
        self.set_mt_feats(incr_feats, decr_feats)
        self.split_criterion = split_criterion
        self.split_class = split_class
        self.split_weight = split_weight
        self.min_split_weight = min_split_weight
        self.univariate_distns = univariate_distns
        use_latest_nmt_pairs = True
        if use_latest_nmt_pairs:
            # WAY FOR MOST SOLVES: fast native pair search + pruning
            pm_pairs = self.get_increasing_leaf_node_pairs_new()
            pm_pairs_clean = self.eliminate_unnecessary_incr_pairs(pm_pairs)
            nmt_pairs = self.get_non_monotone_pairs(pm_pairs_clean)
        else:
            # ALTERNATE SIMPLE WAY: O(n^2) reference search, no pruning
            pm_pairs = self.get_increasing_leaf_node_pairs_simple()
            pm_pairs_clean = pm_pairs
            nmt_pairs = self.get_non_monotone_pairs(pm_pairs)
        pairs_to_split = self.get_pairs_to_split(pm_pairs_clean, nmt_pairs, normalise_nmt_nodes)
        if nmt_pairs == []:  # already monotone
            return 0
        if normalise_nmt_nodes > 0:
            if False:
                ###### TECHNIQUE A - very slow due to continually recalculating get_increasing_leaf_node_pairs_new() ##########
                # pick first nmt edge and normalise nodes, restarting after each change
                keep_going = True
                while keep_going:
                    keep_going = False
                    for pair in nmt_pairs:
                        change_made1 = self.grow_segregated_nodes(self.leaf_nodes[pair[0]], self.leaf_nodes[pair[1]])
                        change_made2 = self.grow_segregated_nodes(self.leaf_nodes[pair[1]], self.leaf_nodes[pair[0]])
                        if change_made1 or change_made2:
                            self.number_nodes()
                            if use_latest_nmt_pairs:
                                pm_pairs = self.get_increasing_leaf_node_pairs_new()
                                pm_pairs_clean = self.eliminate_unnecessary_incr_pairs(pm_pairs)
                                nmt_pairs = self.get_non_monotone_pairs(pm_pairs_clean)
                            else:
                                pm_pairs = self.get_increasing_leaf_node_pairs_simple()
                                pm_pairs_clean = pm_pairs
                                nmt_pairs = self.get_non_monotone_pairs(pm_pairs)
                            keep_going = True
                            break
            else:
                ####### TECHNIQUE B - between 50% and 85% faster: batch all splits, then recompute ##########
                keep_going = True
                while keep_going:
                    keep_going = False
                    change_made = False
                    changed_nodes = []
                    for pair in pairs_to_split:
                        if True:  # filtering on changed_nodes proved slower; kept unconditional
                            change_made1 = self.grow_segregated_nodes(self.leaf_nodes[pair[0]], self.leaf_nodes[pair[1]])
                            change_made2 = self.grow_segregated_nodes(self.leaf_nodes[pair[1]], self.leaf_nodes[pair[0]])
                            if change_made1: changed_nodes.append(pair[0])
                            if change_made2: changed_nodes.append(pair[1])
                            change_made = change_made or change_made1 or change_made2
                    if change_made:
                        self.number_nodes()
                        pm_pairs = self.get_increasing_leaf_node_pairs_new()
                        pm_pairs_clean = self.eliminate_unnecessary_incr_pairs(pm_pairs)
                        nmt_pairs = self.get_non_monotone_pairs(pm_pairs_clean)
                        pairs_to_split = self.get_pairs_to_split(pm_pairs_clean, nmt_pairs, normalise_nmt_nodes)
                        keep_going = True
        cleaned_pairs = self.clean_monotone_island_pairs(pm_pairs_clean, nmt_pairs)
        if sample_reweights is None:
            weights = self.get_leaf_sizes()
        else:
            weights = self.recalc_leaf_sizes(sample_reweights)
        cdf = self.get_cum_probabilities()
        # solve new pdfs: isotonic-regress each cumulative class column
        cdf_iso = np.ones(cdf.shape)
        pdf_iso = np.zeros(cdf.shape)
        cum_sse = 0.
        for i_class in np.arange(cdf.shape[1]):
            probs_class = cdf[:, i_class]
            gir = isoensemble.GeneralisedIsotonicRegression()
            if i_class < cdf.shape[1] - 1:
                # last column stays 1.0 by construction; round to damp solver noise
                cdf_iso[:, i_class] = np.round(gir.fit(probs_class, cleaned_pairs, sample_weight=weights, increasing=False), 6)
            if i_class == 0:
                pdf_iso[:, i_class] = cdf_iso[:, i_class]
            else:
                pdf_iso[:, i_class] = cdf_iso[:, i_class] - cdf_iso[:, i_class - 1]
        cum_sse = np.sum((cdf_iso - cdf) ** 2)
        # update leaf probabilities
        if cum_sse > 1e-7:  # some changes were made
            for leaf in self.leaf_nodes:
                leaf._probabilities = list(pdf_iso[leaf.index_leaf, :])
                if np.isnan(leaf._probabilities[0]):
                    print('what the')
            res = 1
        else:  # effectively no changes made
            res = 0
        # check we now have monotone tree:
        nmt_pairs2 = self.get_non_monotone_pairs(pm_pairs_clean)
        if len(nmt_pairs2) > 0:
            print('ERROR: orig nmt pairs:' + str(len(nmt_pairs)) + " now: " + str(len(nmt_pairs2)))
        return res
def get_leaf_sizes(self):
weights=np.zeros(len(self.leaf_nodes))
for leaf in self.leaf_nodes:
weights[leaf.index_leaf]=leaf.size
return weights
def recalc_leaf_sizes(self,new_orig_sample_weights):
weights=np.zeros(len(self.leaf_nodes))
for leaf in self.leaf_nodes:
for i in leaf.train_data_idx:
weights[leaf.index_leaf]=weights[leaf.index_leaf] +new_orig_sample_weights[i] #leaf.size
return weights
def get_cum_probabilities(self) :
cdf=np.zeros([len(self.leaf_nodes),len(self.classes)])
for leaf in self.leaf_nodes:
for i_class in np.arange(len(self.classes)):
if i_class==0:
cdf[leaf.index_leaf,i_class]=leaf.probabilities[0]
else:
cdf[leaf.index_leaf,i_class]=cdf[leaf.index_leaf,i_class-1]+leaf.probabilities[i_class]
return cdf
    def extend_graph_until(self, master_graph, output_graph, pair, dirn, terminate_at):
        """Copy edges from master_graph into output_graph from `pair` outward.

        dirn == 1 walks forward through successors of pair[1]; otherwise
        walks backward through predecessors of pair[0]. Recursion stops at
        any leaf whose predicted_class equals `terminate_at` (that node and
        its edge are excluded). output_graph is mutated in place.
        """
        if dirn == 1:
            next_nodes = master_graph.successors(pair[1])
            for inode in next_nodes:
                if self.leaf_nodes[inode].predicted_class != terminate_at:
                    new_pair = [pair[1], inode]
                    output_graph.add_edge(new_pair[0], new_pair[1])
                    self.extend_graph_until(master_graph, output_graph, new_pair, dirn, terminate_at)
        else:
            next_nodes = master_graph.predecessors(pair[0])
            for inode in next_nodes:
                if self.leaf_nodes[inode].predicted_class != terminate_at:
                    new_pair = [inode, pair[0]]
                    output_graph.add_edge(new_pair[0], new_pair[1])
                    self.extend_graph_until(master_graph, output_graph, new_pair, dirn, terminate_at)
        return
@timeit
def clean_monotone_island_pairs(self,pm_pairs_clean,nmt_pairs):
graph=nx.DiGraph()
graph.add_edges_from(pm_pairs_clean)
ud_graph=graph.to_undirected()
nodes_with_constraints =set(graph.nodes())
unchecked_nodes=nodes_with_constraints.copy()
polluted_nodes=set(np.unique(np.ravel(np.asarray(nmt_pairs))))
safe_island_nodes_to_remove=[]#set()
for n in graph.nodes():
if graph.predecessors(n) | |
<filename>src/apps/core/models/ModuleModels.py
from datetime import timedelta
from django.utils.timezone import now
# from cms.models import PlaceholderField, ValidationError, uuid
import uuid
from django.urls import reverse
from django.db import models, transaction
from django.conf import settings
from django_extensions.db.fields import (
AutoSlugField
)
from taggit.managers import TaggableManager
from src.apps.core.managers.IterativeDeletionManagers import (
IterativeDeletion_Manager,
PolyIterativeDeletion_Manager
)
from src.apps.core.models.HS_AppFrameModels import AppReference
from src.apps.core.models.LearningObjModels import Learning_Objective
from src.apps.core.models.PublicationModels import (
Publication,
PolyPublicationChild
)
# from cms.utils.copy_plugins import copy_plugins_to
User = settings.AUTH_USER_MODEL
class Lesson(Publication):
    """A publishable lesson that nests via parent_lesson (Module -> Topic -> Lesson)."""
    # TODO: if needed for publishable, can inherit parent's meta
    # class Meta(Publishable.Meta):
    class Meta:
        app_label = 'core'
        #unique_together = ('parent_lesson', 'name') # enforce only unique topic names within a module
        #ordering = ('name',)
        # siblings are ordered by their explicit position field
        ordering = ('position',)
        verbose_name_plural = 'Lessons'
########################################
# Fields
########################################
# boolean field representing if a lesson is (soft) deleted
# TODO: this has not been factored into the system yet, implementation will require revision of delete method
is_deleted = models.BooleanField(default=False)
# the reference id for a lesson (used in slug generation)
# this reference id will be maintained for all copies of this lesson
ref_id = models.UUIDField(default=uuid.uuid4, editable=False)
# marks the parent lesson for a lesson
# this field will be auto-populated by the generated forms
# it from the dynamic interface
parent_lesson = models.ForeignKey('self',
related_name="sub_lessons",
blank=True,
default=None,
help_text=u'Specify a Parent Lesson for this Sub-Lesson.',
null=True,
on_delete=models.CASCADE,
)
# position amongst siblings, siblings can be of type Lessons or Sections
position = models.PositiveIntegerField(default=0, blank=False, null=False)
# zero based depth level of lesson
# exclusively set by backend
depth = models.PositiveIntegerField(default=0, blank=False, null=False)
# depth Identifier
# exclusively set by backend
depth_label = models.CharField(u'Depth Label',
blank=False,
default='Module',
help_text=u'The depth-level label for this lesson',
max_length=10,
unique=False,
)
name = models.CharField(u'Lesson Name',
blank=False,
default='',
help_text=u'Please enter a name for this Lesson',
max_length=250,
unique=False,
)
short_name = models.CharField(u'Lesson Short Name',
blank=True,
default='',
help_text=u'(OPTIONAL) A shortened version of this lesson\'s name for use in lesson listings',
max_length=250,
unique=False,
)
slug = AutoSlugField(u'slug',
blank=False,
default='',
max_length=8,
unique=True,
populate_from=('ref_id',),
help_text=u'Please enter a unique slug for this Lesson (can autogenerate from name field)',
)
tags = TaggableManager(blank=True)
# many to many relationship for collaborators
# allowed to make edits to the draft of a publication
collaborators = models.ManyToManyField(User, related_name="collaborations", through='Collaboration')
# the content of this lesson
# summary = PlaceholderField('lesson_summary')
summary = models.TextField(u'Lesson Summary',
blank=True,
default='',
help_text="Enter the content for this lesson summary")
########################################
# Cloning references
########################################
# the date this lesson was cloned from a published lesson
derived_date = models.DateTimeField(null=True)
# the published lesson this lesson was derived from's ref_id
derived_lesson_slug = models.CharField(null=True, default=None, editable=False, max_length=8)
# the user that created the lesson this was derived from
derived_lesson_creator = models.ForeignKey(User, null=True, blank=True, related_name='inspired_lessons')
# the default related name for this many-to-many field is lesson_set
#
# learning_objectives = models.ManyToManyField('core.LearningObjective')
# the default related name for this many-to-many field is lesson_set
# these will potentially be polymorphic to account for different
# resource types potentially needing different attributes
#
# resources = models.ManyToManyField('core.Resource')
# TODO: potentially add 1-to-1 relationship to a publishable (instead of direct inheritance)
# this will allow for a lesson to be a child and root
# e.g. root.publishable = [publishable object], child.publishable = None
# ______________________________
# parent_link
# When True and used in a model which inherits from another
# concrete model, indicates that this field should be used as
# the link back to the parent class, rather than the extra
# OneToOneField which would normally be implicitly created
# by subclassing.
#
# publishable = models.OneToOneField('core.Publishable', default=None, on_delete=modeld.SET_NULL, parent_link=True)
def __str__(self):
return self.name
########################################
# URL Methods
########################################
# define for use by FormMixin
# (calls this method specifically, but isn't defined by default... right...)
def get_absolute_url(self):
return self.absolute_url()
def absolute_url(self):
return reverse('core:lesson_detail', kwargs={
'slug': self.slug
})
# path to the manage page for a topic
def manage_url(self):
return reverse('manage:lesson_content', kwargs={
'slug': self.slug
})
# path to the edit page for a topic
def edit_url(self):
return reverse('editor:lesson_edit', kwargs={
'slug': self.slug
})
# path to the viewer page for a topic
def viewer_url(self):
return reverse('module:lesson_detail', kwargs={
'slug': self.slug
})
# path to the viewer 'module' page for a module
def reference_url(self):
return reverse('module:module_ref', kwargs={
'ref_id': self.ref_id,
})
########################################
# Query Methods/properties
########################################
# TODO: Watch for this, as formsets may not access this with update
def save(self, **kwargs):
# print('---- in custom lesson save')
# set depth level on save
if self.parent_lesson:
self.depth = self.parent_lesson.depth + 1
else:
self.depth = 0
# TODO: this needs to be flipped
# based on depth level set the depth label
self.depth_label = {
0: 'Module',
1: 'Topic',
2: 'Lesson',
}.get(self.depth, "INVALID")
super(Lesson, self).save(**kwargs)
    def validate_unique(self, exclude=None):
        """Hook for a conditional uniqueness check on draft lessons.

        The intended constraint (no two root drafts with the same name) is
        currently disabled below; this simply defers to the parent class.
        """
        # add a conditional unique constraint to prevent
        # creation of multiple drafts with the same name
        # this is only valid if a base lesson so check that it's not a root lesson too
        # TODO: watch this, it could be inadequate when 'lesson-copy' becomes enabled later in development
        # if not self.parent_lesson and self.is_draft and Lesson.objects.exclude(pk=self.pk).filter(name=self.name, is_draft=True).exists():
        #     raise ValidationError('A Draft-Lesson with this name already exists')
        return super(Lesson, self).validate_unique(exclude)
@property
def total_depth(self):
    '''
    method to return the total depth of this lesson's structure
    (the max level of nested children)
    :return: integer representation of child depth
    '''
    # Bug fix: the old `if self.sub_lessons:` tested the related manager
    # object itself, which is always truthy, so the `else` branch was dead.
    # max(..., default=0) handles the no-children case correctly: a leaf
    # lesson has depth 1, otherwise 1 + deepest child.
    return 1 + max(
        (sub_lesson.total_depth for sub_lesson in self.sub_lessons.all()),
        default=0,
    )
@property
def num_children(self):
    # total number of direct children: sections plus nested sub-lessons
    return self.num_sections + self.num_sub_lessons
@property
def num_sections(self):
    # number of sections directly attached to this lesson
    return self.sections.count()
@property
def num_sub_lessons(self):
    # number of sub-lessons directly attached to this lesson
    return self.sub_lessons.count()
def derivation(self, user=None):
    '''
    Copy this (published) lesson and stamp the copy with derivation
    metadata pointing back at this lesson and its creator.
    :param user: the user creating the derivation (required)
    :return: a new (unsaved) lesson linked to the source lesson
    '''
    assert user, "User generating derivation must be provided."
    new_lesson = self.copy(user)
    new_lesson.derived_date = now()
    new_lesson.derived_lesson_slug = self.slug
    new_lesson.derived_lesson_creator = self.created_by
    return new_lesson
def derive_children_from(self, user=None, from_lesson=None):
    '''
    Replace this lesson's children with derived copies of another
    lesson's children (recursively for sub-lessons).
    :param user: the user performing the derivation (required)
    :param from_lesson: the Lesson whose children are copied (required)
    '''
    assert user, "User deriving the children must be provided."
    assert from_lesson, "Lesson to derive from must be provided."
    # Bug fix: related managers expose no .delete(); it must be called on a
    # queryset, so go through .all() first.
    self.sections.all().delete()
    self.sub_lessons.all().delete()
    for section_item in from_lesson.sections.all():
        # copy the section items and set their linked lesson to this new instance
        new_section = section_item.copy(user)
        new_section.lesson = self
        new_section.position = section_item.position
        # save the copied section instance, then pull over content and children
        new_section.save()
        new_section.copy_content(section_item)
        new_section.copy_children(user, section_item)
    for sub_lesson in from_lesson.sub_lessons.all():
        # copy the sub-lesson items and set their linked parent_lesson to this new instance
        new_lesson = sub_lesson.derivation(user)
        new_lesson.parent_lesson = self
        new_lesson.position = sub_lesson.position
        # save the copied sub-lesson instance, then recurse into its children
        new_lesson.save()
        new_lesson.copy_content(sub_lesson)
        new_lesson.derive_children_from(user, sub_lesson)
########################################
# Publication Method overrides
########################################
def copy(self, user=None, maintain_ref=False):
    """
    Build a new, unsaved Lesson duplicating this one.

    The duplicate is detached from any parent, positioned at 0 and not
    deleted. Placeholder content, tags, collaborators and child objects
    are NOT copied — use copy_content / copy_children after saving.

    :param user: the user performing the copy (becomes creator/changer)
    :param maintain_ref: keep this lesson's ref_id on the copy; only valid
        for the lesson owner and intended for publication flows.
    :return: a new (unsaved) copy of this lesson
    """
    assert user, "The user generating the copy must be provided."
    if maintain_ref:
        assert user == self.get_owner(), "Only the lesson owner can generate a copy with the same Reference Id."
    duplicate = Lesson(
        parent_lesson=None,
        position=0,
        is_deleted=False,
        name=self.name,
        short_name=self.short_name,
        summary=self.summary,
        created_by=user,
        changed_by=user,
    )
    if maintain_ref:
        # mark the copy as the "same" lesson; used by publication methods
        duplicate.ref_id = self.ref_id
    if self.derived_date:
        # carry any derivation trail over to the copy
        duplicate.derived_date = self.derived_date
        duplicate.derived_lesson_slug = self.derived_lesson_slug
        duplicate.derived_lesson_creator = self.derived_lesson_creator
    return duplicate
def copy_children(self, user=None, from_instance=None, maintain_ref=False):
'''
Copy child relations (sub_lessons/sections) from a passed lesson, with the option of specifying
if the ref_id should be maintained. this should only happen during publishing.
:param user: the user copying the children
:param from_instance: Lesson instance from which the child relations are provided.
:param maintain_ref: Boolean representing if the ref_id should be maintained on the | |
# src/server.py
# Flask application entry point for the music-server API.
from flask import Flask, Response, redirect, url_for, request, send_file, session
# Fix: the two duplicate `from werkzeug.utils import ...` lines are merged.
from werkzeug.utils import header_property, secure_filename
from flask_session import Session
import json
from tags import MusicFileHandler
from database import DatabaseHandler
from utils import returnJSON
from flask_cors import CORS
import hashlib
# Normalize file names from uploads
from unicodedata import normalize
from os import path, makedirs
# send music pictures
from io import BytesIO

app = Flask(__name__)
# Allow cross-origin requests from the web client.
CORS(app)
# Server-side sessions, stored on the filesystem, expire with the browser.
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Single shared database handler used by every route.
db = DatabaseHandler("music.db")
"""
Database: implemented
/database-informations GET
/update-database-informations POST {name: new database name, description: new database description}
Database: not implemented
Nothing \o/
"""
@app.route('/database-informations')
def databaseInformations():
    """Return the database's name and description as JSON."""
    # getDatabaseInformations() yields a row (id, name, description) or None.
    infos = db.getDatabaseInformations()
    content = {"name": "", "description": ""}
    # Idiom fix: `not isinstance(infos, type(None))` replaced by `is not None`.
    if infos is not None:
        content["name"] = infos[1]
        content["description"] = infos[2]
        return returnJSON(0, "Success", content)
    return returnJSON(-1, "Failed to get database informations", content)
@app.route('/update-database-informations', methods=["POST"])
def updateDatabaseInformations():
    """Update the database name/description from a JSON body (auth required)."""
    # Reject requests without a valid token.
    retCode, user_id, username = checkAuth()
    if retCode == -1:
        return returnJSON(retCode, "Wrong token or x-access-token header not set")
    if not request.is_json:
        return returnJSON(-1, "Please send a json")
    body = request.json
    # Both fields must be present and non-empty.
    if body.get("name", "") == "" or body.get("description", "") == "":
        return returnJSON(-1, "Missing parameters")
    db.setDatabaseInformations(body["name"], body["description"])
    return returnJSON(0, "Database informations updated successfully")
"""
Musics: implemented
/get-musics GET
/get-albums GET
/get-artists GET
/get-music/<music_id> GET
/get-album/<album_id> GET
/get-artist/<artist_id> GET
/get-music-picture/<music_id> GET (not recommended to use, see /get-album-picture instead)
/get-album-picture/<album_id> GET
/get-music-file/<music_id> GET
/upload-music POST {music: music file to upload}
Musics: not implemented
/update-music POST
/update-album POST
/update-artist POST (maybe)
/remove-music/<music_id> POST
/remove-album/<album_id> POST
/remove-artist/<artist_id> POST
"""
@app.route('/get-musics')
def getMusics():
    """List every music entry belonging to the authenticated user."""
    # Reject requests without a valid token.
    retCode, user_id, username = checkAuth()
    if retCode == -1:
        return returnJSON(retCode, "Wrong token or x-access-token header not set")
    return returnJSON(0, "Success", db.getMusicsForUser(user_id))
@app.route('/get-albums')
def getAlbums():
    """List every album belonging to the authenticated user."""
    # Reject requests without a valid token.
    retCode, user_id, username = checkAuth()
    if retCode == -1:
        return returnJSON(retCode, "Wrong token or x-access-token header not set")
    return returnJSON(0, "Success", db.getAlbumsForUser(user_id))
@app.route('/get-music/<int:music_id>')
def getMusic(music_id:int):
    """Fetch one music entry scoped to the authenticated user."""
    retCode, user_id, username = checkAuth()
    if retCode == -1:
        return returnJSON(retCode, "Wrong token or x-access-token header not set")
    response = db.getMusicForUser(music_id, user_id)
    # Bug fix: the success code was the string "0"; every other endpoint
    # returns the integer 0, which clients compare against.
    return returnJSON(0, "Success", response)
@app.route('/get-album/<int:album_id>')
def getAlbum(album_id:int):
    """Fetch one album scoped to the authenticated user."""
    retCode, user_id, username = checkAuth()
    if retCode == -1:
        return returnJSON(retCode, "Wrong token or x-access-token header not set")
    album = db.getAlbumForUser(album_id, user_id)
    # db returns {} when the album is missing or owned by someone else.
    if album == {}:
        return returnJSON(-1, "Album not found", album)
    return returnJSON(0, "Success", album)
@app.route('/get-music-picture/<int:music_id>')
def getMusicPicture(music_id:int):
    """Serve the album cover for a music, or the default placeholder image."""
    retCode, user_id, username = checkAuth()
    if retCode == -1:
        return returnJSON(retCode, "Wrong token or x-access-token header not set")
    response = db.getMusicForUser(music_id, user_id)
    if response == {}:
        return returnJSON(-1, "Music does not exist")
    # Cover art is stored on the album, not the music row.
    picture = db.getAlbumPictureForUser(response["album_id"], user_id)
    # Idiom fix: `!= None` replaced by `is not None`.
    if picture != {} and picture["album_picture"] is not None:
        return send_file(
            BytesIO(picture["album_picture"]),
            mimetype=picture["album_picture_mime"],
            as_attachment=False,
            download_name="cover")
    # No stored cover: fall back to the bundled placeholder PNG.
    with open("ressources/music_default.png", "br") as f:
        return send_file(
            BytesIO(f.read()),
            mimetype="image/png",
            as_attachment=False,
            download_name="cover")
@app.route('/get-album-picture/<int:album_id>')
def getAlbumPicture(album_id:int):
    """Serve an album's cover art, or the default placeholder image."""
    retCode, user_id, username = checkAuth()
    if retCode == -1:
        return returnJSON(retCode, "Wrong token or x-access-token header not set")
    picture = db.getAlbumPictureForUser(album_id, user_id)
    # Idiom fix: `!= None` replaced by `is not None`.
    if picture != {} and picture["album_picture"] is not None:
        return send_file(
            BytesIO(picture["album_picture"]),
            mimetype=picture["album_picture_mime"],
            as_attachment=False,
            download_name="cover")
    # No stored cover: fall back to the bundled placeholder PNG.
    with open("ressources/music_default.png", "br") as f:
        return send_file(
            BytesIO(f.read()),
            mimetype="image/png",
            as_attachment=False,
            download_name="cover")
@app.route('/get-music-file/<int:music_id>')
def getMusicFile(music_id:int):
    """Stream the stored audio file for one of the user's musics."""
    retCode, user_id, username = checkAuth()
    if retCode == -1:
        return returnJSON(retCode, "Wrong token or x-access-token header not set")
    music = db.getMusicForUser(music_id, user_id)
    if music == {}:
        return returnJSON(-1, "File does not exist")
    return send_file(music["path"])
@app.route('/get-artists')
def getArtists():
    """List every artist belonging to the authenticated user."""
    # Reject requests without a valid token.
    retCode, user_id, username = checkAuth()
    if retCode == -1:
        return returnJSON(retCode, "Wrong token or x-access-token header not set")
    return returnJSON(0, "Success", db.getArtistsForUser(user_id))
@app.route('/get-artist/<int:artist_id>')
def getArtist(artist_id:int):
    """Fetch one artist scoped to the authenticated user."""
    retCode, user_id, username = checkAuth()
    if retCode == -1:
        return returnJSON(retCode, "Wrong token or x-access-token header not set")
    artist = db.getArtistForUser(artist_id, user_id)
    # db returns {} when the artist is missing or not owned by this user.
    if artist == {}:
        return returnJSON(-1, "Artist not found", artist)
    return returnJSON(0, "Success", artist)
@app.route('/upload-music', methods=["POST"])
def uploadMusic():
    """Accept an uploaded music file, store it under downloads/, read its tags
    and register it for the authenticated user."""
    retCode, user_id, username = checkAuth()
    if retCode == -1:
        return returnJSON(retCode, "Wrong token or x-access-token header not set")
    # Robustness fix: request.files["music"] raised KeyError (HTTP 500) when
    # the field was absent; report a clean error instead.
    f = request.files.get("music")
    if f is None or f.filename == "":
        return returnJSON(-1, "Missing parameters")
    # https://blog.csdn.net/qq_36390239/article/details/98847888 nice
    # NFKD-normalize the client filename and strip path separators so the
    # file cannot escape the downloads directory.
    filename = normalize('NFKD', f.filename).encode('utf-8', ).decode()
    for sep in path.sep, path.altsep:
        if sep:
            filename = filename.replace(sep, ' ')
    makedirs("downloads", exist_ok=True)
    saving_path = "downloads/" + filename
    f.save(saving_path)
    music = MusicFileHandler(saving_path)
    if not music.OK():
        # NOTE(review): the saved file is left on disk here — confirm intended.
        return returnJSON(-1, "Error getting tags")
    db.addMusicToUser(filename, saving_path, user_id)
    return returnJSON(0, "Music saved as \"" + filename + "\"", music.getTags())
"""
Playlists: implemented
/get-playlists GET
/get-playlist/<playlist_id> GET
/create-playlist POST {name: playlist name, description: playlist description}
/update-playlist/<playlist_id> POST {name: new playlist name, description: new playlist description}
/add-musics-to-playlist/<playlist_id> POST {musics: music IDs separated by ";" (ex. "2;23;10;38")}
/add-music-to-playlist/<playlist_id>/<music_id> GET
/remove-playlist/<playlist_id> GET
/remove-music-from-playlist/<playlist_id>/<music_id> GET
/remove-musics-from-playlist/<playlist_id> POST {musics: music IDs separated by ";" (ex. "2;23;10;38")}
Playlists: not implemented
nothing \o/
"""
@app.route('/get-playlists')
def getPlaylists():
    """List every playlist belonging to the authenticated user."""
    # Reject requests without a valid token.
    retCode, user_id, username = checkAuth()
    if retCode == -1:
        return returnJSON(retCode, "Wrong token or x-access-token header not set")
    return returnJSON(0, "Success", db.getPlaylistsForUser(user_id))
@app.route('/get-playlist/<int:playlist_id>')
def getPlaylist(playlist_id:int):
    """Fetch one playlist scoped to the authenticated user."""
    retCode, user_id, username = checkAuth()
    if retCode == -1:
        return returnJSON(retCode, "Wrong token or x-access-token header not set")
    playlist = db.getPlaylistForUser(playlist_id, user_id)
    # db returns {} when the playlist is missing or not owned by this user.
    if playlist == {}:
        return returnJSON(-1, "Playlist not found", playlist)
    return returnJSON(0, "Success", playlist)
@app.route('/create-playlist', methods=['POST'])
def CreatePlaylist():
    """Create a playlist from a JSON body {name, description}."""
    retCode, user_id, username = checkAuth()
    if retCode == -1:
        return returnJSON(retCode, "Wrong token or x-access-token header not set")
    if not request.is_json:
        return returnJSON(-1, "Please send a json")
    body = request.json
    if "name" not in body or "description" not in body:
        return returnJSON(-1, "Missing parameters")
    if body["name"] == "":
        return returnJSON(-1, "Playlist name can't be empty")
    retCode, retMessage, new_id = db.createPlaylistForUser(body["name"], body["description"], user_id)
    return returnJSON(retCode, retMessage, new_id)
@app.route('/update-playlist/<int:playlist_id>', methods=['POST'])
def UpdatePlaylist(playlist_id:int):
    """Rename/re-describe a playlist from a JSON body {name, description}."""
    retCode, user_id, username = checkAuth()
    if retCode == -1:
        return returnJSON(retCode, "Wrong token or x-access-token header not set")
    if not request.is_json:
        return returnJSON(-1, "Please send a json")
    body = request.json
    if "name" not in body or "description" not in body:
        return returnJSON(-1, "Missing parameters")
    if body["name"] == "":
        return returnJSON(-1, "Playlist name can't be empty")
    retCode, retMessage = db.updatePlaylistForUser(playlist_id, body["name"], body["description"], user_id)
    return returnJSON(retCode, retMessage, {})
@app.route('/add-musics-to-playlist/<int:playlist_id>', methods=['POST'])
def AddMusicsToPlaylist(playlist_id:int):
    """Add several musics to a playlist; body is {musics: "id;id;..."}."""
    retCode, user_id, username = checkAuth()
    if retCode == -1:
        return returnJSON(retCode, "Wrong token or x-access-token header not set")
    if not request.is_json:
        return returnJSON(-1, "Please send a json")
    body = request.json
    if body.get("musics", "") == "":
        return returnJSON(-1, "Missing parameters")
    # IDs arrive as a single ';'-separated string.
    music_ids = body["musics"].split(";")
    retCode, retMessage, added = db.addMusicsToPlaylistForUser(playlist_id, music_ids, user_id)
    return returnJSON(retCode, retMessage, added)
@app.route('/add-music-to-playlist/<int:playlist_id>/<int:music_id>')
def AddMusicToPlaylist(playlist_id:int, music_id:int):
    """Add a single music to one of the authenticated user's playlists."""
    retCode, user_id, username = checkAuth()
    if retCode == -1:
        return returnJSON(retCode, "Wrong token or x-access-token header not set")
    retCode, retMessage, added = db.addMusicToPlaylistForUser(playlist_id, music_id, user_id)
    return returnJSON(retCode, retMessage, added)
@app.route('/remove-playlist/<int:playlist_id>')
def RemovePlaylist(playlist_id:int):
    """Delete one of the authenticated user's playlists."""
    retCode, user_id, username = checkAuth()
    if retCode == -1:
        return returnJSON(retCode, "Wrong token or x-access-token header not set")
    retCode, retMessage = db.removePlaylistForUser(playlist_id, user_id)
    return returnJSON(retCode, retMessage)
@app.route('/remove-musics-from-playlist/<int:playlist_id>', methods = ['POST'])
def RemoveMusicsFromPlaylist(playlist_id:int):
# Check connexion
retCode = -1
retMessage = "Wrong token or x-access-token header not set"
retCode, user_id, | |
"▁Temer": 13432,
"69)": 13433,
"▁Gim": 13434,
"▁Miko": 13435,
"▁Tax": 13436,
"▁has": 13437,
"▁Boc": 13438,
"vé": 13439,
"▁SAP": 13440,
"1.3": 13441,
"608": 13442,
"▁Barba": 13443,
"▁sco": 13444,
"▁eu": 13445,
"210": 13446,
"▁0.0": 13447,
"▁Zub": 13448,
"mov": 13449,
"wad": 13450,
"▁Lex": 13451,
"bian": 13452,
"▁Glasgow": 13453,
"sset": 13454,
"▁Tek": 13455,
"iy": 13456,
"▁(9)": 13457,
"▁Sey": 13458,
"mida": 13459,
"▁(1996)": 13460,
"▁Clement": 13461,
"▁Ein": 13462,
"446": 13463,
"▁Mathe": 13464,
"ors": 13465,
"Ed": 13466,
"dri": 13467,
"▁15-": 13468,
"bow": 13469,
"▁Ark": 13470,
"loo": 13471,
"▁Read": 13472,
"▁Scandinav": 13473,
"gale": 13474,
"mAh": 13475,
"odi": 13476,
"▁Jump": 13477,
"dre": 13478,
"▁basa": 13479,
"641": 13480,
"▁1872": 13481,
"▁Belgrad": 13482,
"▁Mare": 13483,
"▁Sut": 13484,
"▁kit": 13485,
"205": 13486,
"51)": 13487,
"554": 13488,
"kah": 13489,
"strada": 13490,
"▁Rhe": 13491,
"syn": 13492,
"QI": 13493,
"tice": 13494,
"▁pec": 13495,
"ndi": 13496,
"JD": 13497,
"▁Exchange": 13498,
"▁Henrique": 13499,
"▁retro": 13500,
"▁Capri": 13501,
"sini": 13502,
"▁Amor": 13503,
"▁Elev": 13504,
"▁Sei": 13505,
"▁Blackberry": 13506,
"FD": 13507,
"▁Community": 13508,
"alle": 13509,
"▁Nis": 13510,
"▁Pea": 13511,
"21)": 13512,
"RU": 13513,
"▁Pero": 13514,
"▁ky": 13515,
"hausen": 13516,
"▁Tex": 13517,
"▁fasci": 13518,
"IST": 13519,
"prop": 13520,
"124": 13521,
"crypt": 13522,
"IMA": 13523,
"004": 13524,
"face": 13525,
"wind": 13526,
"▁ERP": 13527,
"+5": 13528,
"tse": 13529,
"▁Martha": 13530,
"788": 13531,
"soft": 13532,
"til": 13533,
"▁Monta": 13534,
"lani": 13535,
"▁Baja": 13536,
"rish": 13537,
"Hz": 13538,
"▁Heavy": 13539,
"iser": 13540,
"189": 13541,
"zka": 13542,
"▁FO": 13543,
"ATI": 13544,
"▁Asa": 13545,
"▁Halo": 13546,
"▁Lav": 13547,
"▁chun": 13548,
"EST": 13549,
"Music": 13550,
"▁AND": 13551,
"fir": 13552,
"▁Somali": 13553,
"▁Montpellier": 13554,
"shah": 13555,
"▁Maga": 13556,
"119": 13557,
"▁Esse": 13558,
"berger": 13559,
"▁MSI": 13560,
"▁Michelin": 13561,
"▁line": 13562,
"142": 13563,
"tara": 13564,
"▁Vaz": 13565,
"bing": 13566,
"dit": 13567,
"gnac": 13568,
"bag": 13569,
"mons": 13570,
"White": 13571,
"osto": 13572,
"sili": 13573,
"▁Dev": 13574,
"▁Kap": 13575,
"▁Pena": 13576,
"557": 13577,
"rra": 13578,
"▁Brug": 13579,
"tem": 13580,
"466": 13581,
"cycle": 13582,
"els": 13583,
"▁col": 13584,
"▁Quer": 13585,
"peri": 13586,
"▁Gael": 13587,
"meri": 13588,
"oj": 13589,
"▁Soccer": 13590,
"658": 13591,
"Gal": 13592,
"▁Bord": 13593,
"belle": 13594,
"▁tong": 13595,
"▁Sweet": 13596,
"652": 13597,
"cafe": 13598,
"roid": 13599,
"▁Fantastic": 13600,
"MH": 13601,
"South": 13602,
"▁Krishna": 13603,
"▁Gud": 13604,
"mier": 13605,
"▁makeup": 13606,
"Web": 13607,
"▁Rasmussen": 13608,
"▁Adelaide": 13609,
"▁NGƯỜI": 13610,
"▁tuk": 13611,
"129": 13612,
"256": 13613,
"RN": 13614,
"rite": 13615,
"▁[1": 13616,
"OX": 13617,
"▁Grind": 13618,
"▁Sole": 13619,
"▁Analytic": 13620,
"▁Tala": 13621,
"Gre": 13622,
"cord": 13623,
"osi": 13624,
"▁Sonic": 13625,
"▁Architect": 13626,
"877": 13627,
"NES": 13628,
"▁Mauro": 13629,
"6.5": 13630,
"542": 13631,
"owo": 13632,
"store": 13633,
"▁Tale": 13634,
"thana": 13635,
"▁1857": 13636,
"844": 13637,
"Fu": 13638,
"force": 13639,
"▁Mani": 13640,
"▁Amal": 13641,
"▁Bibi": 13642,
"▁Rashid": 13643,
"▁sticker": 13644,
"▁Britain": 13645,
"beli": 13646,
"▁Cons": 13647,
"▁[2]": 13648,
"▁free": 13649,
"eto": 13650,
"ific": 13651,
"▁Beijing": 13652,
"▁us": 13653,
"▁Gob": 13654,
"722": 13655,
"▁Strik": 13656,
"▁och": 13657,
"itar": 13658,
"65)": 13659,
"▁DP": 13660,
"▁Print": 13661,
"ITE": 13662,
"▁Toro": 13663,
"tegi": 13664,
"▁Kari": 13665,
"LED": 13666,
"▁page": 13667,
"757": 13668,
"fred": 13669,
"▁light": 13670,
"▁Xer": 13671,
"▁pano": 13672,
"bita": 13673,
"fel": 13674,
"rii": 13675,
"site": 13676,
"609": 13677,
"hni": 13678,
"▁Nairobi": 13679,
"aku": 13680,
"national": 13681,
"▁Alliance": 13682,
"uka": 13683,
"▁Download": 13684,
"▁ôl": 13685,
"▁Maurice": 13686,
"5-": 13687,
"serie": 13688,
"▁Emp": 13689,
"▁kayak": 13690,
"logen": 13691,
"▁Odd": 13692,
"▁Paz": 13693,
"▁Zach": 13694,
"▁font": 13695,
"▁sale": 13696,
"669": 13697,
"▁lut": 13698,
"▁Stat": 13699,
"139": 13700,
"Sta": 13701,
"116": 13702,
"cliff": 13703,
"lago": 13704,
"▁Ard": 13705,
"▁best": 13706,
"PLA": 13707,
"▁%": 13708,
"776": 13709,
"stia": 13710,
"▁Charm": 13711,
"pica": 13712,
"131": 13713,
"▁8000": 13714,
"▁Jelly": 13715,
"▁have": 13716,
"491": 13717,
"eros": 13718,
"▁Benedict": 13719,
"▁Chocolate": 13720,
"▁Wagner": 13721,
"Wat": 13722,
"uld": 13723,
"▁Maj": 13724,
"▁Casi": 13725,
"Ẵ": 13726,
"▁Join": 13727,
"▁SF": 13728,
"▁Herbal": 13729,
"▁ME": 13730,
"villa": 13731,
"▁Ní": 13732,
"▁OG": 13733,
"▁Sue": 13734,
"▁semi": 13735,
"492": 13736,
"Mil": 13737,
"lita": 13738,
"Wo": 13739,
"▁Marian": 13740,
"1942": 13741,
"255": 13742,
"▁sop": 13743,
"▁try": 13744,
"4.2": 13745,
"▁DAN": 13746,
"▁Metropolitan": 13747,
"▁Ref": 13748,
"▁Roo": 13749,
"▁Asha": 13750,
"▁Living": 13751,
"hey": 13752,
"▁Plo": 13753,
"▁SPF": 13754,
"vá": 13755,
"▁Como": 13756,
"pak": 13757,
"dula": 13758,
"▁bot": 13759,
"▁Mello": 13760,
"▁don": 13761,
"060": 13762,
"537": 13763,
"589": 13764,
"▁olive": 13765,
"▁Mustafa": 13766,
"▁Dutch": 13767,
"▁Xavier": 13768,
"arium": 13769,
"▁Deli": 13770,
"▁Vid": 13771,
"▁Cold": 13772,
"子": 13773,
"OH": 13774,
"▁Flight": 13775,
"634": 13776,
"▁tele": 13777,
"▁Sicilia": 13778,
"▁obscur": 13779,
"kari": 13780,
"▁Lev": 13781,
"▁sten": 13782,
"▁Frei": 13783,
"▁music": 13784,
"eras": 13785,
"fam": 13786,
"▁air": 13787,
"landi": 13788,
"wy": 13789,
"▁Mickey": 13790,
"▁Transit": 13791,
"569": 13792,
"▁Canyon": 13793,
"gene": 13794,
"▁spam": 13795,
"672": 13796,
"mere": 13797,
"▁Brin": 13798,
"▁Epic": 13799,
"▁Vilanova": 13800,
"▁Columbus": 13801,
"▁Rif": 13802,
"▁mode": 13803,
"laus": 13804,
"thú": 13805,
"info": 13806,
"▁Dynamics": 13807,
"ating": 13808,
"rce": 13809,
"hita": 13810,
"trac": 13811,
"vut": 13812,
"▁Building": 13813,
"▁nail": 13814,
"Mü": 13815,
"ifi": 13816,
"598": 13817,
"base": 13818,
"▁Flex": 13819,
"127": 13820,
"▁Taiwan": 13821,
"Ter": 13822,
"brand": 13823,
"Ce": 13824,
"▁Activ": 13825,
"onto": 13826,
"▁Helena": 13827,
"▁León": 13828,
"▁Lig": 13829,
"▁Uz": 13830,
"uso": 13831,
"▁Sino": 13832,
"Myanmar": 13833,
"▁Casino": 13834,
"▁cas": 13835,
"▁down": 13836,
"677": 13837,
"▁life": 13838,
"scar": 13839,
"▁Amazing": 13840,
"▁Weiss": 13841,
"ventus": 13842,
"zana": 13843,
"▁IM": 13844,
"▁KA": 13845,
"▁Wah": 13846,
"▁Castell": 13847,
"▁Zoom": 13848,
"57)": 13849,
"blog": 13850,
"▁Amma": 13851,
"132": 13852,
"zole": 13853,
"lant": 13854,
"rog": 13855,
"▁Sedan": 13856,
"▁ampli": 13857,
"▁Biel": 13858,
"▁vintage": 13859,
"165": 13860,
"▁Signal": 13861,
"amin": 13862,
"▁sì": 13863,
"niki": 13864,
"pati": 13865,
"▁FN": 13866,
"fell": 13867,
"▁Motion": 13868,
"▁Flip": 13869,
"▁rose": 13870,
"▁tom": 13871,
"▁About": 13872,
"▁cacao": 13873,
"▁kung": 13874,
"70)": 13875,
"▁Beh": 13876,
"▁Basic": 13877,
"oria": 13878,
"▁Pine": 13879,
"▁fo": 13880,
"ratti": 13881,
"shir": 13882,
"▁Andersen": 13883,
"▁Heng": 13884,
"▁Rac": 13885,
"Space": 13886,
"▁Georg": 13887,
"▁Policy": 13888,
"▁Wine": 13889,
"▁Baz": 13890,
"▁Dimitri": 13891,
"Sun": 13892,
"▁Sushi": 13893,
"drome": 13894,
"▁Capo": 13895,
"▁Puma": 13896,
"▁lét": 13897,
"ash": 13898,
"host": 13899,
"nau": 13900,
"▁update": 13901,
"bari": 13902,
"▁Hermann": 13903,
"▁Tec": 13904,
"belo": 13905,
"136": 13906,
"▁Batt": 13907,
"▁Nations": 13908,
"▁Giêsu": 13909,
"wazi": 13910,
"size": 13911,
"▁Boca": 13912,
"▁SEC": 13913,
"▁Skr": 13914,
"zhou": 13915,
"bago": 13916,
"▁Cata": 13917,
"▁Stal": 13918,
"▁(1994)": 13919,
"▁There": 13920,
"▁Vene": 13921,
"▁25-": 13922,
"ango": 13923,
"uza": 13924,
"▁Basil": 13925,
"▁film": 13926,
"kram": 13927,
"thra": 13928,
"tang": 13929,
"▁Highland": 13930,
"Chan": 13931,
"kwa": 13932,
"▁Spiel": 13933,
"FK": 13934,
"Les": 13935,
"▁Anu": 13936,
"▁Fis": 13937,
"▁Tep": 13938,
"▁Foreign": 13939,
"▁Goethe": 13940,
"▁Menu": 13941,
"▁Ruben": 13942,
"461": 13943,
"▁Nah": 13944,
"▁11-": 13945,
"▁Josef": 13946,
"▁formal": 13947,
"ouvre": 13948,
"Sh": 13949,
"▁Ent": 13950,
"▁hom": 13951,
"bourg": 13952,
"panda": 13953,
"quel": 13954,
"▁Abel": 13955,
"CCP": 13956,
"Nor": 13957,
"▁BẢN": 13958,
"▁Guillermo": 13959,
"648": 13960,
"dej": 13961,
"▁Brod": 13962,
"▁Huo": 13963,
"▁dop": 13964,
"▁inter": 13965,
"▁star": 13966,
"144": 13967,
"642": 13968,
"659": 13969,
"▁Alessandro": 13970,
"▁60-": 13971,
"▁Spir": 13972,
"▁Vest": 13973,
"▁Hasan": 13974,
"▁Tuli": 13975,
"guin": 13976,
"▁Johor": 13977,
"▁Language": 13978,
"▁Sleep": 13979,
"▁war": 13980,
"1955": 13981,
"▁90-": 13982,
"▁Joyce": 13983,
"141": 13984,
"ogram": 13985,
"cka": 13986,
"▁Drama": 13987,
"▁Rebel": 13988,
"▁Sok": 13989,
"56)": 13990,
"▁Pli": 13991,
"▁Suite": 13992,
"25)": 13993,
"TRO": 13994,
"lane": 13995,
"▁tv": 13996,
"133": 13997,
"rsi": 13998,
"▁Aber": 13999,
"...]": 14000,
"grand": 14001,
"was": 14002,
"587": 14003,
"965": 14004,
"Bas": 14005,
"159": 14006,
"▁HDD": 14007,
"494": 14008,
"pac": 14009,
"tir": 14010,
"27)": 14011,
"145": 14012,
"bek": 14013,
"ete": 14014,
"ingen": 14015,
"▁Versailles": 14016,
"arte": 14017,
"tya": 14018,
"Girl": 14019,
"fighter": 14020,
"nei": 14021,
"▁Griff": 14022,
"vine": 14023,
"▁Pittsburgh": 14024,
"ICT": 14025,
"cept": 14026,
"rul": 14027,
"▁Sven": 14028,
"Paul": 14029,
"TAC": 14030,
"mala": 14031,
"mium": 14032,
"4.0": 14033,
"▁Varga": 14034,
"▁Wander": 14035,
"Ap": 14036,
"ło": 14037,
"▁MAS": 14038,
"▁Verona": 14039,
"ache": 14040,
"ses": 14041,
"▁Oy": 14042,
"▁Rabi": 14043,
"rail": 14044,
"tit": 14045,
"▁Forever": 14046,
"▁Ito": 14047,
"▁Strip": 14048,
"▁UNI": 14049,
"23)": 14050,
"thing": 14051,
"brook": 14052,
"cello": 14053,
"ood": 14054,
"▁Drew": 14055,
"Bru": 14056,
"av": 14057,
"mien": 14058,
"uh": 14059,
"bad": 14060,
"kot": 14061,
"▁Launch": 14062,
"onder": 14063,
"▁Compact": 14064,
"American": 14065,
"fie": 14066,
"iji": 14067,
"▁Sunshine": 14068,
"▁Dub": 14069,
"blad": 14070,
"▁Punjab": 14071,
| |
Item(user_id=1, name="Towels",
description="saw. Hath called ", category=category13)
session.add(item9)
session.commit()
category14 = Category(user_id=1, name="Coins, Stamps & Paper money")
session.add(category14)
session.commit()
item1 = Item(user_id=1, name="Coins",
description="the sea called void ", category=category14)
session.add(item1)
session.commit()
item2 = Item(user_id=1, name="Coins, Paper Money & Stamps Accessories",
description="earth. Us place seed ", category=category14)
session.add(item2)
session.commit()
item3 = Item(user_id=1, name="Paper Money",
description="fourth face brought. ", category=category14)
session.add(item3)
session.commit()
item4 = Item(user_id=1, name="Stamps",
description="second lesser called ", category=category14)
session.add(item4)
session.commit()
category15 = Category(user_id=1, name="Eyewear & Optics")
session.add(category15)
session.commit()
item1 = Item(user_id=1, name="Contact Lenses",
description="brought likeness and ", category=category15)
session.add(item1)
session.commit()
item2 = Item(user_id=1, name="Eyewear",
description="which signs cattle ", category=category15)
session.add(item2)
session.commit()
item3 = Item(user_id=1, name="Eyewear Accessories",
description="life green own. ", category=category15)
session.add(item3)
session.commit()
item4 = Item(user_id=1, name="Glasses Frames",
description="doesn't he morning ", category=category15)
session.add(item4)
session.commit()
category16 = Category(user_id=1, name="Garden & Outdoor")
session.add(category16)
session.commit()
item1 = Item(user_id=1, name="Barbecue Tools & Grill Accessories",
description="You're good living ", category=category16)
session.add(item1)
session.commit()
item2 = Item(user_id=1, name="Flowers",
description="give morning Stars ", category=category16)
session.add(item2)
session.commit()
item3 = Item(user_id=1, name="Garden Decoration",
description="earth. Us place seed ", category=category16)
session.add(item3)
session.commit()
item4 = Item(user_id=1, name="Garden Furniture",
description="behold. Won't ", category=category16)
session.add(item4)
session.commit()
item5 = Item(user_id=1, name="Gardening & Watering Supplies",
description="stars. Fourth heaven ", category=category16)
session.add(item5)
session.commit()
item6 = Item(user_id=1, name="Grills & Smokers",
description="there seed also ", category=category16)
session.add(item6)
session.commit()
item7 = Item(user_id=1, name="Pest Control",
description="living have land ", category=category16)
session.add(item7)
session.commit()
item8 = Item(user_id=1, name="Smoking Accessories",
description="greater. Them the ", category=category16)
session.add(item8)
session.commit()
category17 = Category(user_id=1, name="Home Appliances")
session.add(category17)
session.commit()
item1 = Item(user_id=1, name="Heating, Cooling & Air Quality Center",
description="morning fourth ", category=category17)
session.add(item1)
session.commit()
item2 = Item(user_id=1, name="Irons, Steamers & Sewing",
description="heaven wherein. ", category=category17)
session.add(item2)
session.commit()
item3 = Item(user_id=1, name="Large Appliances Center",
description="Sixth seasons our ", category=category17)
session.add(item3)
session.commit()
item4 = Item(user_id=1, name="Parts & Accessories",
description="dry fish replenish ", category=category17)
session.add(item4)
session.commit()
item5 = Item(user_id=1, name="Small Appliances Center",
description="Our creature wherein ", category=category17)
session.add(item5)
session.commit()
item6 = Item(user_id=1, name="Televisions",
description="second blessed ", category=category17)
session.add(item6)
session.commit()
# Seed the "Kitchen Appliances" category and its 24 items.
# NOTE(review): each insert is committed individually; one commit after all
# adds would be far faster, but the per-row commits are kept as-is.
category18 = Category(user_id=1, name="Kitchen Appliances")
session.add(category18)
session.commit()
item1 = Item(user_id=1, name="Appliances Parts & Accessories", description="void give darkness. ", category=category18)
session.add(item1)
session.commit()
item2 = Item(user_id=1, name="Blenders & Mixers", description="tree. One subdue had ", category=category18)
session.add(item2)
session.commit()
item3 = Item(user_id=1, name="Coffee & Espresso Makers", description="creeping. whales ", category=category18)
session.add(item3)
session.commit()
item4 = Item(user_id=1, name="Dishwashers", description="be them meat waters ", category=category18)
session.add(item4)
session.commit()
item5 = Item(user_id=1, name="Electric Meat Grinders", description="all seas fish. Upon. ", category=category18)
session.add(item5)
session.commit()
item6 = Item(user_id=1, name="Electric Slicers", description="let lesser fifth ", category=category18)
session.add(item6)
session.commit()
item7 = Item(user_id=1, name="Food Preparation", description="whales moving heaven ", category=category18)
session.add(item7)
session.commit()
item8 = Item(user_id=1, name="Food Processors", description="give he. Moveth ", category=category18)
session.add(item8)
session.commit()
item9 = Item(user_id=1, name="Fryers", description="one. All he saying ", category=category18)
session.add(item9)
session.commit()
item10 = Item(user_id=1, name="Ice Cream Makers", description="Good. Land. Own ", category=category18)
session.add(item10)
session.commit()
item11 = Item(user_id=1, name="Juicers & Presses", description="you're fowl appear ", category=category18)
session.add(item11)
session.commit()
item12 = Item(user_id=1, name="Kettles", description="replenish she'd ", category=category18)
session.add(item12)
session.commit()
item13 = Item(user_id=1, name="Kitchen Scales", description="earth. And. Made ", category=category18)
session.add(item13)
session.commit()
item14 = Item(user_id=1, name="Microwaves", description="Second. Cattle ", category=category18)
session.add(item14)
session.commit()
item15 = Item(user_id=1, name="Ovens, Ranges & Stoves", description="dry. You're. two ", category=category18)
session.add(item15)
session.commit()
item16 = Item(user_id=1, name="Range Hoods", description="brought whales. his. ", category=category18)
session.add(item16)
session.commit()
item17 = Item(user_id=1, name="Refrigerators & Freezers", description="Lights creepeth own ", category=category18)
session.add(item17)
session.commit()
item18 = Item(user_id=1, name="Rice Cooker", description="Our creature wherein ", category=category18)
session.add(item18)
session.commit()
item19 = Item(user_id=1, name="Sandwich & Waffle Makers", description="together fruit Fly ", category=category18)
session.add(item19)
session.commit()
item20 = Item(user_id=1, name="Slow Cookers", description="great. Thing moved ", category=category18)
session.add(item20)
session.commit()
item21 = Item(user_id=1, name="Specialty Kitchen Appliances", description="Divided upon give ", category=category18)
session.add(item21)
session.commit()
item22 = Item(user_id=1, name="Steamers", description="Beginning was give ", category=category18)
session.add(item22)
session.commit()
item23 = Item(user_id=1, name="Toasters", description="fourth years i a. ", category=category18)
session.add(item23)
session.commit()
item24 = Item(user_id=1, name="Water Coolers & Dispensers", description="made two gathered ", category=category18)
session.add(item24)
session.commit()
# Seed the "Music & Movies" category and its 7 items.
category19 = Category(user_id=1, name="Music & Movies")
session.add(category19)
session.commit()
item1 = Item(user_id=1, name="Guitars", description="lesser two lights ", category=category19)
session.add(item1)
session.commit()
item2 = Item(user_id=1, name="Movies, Plays & Series", description="you're fruit tree ", category=category19)
session.add(item2)
session.commit()
item3 = Item(user_id=1, name="Music CDs", description="winged winged two ", category=category19)
session.add(item3)
session.commit()
item4 = Item(user_id=1, name="Musical Instruments", description="waters night. From ", category=category19)
session.add(item4)
session.commit()
item5 = Item(user_id=1, name="Musical Instruments Parts & Accessories", description="fourth the the don't ", category=category19)
session.add(item5)
session.commit()
item6 = Item(user_id=1, name="Music Keyboards", description="creature fish face ", category=category19)
session.add(item6)
session.commit()
item7 = Item(user_id=1, name="Tuners", description="Two isn't you're ", category=category19)
session.add(item7)
session.commit()
# Seed the "Pet Food & Supplies" category and its 2 items.
category20 = Category(user_id=1, name="Pet Food & Supplies")
session.add(category20)
session.commit()
item1 = Item(user_id=1, name="Pet & Animal Food", description="Thing under. ", category=category20)
session.add(item1)
session.commit()
item2 = Item(user_id=1, name="Pet & Animal Supplies", description="of upon replenish ", category=category20)
session.add(item2)
session.commit()
# Seed the "Toys" category and its 3 items.
category21 = Category(user_id=1, name="Toys")
session.add(category21)
session.commit()
item1 = Item(user_id=1, name="Bikes, Scooters & Ride-Ons", description="Itself open Make ", category=category21)
session.add(item1)
session.commit()
item2 = Item(user_id=1, name="Costumes", description="tree years. Tree ", category=category21)
session.add(item2)
session.commit()
item3 = Item(user_id=1, name="Toys", description="fruitful. Bearing ", category=category21)
session.add(item3)
session.commit()
# Seed the "Baby" category and its 11 items.
category22 = Category(user_id=1, name="Baby")
session.add(category22)
session.commit()
item1 = Item(user_id=1, name="Baby Accessories", description="Creeping fish don't ", category=category22)
session.add(item1)
session.commit()
item2 = Item(user_id=1, name="Baby Bags", description="heaven fly firmament ", category=category22)
session.add(item2)
session.commit()
item3 = Item(user_id=1, name="Baby Bath & Skincare", description="Under the a ", category=category22)
session.add(item3)
session.commit()
item4 = Item(user_id=1, name="Baby Clothing & Shoes", description="heaven. His sea ", category=category22)
session.add(item4)
session.commit()
item5 = Item(user_id=1, name="Baby Gear", description="abundantly fruit ", category=category22)
session.add(item5)
session.commit()
item6 = Item(user_id=1, name="Baby Gift Sets", description="heaven. Beast ", category=category22)
session.add(item6)
session.commit()
item7 = Item(user_id=1, name="Baby Safety & Health", description="brought be bring ", category=category22)
session.add(item7)
session.commit()
item8 = Item(user_id=1, name="Baby Toys & Accessories", description="let also female ", category=category22)
session.add(item8)
session.commit()
item9 = Item(user_id=1, name="Diapers", description="moved above won't ", category=category22)
session.add(item9)
session.commit()
item10 = Item(user_id=1, name="Feeding", description="evening. Creeping ", category=category22)
session.add(item10)
session.commit()
item11 = Item(user_id=1, name="Nursery Furniture", description="yielding very second ", category=category22)
session.add(item11)
session.commit()
# Seed the "Books" category and its 6 items.
category23 = Category(user_id=1, name="Books")
session.add(category23)
session.commit()
item1 = Item(user_id=1, name="Business & Trade Books", description="kind green saying ", category=category23)
session.add(item1)
session.commit()
item2 = Item(user_id=1, name="Children's Books", description="stars whales. Lights ", category=category23)
session.add(item2)
session.commit()
item3 = Item(user_id=1, name="Comics & Graphic Novels", description="heaven. His sea ", category=category23)
session.add(item3)
session.commit()
item4 = Item(user_id=1, name="Education, Learning & Self Help Books", description="female form created ", category=category23)
session.add(item4)
session.commit()
item5 = Item(user_id=1, name="Lifestyle Books", description="whose beast brought ", category=category23)
session.add(item5)
session.commit()
item6 = Item(user_id=1, name="Literature & Fiction", description="Which. Without be ", category=category23)
session.add(item6)
session.commit()
# Seed the "Computers, IT & Networking" category and its 8 items.
category24 = Category(user_id=1, name="Computers, IT & Networking")
session.add(category24)
session.commit()
item1 = Item(user_id=1, name="Computer & Laptop Accessories", description="open she'd. beast ", category=category24)
session.add(item1)
session.commit()
item2 = Item(user_id=1, name="Computer Parts & Components", description="likeness darkness ", category=category24)
session.add(item2)
session.commit()
item3 = Item(user_id=1, name="Computers & Servers", description="replenish beginning. ", category=category24)
session.add(item3)
session.commit()
item4 = Item(user_id=1, name="Networking & Accessories", description="saying. Seasons ", category=category24)
session.add(item4)
session.commit()
item5 = Item(user_id=1, name="Printers, Scanners, Hardware & Accessories", description="seas created had ", category=category24)
session.add(item5)
session.commit()
item6 = Item(user_id=1, name="Software", description="brought years whales ", category=category24)
session.add(item6)
session.commit()
item7 = Item(user_id=1, name="Laptops & Netbooks", description="behold god fill ", category=category24)
session.add(item7)
session.commit()
item8 = Item(user_id=1, name="VR Gadgets", description="good stars deep it ", category=category24)
session.add(item8)
session.commit()
# Seed the "Furniture" category and its 10 items.
category25 = Category(user_id=1, name="Furniture")
session.add(category25)
session.commit()
item1 = Item(user_id=1, name="Bedroom Sets", description="multiply herb. Land ", category=category25)
session.add(item1)
session.commit()
item2 = Item(user_id=1, name="Beds & Bed Frames", description="his under were ", category=category25)
session.add(item2)
session.commit()
item3 = Item(user_id=1, name="Chairs & Benches", description="gathered. Fish upon ", category=category25)
session.add(item3)
session.commit()
item4 = Item(user_id=1, name="Garden Furniture", description="Shall they're set ", category=category25)
session.add(item4)
session.commit()
item5 = Item(user_id=1, name="Kitchen & Dining Rooms Sets", description="void give darkness. ", category=category25)
session.add(item5)
session.commit()
item6 = Item(user_id=1, name="Living Room Sets", description="Bring Seasons cattle ", category=category25)
session.add(item6)
session.commit()
item7 = Item(user_id=1, name="Office Furniture", description="replenish made face ", category=category25)
session.add(item7)
session.commit()
item8 = Item(user_id=1, name="Sofas, Bean Bags & Ottomans", description="moved that made may ", category=category25)
session.add(item8)
session.commit()
item9 = Item(user_id=1, name="Storage & Organization", description="called he meat may ", category=category25)
session.add(item9)
session.commit()
item10 = Item(user_id=1, name="Tables", description="morning creepeth ", category=category25)
session.add(item10)
session.commit()
# Seed the "Grocery, Food & Beverages" category and its 22 items.
category26 = Category(user_id=1, name="Grocery, Food & Beverages")
session.add(category26)
session.commit()
item1 = Item(user_id=1, name="Air Fresheners Center", description="seas lesser morning ", category=category26)
session.add(item1)
session.commit()
item2 = Item(user_id=1, name="Baby Bath & Skin Care Center", description="fourth gathering to ", category=category26)
session.add(item2)
session.commit()
item3 = Item(user_id=1, name="Baby Food Center", description="have herb divide ", category=category26)
session.add(item3)
session.commit()
item4 = Item(user_id=1, name="Bakery Center", description="evening i image ", category=category26)
session.add(item4)
session.commit()
item5 = Item(user_id=1, name="Beverage Center", description="created fill his ", category=category26)
session.add(item5)
session.commit()
item6 = Item(user_id=1, name="Breakfast", description="their. Third shall ", category=category26)
session.add(item6)
session.commit()
item7 = Item(user_id=1, name="Cereals & Grains Center", description="itself Whales beast ", category=category26)
session.add(item7)
session.commit()
item8 = Item(user_id=1, name="Cleaning Products Center", description="darkness Had. herb i ", category=category26)
session.add(item8)
session.commit()
item9 = Item(user_id=1, name="Confectionery Center", description="Good. Land. Own ", category=category26)
session.add(item9)
session.commit()
item10 = Item(user_id=1, name="Dairy Product Center", description="green void. He. ", category=category26)
session.add(item10)
session.commit()
item11 = Item(user_id=1, name="Dry Food", description="replenish sixth ", category=category26)
session.add(item11)
session.commit()
item12 = Item(user_id=1, name="Fruits & Vegetables Center", description="lesser give tree. ", category=category26)
session.add(item12)
session.commit()
item13 = Item(user_id=1, name="Ghee Center", description="make have him Air ", category=category26)
session.add(item13)
session.commit()
item14 = Item(user_id=1, name="Hair Care Center", description="called. Male form ", category=category26)
session.add(item14)
session.commit()
item15 = Item(user_id=1, name="Makeup Center", description="winged itself beast ", category=category26)
session.add(item15)
session.commit()
item16 = Item(user_id=1, name="Meats & Chicken Center", description="whales evening ", category=category26)
session.add(item16)
session.commit()
item17 = Item(user_id=1, name="Personal Care", description="moving lesser lesser ", category=category26)
session.add(item17)
session.commit()
item18 = Item(user_id=1, name="Pet Food Center", description="our were. Darkness ", category=category26)
session.add(item18)
session.commit()
item19 = Item(user_id=1, name="Plastic & Paper Products Center", description="yielding years ", category=category26)
session.add(item19)
session.commit()
item20 = Item(user_id=1, name="Seafood Center", description="gathered yielding. ", category=category26)
session.add(item20)
session.commit()
item21 = Item(user_id=1, name="Seasoning, Spices & Preservatives", description="fruit after waters ", category=category26)
session.add(item21)
session.commit()
item22 = Item(user_id=1, name="Skin Care Center", description="together is winged ", category=category26)
session.add(item22)
session.commit()
# Seed the "Home Decor & Furniture" category and its 3 items.
category27 = Category(user_id=1, name="Home Decor & Furniture")
session.add(category27)
session.commit()
item1 = Item(user_id=1, name="Home Decor Center", description="Fish place behold ", category=category27)
session.add(item1)
session.commit()
item2 = Item(user_id=1, name="Lamps & Lighting", description="divide living set ", category=category27)
session.add(item2)
session.commit()
item3 = Item(user_id=1, name="Small Furniture", description="for evening from ", category=category27)
session.add(item3)
session.commit()
# Seed the "Kitchen & Home Supplies" category; further items for this
# category continue past this point in the script.
category28 = Category(user_id=1, name="Kitchen & Home Supplies")
session.add(category28)
session.commit()
item1 = Item(user_id=1, name="Bakeware & Accessories", description="multiply after ", category=category28)
session.add(item1)
session.commit()
item2 = Item(user_id=1, name="Cleaning Products Center", description="great sixth hath ", category=category28)
session.add(item2)
session.commit()
item3 = Item(user_id=1, name="Cooking Utensils", description="divided lesser every ", category=category28)
session.add(item3)
session.commit()
item4 = Item(user_id=1, name="Cookware & Bakeware", description="gathered. forth ", category=category28)
session.add(item4)
session.commit()
item5 = Item(user_id=1, name="Cutlery & Flatware Set", description="called. Male form ", category=category28)
session.add(item5)
session.commit()
item6 = Item(user_id=1, name="Dinnerware & Serveware",
description="Bearing to. winged | |
certain syntax prefix. For example <b>set-prefix</b> 1
"add" will cause breakpoint 1 only to stop if the instruction begins
with "add". The text to compare the prefix with for an instruction is
the one which the instruction is disassembled to.
Set prefix to the empty string ("") to remove this extra condition.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1311")
#
# -------------------- set-substr --------------------
#
# Handler for the "set-substr" CLI command: restrict an execution breakpoint
# so it only triggers on instructions whose disassembly contains `substr`.
# NOTE: Python 2 code (print statements, "except Exception, msg").
def set_substr_cmd(id, substr):
    try:
        bp = SIM_get_attribute_idx(conf.sim, "breakpoints", id)
        # bp[2] is the access-type bitmask; bit value 4 marks execution
        # breakpoints -- presumably Sim_Access_Execute; TODO confirm.
        if not (bp[2] & 4):
            print "This can only be applied to execution breakpoints (access type x)."
            return
        # bp[8] is the substring-condition slot of the breakpoint attribute;
        # write it back via the same indexed attribute.
        bp[8] = substr
        SIM_set_attribute_idx(conf.sim, "breakpoints", id, bp)
    except Exception, msg:
        # Typically raised when no breakpoint with this id exists.
        print msg
# Register "set-substr" with the Simics CLI. The `doc` text is user-facing
# help shown by the frontend (runtime string; left untouched).
new_command("set-substr", set_substr_cmd,
            [arg(int_t, "id"), arg(str_t, "substr")],
            type = ["Breakpoints", "Debugging"],
            short = "set a syntax substring for a breakpoint",
            doc_items = [('NOTE', 'Only supported for execution breakpoints.')],
            see_also = ['set-prefix', 'set-pattern'],
            doc = """
When set Simics will only break on instructions with a certain syntax
substring. For example <b>set-substr</b> 1 "r31" will make breakpoint 1 only
stop if the instruction has a substring "r31".
Set sub-string to the empty string ("") to remove this extra condition.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1341")
#
# -------------------- set-pattern --------------------
#
# Handler for the "set-pattern" CLI command: restrict an execution breakpoint
# so it only triggers when (instruction & mask) == pattern. Pattern and mask
# are hex strings because an instruction may exceed the frontend's native
# integer width.
def set_pattern_cmd(id, pattern, mask):
    # Two hex digits per byte: odd-length strings cannot describe whole bytes.
    if len(pattern) % 2 == 1 or len(mask) % 2 == 1:
        print "Pattern and mask must have a length that corresponds to one or several bytes."
        return
    try:
        bp = SIM_get_attribute_idx(conf.sim, "breakpoints", id)
        # bp[2] access-type bitmask; bit value 4 marks execution breakpoints.
        if not (bp[2] & 4):
            print "This can only be applied to execution breakpoints (access type x)."
            return
        # bp[9]/bp[10] are the pattern/mask condition slots of the breakpoint.
        bp[9] = pattern
        bp[10] = mask
        SIM_set_attribute_idx(conf.sim, "breakpoints", id, bp)
    except Exception, msg:
        # Typically raised when no breakpoint with this id exists.
        print msg
# Register "set-pattern" with the Simics CLI (user-facing help in `doc`).
new_command("set-pattern", set_pattern_cmd,
            [arg(int_t, "id"), arg(str_t, "pattern"), arg(str_t, "mask")],
            type = ["Breakpoints", "Debugging"],
            short = "set an instruction pattern for a breakpoint",
            doc_items = [('NOTE', 'Only supported for execution breakpoints.')],
            see_also = ['set-prefix', 'set-substr'],
            doc = """
When set for breakpoint <i>id</i> Simics will only break on
instructions with a certain bit-pattern. First the <i>mask</i> will be
applied to the instruction and then the result will be compared with
the <i>pattern</i>. For example <b>set-pattern</b> 1 "0x0100" "0x0101"
will specialize breakpoint 1 to break on instructions whose first byte
has the lowest bit set and the second not.
Since an instruction may be longer than the longest supported
integer in the frontend, both pattern and mask must be supplied
as strings.
Set pattern and mask to the empty string ("") to remove this extra condition.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1375")
#
# -------------------- list-breakpoints --------------------
#
def list_breakpoints_cmd(all):
found = 0
assert_cpu()
bp_list = conf.sim.breakpoints[:]
for (id, type, access, hits, activate_at, active, flags, prefix, substr,
pattern, mask, obj) in bp_list:
if flags & (Sim_Breakpoint_Simulation | Sim_Breakpoint_Private) and not all:
continue
ranges = obj.breakpoints[id]
if len(ranges) == 1:
print
continue
acc = ""
if access & Sim_Access_Read: acc = acc + "r"
if access & Sim_Access_Write: acc = acc + "w"
if access & Sim_Access_Execute: acc = acc + "x"
if not found:
print (" Id Type Enb %-*s %-*s Hits Space"
% (18, "Start", 18, "Stop"))
found = 1
pr("%3d %-8s %-3s 0x%016x 0x%016x %6d %s\n" %
(id,
iff(type == Sim_Break_Physical,
"phys",
iff(type == Sim_Break_Virtual, "virt", "lin")) + "-" + acc,
iff(active,"yes","no"),
ranges[1],
ranges[2],
hits,
obj.name))
for r in range(3, len(ranges), 2):
print "%s0x%016x 0x%016x" % (" "*17, ranges[r], ranges[r + 1])
if activate_at > hits:
print " Ignore count:", number_str(activate_at - hits - 1, 10)
if prefix:
print " Prefix:", prefix
if substr:
print " Substr:", substr
if pattern:
print " Pattern: 0x%s, Mask: 0x%s" % (pattern, mask)
time_bp = 0
for cpu in all_processors():
for q in (Sim_Queue_Step, Sim_Queue_Time):
for (obj, desc, time) in cpu.event_desc[q]:
if not obj and desc.startswith("User breakpoint"):
if q == Sim_Queue_Time:
unit = "cycle"
else:
unit = "step"
if found and not time_bp:
print
time_bp = 1
print ("Breakpoint at %-5s %s (%s)"
% (unit, number_str(time, 10), cpu.name))
if not found and not time_bp:
print "No breakpoints set."
return
# Register "list-breakpoints" (aliases "ib", "info-breakpoints") with the
# Simics CLI. The `doc` text is user-facing help (runtime string; untouched).
new_command("list-breakpoints", list_breakpoints_cmd,
            [arg(flag_t, "-all")],
            alias = ["ib", "info-breakpoints"],
            type = ["Breakpoints", "Debugging"],
            short = "print information about breakpoints",
            see_also = ['<breakpoint>.break', 'delete', 'enable', 'ignore', 'set-prefix', 'set-substr', 'set-pattern'],
            doc = """
Prints information about all breakpoints set. The following
information is printed for memory breakpoints: the id (used by other
commands to refer to the breakpoint), if the breakpoint is set on
physical or virtual addresses and the access type (r = read, w =
write, or x = execute), if enabled (see the <b>enable</b> command),
the address range of the breakpoint, how many times the breakpoint
has been triggered, and what memory space or context object it is set in.
If prefix, substring and/or pattern conditions are set it will be
printed as well (see <b>set-prefix</b>, <b>set-substr</b> and
<b>set-pattern</b> command).
Time breakpoints are also listed.
If <arg>-all</arg> is passed as argument, <cmd>list-breakpoints</cmd> will also
list all internal breakpoints set for simulation purposes.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1471")
#
# -------------------- map --------------------
#
# Handler for the "<memory-space>.map" / "<port-space>.map" commands:
# print the space's map attribute as a table sorted by base address.
# NOTE: Python 2 code; `map` intentionally(?) shadows the builtin.
def map_cmd(obj):
    map = obj.map
    # Sorts in place; obj.map was copied by attribute read, so the
    # configuration attribute itself is presumably unaffected -- TODO confirm.
    map.sort()
    print "base object fn offs length"
    # Each entry: [base, object, fn, offset, length, (target, priority,
    # width, swap ... only for memory-space maps)].
    for line in map:
        print "0x%016x %-20s %2d 0x%-16x 0x%-16x" % (line[0], line[1].name,
                                                     line[2], line[3],
                                                     line[4])
        if len(line) > 5:
            # map in port-space does not have all fields
            if line[5] and line[5] != line[1]:
                print " target -> %s" % line[5].name
            if line[6] != 0:
                print " priority %d" % line[6]
            if line[7] != 0:
                output = " width %d bytes" % line[7]
                if line[8] == Sim_Swap_Bus:
                    output += ", byte swap on bus width"
                elif line[8] == Sim_Swap_Trans:
                    output += ", byte swap on transaction size"
                elif line[8] == Sim_Swap_Bus_Trans:
                    output += ", byte swap on bus width and transaction size"
                print output
    # Port spaces have no default_target attribute; ignore that case.
    try:
        deftarg = obj.default_target
        if deftarg:
            print "%-18s %-20s %2d 0x%-16x %-18s" % ("- default -", deftarg[0].name,
                                                     deftarg[1], deftarg[2], '-')
            if deftarg[3]:
                print " target -> %s" % deftarg[3].name
    except AttributeError:
        pass
# Register "map" in the memory-space namespace (user-facing help in `doc`).
new_command("map", map_cmd,
            [],
            namespace = "memory-space",
            type = ["Memory", "Configuration", "Inspecting Simulated State"],
            short = "list memory map",
            see_also = ['<memory-space>.add-map', '<memory-space>.del-map'],
            doc = """
Prints the memory map of the memory space object, one line per entry
in the map attribute of the memory space. The <em>base</em> column is
the starting address of the map. The <em>object</em> column contains
the object mapped at that address. <em>fn</em> is the function number
and <em>offs</em> is the offset for the object. <em>length</em> is the
number of bytes mapped.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1534")
# Register the same handler as "map" in the port-space namespace.
new_command("map", map_cmd,
            [],
            namespace = "port-space",
            short = "list port map",
            type = ["Memory", "Configuration", "Inspecting Simulated State"],
            see_also = ['<port-space>.add-map', '<port-space>.del-map'],
            doc = """
Prints the port map of the port space object, one line per entry
in the map attribute of the port space. The <em>base</em> column is
the starting address of the map. The <em>object</em> column contains
the object mapped at that address. <em>fn</em> is the function number
and <em>offs</em> is the offset for the object. <em>length</em> is the
number of bytes mapped.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="1549")
#
# add-map / del-map in memory-space
#
# Map from the user-visible byte-swap mode names (accepted by the "swap"
# argument of add-map) to the corresponding Sim_Swap_* constants.
swap_names = {
    'none':      Sim_Swap_None,
    'bus':       Sim_Swap_Bus,
    'bus-trans': Sim_Swap_Bus_Trans,
    'trans':     Sim_Swap_Trans,
}
# Handler for "<memory-space>.add-map": append one mapping entry to the
# space's map attribute. The attribute write performs the validation; a
# failure is reported to the CLI via SIM_command_has_problem().
# NOTE: Python 2 code; `object` shadows the builtin.
def add_map_cmd(space, object, base, length, fn, offset, target,
                pri, align_size, swap):
    # Reject swap modes not listed in swap_names (tab-completion source).
    if swap not in swap_names:
        print "Unknown byte swapping requested: '%s'" % swap
        SIM_command_has_problem()
        return
    try:
        # Entry layout matches what map_cmd prints:
        # [base, object, fn, offset, length, target, priority, align, swap].
        space.map += [[base, object, fn, offset, length, target, pri,
                       align_size, swap_names[swap]]]
    except Exception, msg:
        print "Failed mapping '%s' in '%s': %s" % (object.name,
                                                   space.name, msg)
        SIM_command_has_problem()
        return
    else:
        print "Mapped '%s' in '%s' at address 0x%x." % (object.name,
                                                        space.name, base)
# Tab-completion expander for the "swap" argument of add-map: returns the
# swap-mode names that start with the typed fragment `comp`.
def swap_expander(comp):
    return get_completions(comp, swap_names)
new_command("add-map", add_map_cmd,
[arg(obj_t('object'), 'device'),
arg(uint64_t, 'base'),
arg(uint64_t, 'length'),
arg(int_t, 'function', '?', 0),
arg(uint64_t, 'offset', '?', 0),
arg(obj_t('object'), 'target', '?', None),
arg(int_t, 'priority', '?', 0),
arg(uint64_t, 'align-size', '?', 8),
arg(str_t, 'swap', '?', 'none', expander = swap_expander)],
namespace = "memory-space",
type = ["Memory", "Configuration"],
see_also = ['<memory-space>.map', '<memory-space>.del-map'],
short = "map device in a memory-space",
doc = """
Map <param>device</param> into a memory-space at address <param>base</param>
and with length <param>length</param>. Different mappings of the same device
may be indentified by a device specific <param>function</param> number. For
translator and bridge mappings, a <param>target</param> device should be given.
The mapping may specify an offset into the device's memory space, using the
<param>offset</param> argument. If several device mappings overlap, the
<param>priority</param> is used to select what device will receive memory
accesses. The priority is an integer between 0 and 255, where 0 is highest.
For devices that do not support large accesses, the <param>align-size</param>
governs | |
float(line_split[1])
p_w_neut = float(line_split[2])
p_w_neg = float(line_split[3])
for index, element in enumerate(ppmi_matrix[i]):
if element != 0:
f = open(sentiment_lexicon)
lines = f.readlines()
cword = lines[index].split()
pos_weight = float(cword[1])
neut_weight = float(cword[2])
neg_weight = float(cword[3])
pos = element * pos_weight
neut = element * neut_weight
neg = element * neg_weight
p_w_pos += pos
p_w_neut += neut
p_w_neg += neg
file.write(word + " " + str(round(p_w_pos,3)) + " " + str(round(p_w_neut, 3)) + " " + str(round(p_w_neg, 3)) + "\n")
file.close()
#function to estimate precision, recall and accuracy of our sentiment analysis
def analyze(int_data, data, name, filename):
    """
    Evaluate system sentiment labels against intellectually assigned gold labels.

    Reads two ';'-separated CSV files whose first column holds a sentiment
    label ('positiv', 'neutral', anything else = negative), builds the
    gold-vs-system confusion matrix, one contingency table per class and a
    pooled table, and writes them (rendered with tabulate) together with the
    macro- and micro-averaged precision, recall and accuracy to `filename`.

    Side effects: the six averages are also stored in the module-level
    dictionaries mac_precisions, mic_precisions, mac_recalls, mic_recalls,
    mac_accs and mic_accs, keyed by `filename`.

    Parameters:
        int_data -- path of the CSV with the gold (intellectual) labels
        data     -- path of the CSV with the system (algorithm) labels
        name     -- classifier name used as table heading (e.g. 'NB')
        filename -- output text file; also the key for the global result dicts

    Raises a division error if a class never occurs in either label set
    (zero denominator), as before.
    """
    # Read both label files up front; rows are matched by position.
    with open(int_data, 'r') as csvfile:
        int_reader = list(csv.reader(csvfile, delimiter=';'))
    with open(data, 'r') as csvfile:
        reader = list(csv.reader(csvfile, delimiter=';'))
    # Confusion-matrix cells, named <gold>_<system>.
    pos_pos = pos_neut = pos_neg = 0
    neut_pos = neut_neut = neut_neg = 0
    neg_pos = neg_neut = neg_neg = 0
    for i, row in enumerate(int_reader):
        system = reader[i][0]
        if row[0] == 'positiv':
            if system == 'positiv':
                pos_pos += 1
            elif system == 'neutral':
                pos_neut += 1
            else:
                pos_neg += 1
        elif row[0] == 'neutral':
            if system == 'positiv':
                neut_pos += 1
            elif system == 'neutral':
                neut_neut += 1
            else:
                neut_neg += 1
        else:
            if system == 'positiv':
                neg_pos += 1
            elif system == 'neutral':
                neg_neut += 1
            else:
                neg_neg += 1
    # Per-class contingency tables (yes/no = "is this class" gold/system).
    pos_y_y = pos_pos                                    # gold pos, system pos
    pos_y_n = pos_neut + pos_neg                         # gold pos, system not-pos
    pos_n_y = neut_pos + neg_pos                         # gold not-pos, system pos
    pos_n_n = neut_neut + neut_neg + neg_neut + neg_neg  # neither is pos
    neut_y_y = neut_neut
    neut_y_n = neut_pos + neut_neg
    neut_n_y = pos_neut + neg_neut
    neut_n_n = pos_pos + pos_neg + neg_pos + neg_neg
    neg_y_y = neg_neg
    neg_y_n = neg_pos + neg_neut
    neg_n_y = pos_neg + neut_neg
    neg_n_n = pos_pos + pos_neut + neut_neut + neut_pos
    # Pooled table: cell-wise sum of the three contingency tables.
    y_y = pos_y_y + neut_y_y + neg_y_y
    y_n = pos_y_n + neut_y_n + neg_y_n
    # BUGFIX: the original summed neut_n_y twice and omitted neg_n_y,
    # which skewed the microaverage precision and accuracy.
    n_y = pos_n_y + neut_n_y + neg_n_y
    n_n = pos_n_n + neut_n_n + neg_n_n
    # Precision (Decimal keeps exact rational arithmetic as before).
    pos_precision = pos_y_y / Decimal(pos_y_y + pos_n_y)
    neut_precision = neut_y_y / Decimal(neut_y_y + neut_n_y)
    neg_precision = neg_y_y / Decimal(neg_y_y + neg_n_y)
    mac_precision = (pos_precision + neut_precision + neg_precision) / 3  # Macroaverage Precision
    mic_precision = y_y / Decimal(y_y + n_y)                              # Microaverage Precision
    global mac_precisions, mic_precisions
    mac_precisions[filename] = mac_precision
    mic_precisions[filename] = mic_precision
    # Recall.
    pos_recall = pos_y_y / Decimal(pos_y_y + pos_y_n)
    neut_recall = neut_y_y / Decimal(neut_y_y + neut_y_n)
    neg_recall = neg_y_y / Decimal(neg_y_y + neg_y_n)
    mac_recall = (pos_recall + neut_recall + neg_recall) / 3  # Macroaverage Recall
    mic_recall = y_y / Decimal(y_y + y_n)                     # Microaverage Recall
    global mac_recalls, mic_recalls
    mac_recalls[filename] = mac_recall
    mic_recalls[filename] = mic_recall
    # Accuracy. (The original's macro/micro comments were swapped; fixed.)
    pos_acc = (pos_y_y + pos_n_n) / Decimal(pos_y_y + pos_y_n + pos_n_y + pos_n_n)
    neut_acc = (neut_y_y + neut_n_n) / Decimal(neut_y_y + neut_y_n + neut_n_y + neut_n_n)
    neg_acc = (neg_y_y + neg_n_n) / Decimal(neg_y_y + neg_y_n + neg_n_y + neg_n_n)
    mac_acc = (pos_acc + neut_acc + neg_acc) / 3              # Macroaverage Accuracy
    mic_acc = (y_y + n_n) / Decimal(y_y + y_n + n_y + n_n)    # Microaverage Accuracy
    global mac_accs, mic_accs
    mac_accs[filename] = mac_acc
    mic_accs[filename] = mic_acc
    # Render and save all tables; `with` guarantees the file is closed.
    with open(filename, "w") as out:
        out.write(name + ': ' + '\n')
        out.write('Confusion Matrix:' + '\n')
        confusion_table = [['pos', pos_pos, neut_pos, neg_pos],
                           ['neut', pos_neut, neut_neut, neg_neut],
                           ['neg', pos_neg, neut_neg, neg_neg]]
        confusion_header = [name, 'pos', 'neut', 'neg']
        out.write(tabulate(confusion_table, confusion_header, tablefmt="grid") + '\n' + '\n')
        out.write('Contingency Table Positive:' + '\n')
        contingency_table_pos = [['yes', pos_y_y, pos_n_y], ['no', pos_y_n, pos_n_n]]
        contingency_header_pos = ['pos', 'yes', 'no']
        out.write(tabulate(contingency_table_pos, contingency_header_pos, tablefmt="grid") + '\n' + '\n')
        out.write('Contingency Table Neutral:' + '\n')
        contingency_table_neut = [['yes', neut_y_y, neut_n_y], ['no', neut_y_n, neut_n_n]]
        contingency_header_neut = ['neut', 'yes', 'no']
        out.write(tabulate(contingency_table_neut, contingency_header_neut, tablefmt="grid") + '\n' + '\n')
        out.write('Contingency Table Negative:' + '\n')
        contingency_table_neg = [['yes', neg_y_y, neg_n_y], ['no', neg_y_n, neg_n_n]]
        contingency_header_neg = ['neg', 'yes', 'no']
        out.write(tabulate(contingency_table_neg, contingency_header_neg, tablefmt="grid") + '\n' + '\n')
        out.write('Pooled Table:' + '\n')
        pooled_table = [['yes', y_y, n_y], ['no', y_n, n_n]]
        pooled_header = [' ', 'yes', 'no']
        out.write(tabulate(pooled_table, pooled_header, tablefmt="grid") + '\n' + '\n')
        out.write('Macroaverage Precision: ' + str(round(mac_precision, 3)) + ' ' + 'Macroaverage Recall: ' + str(round(mac_recall, 3)) + ' ' + 'Macroaverage Accuracy: ' + str(round(mac_acc, 3)) + '\n')
        out.write('Microaverage Precision: ' + str(round(mic_precision, 3)) + ' ' + 'Microaverage Recall: ' + str(round(mic_recall, 3)) + ' ' + 'Microaverage Accuracy: ' + str(round(mic_acc, 3)))
#function to visualize our results via plots
def visualize(int_data, data, data2, data3, name, name2, name3, a_name, a_name2, a_name3, a_name4, a_name5, filename):
"""
To visualize our results we will create bar charts and pie charts, which will then be saved into one pdffile.
"""
with PdfPages(filename) as pdf:
"""
The following solution for creating bar charts (for example line 693 to 708 and the other equivalent code snippets) is
based on the code example of the matplotlib.
Link: http://matplotlib.org/examples/api/barchart_demo.html
"""
#bar chart to compare the macro- and microaverage precision of Naive Bayes, MaxEnt and SVM
num = 3
mac_precisions_list = (mac_precisions[a_name], mac_precisions[a_name2], mac_precisions[a_name3])
x_locate = np.arange(num)
width = 0.35
fig, ax = plt.subplots()
rec = ax.bar(x_locate, mac_precisions_list, width, color='b')
mic_precisions_list = (mic_precisions[a_name], mic_precisions[a_name2], mic_precisions[a_name3])
rec2 = ax.bar(x_locate + width, mic_precisions_list, width, color='g')
ax.set_title('Macro- and Microaverage Precision of NB, MaxEnt and SVM')
ax.set_xticks(x_locate + width)
ax.set_xticklabels(('NB', 'MaxEnt', 'SVM'))
ax.legend((rec[0], rec2[0]), ('Macroaverage Precision', 'Microaverage Precision'))
axes = plt.gca()
axes.set_ylim([0,1])
pdf.savefig()
plt.close()
#bar chart to compare the macro- and microaverage recall of Naive Bayes, MaxEnt and SVM
mac_recalls_list = (mac_recalls[a_name], mac_recalls[a_name2], mac_recalls[a_name3])
fig, ax = plt.subplots()
rec = ax.bar(x_locate, mac_recalls_list, width, color='b')
mic_recalls_list = (mic_recalls[a_name], mic_recalls[a_name2], mic_recalls[a_name3])
rec2 = ax.bar(x_locate + width, mic_recalls_list, width, color='g')
ax.set_title('Macro- and Microaverage Recall of NB, MaxEnt and SVM')
ax.set_xticks(x_locate + width)
ax.set_xticklabels(('NB', 'MaxEnt', 'SVM'))
ax.legend((rec[0], rec2[0]), ('Macroaverage Recall', 'Microaverage Recall'))
axes = plt.gca()
axes.set_ylim([0,1])
pdf.savefig()
plt.close()
#bar chart to compare the macro- and microaverage accuracy of Naive Bayes, MaxEnt and SVM
mac_accs_list = (mac_accs[a_name], mac_accs[a_name2], mac_accs[a_name3])
fig, ax = plt.subplots()
rec = ax.bar(x_locate, mac_accs_list, width, color='b')
mic_accs_list = (mic_accs[a_name], mic_accs[a_name2], mic_accs[a_name3])
rec2 = ax.bar(x_locate + width, mic_accs_list, width, color='g')
ax.set_title('Macro- and Microaverage Accuracy of NB, MaxEnt and SVM')
ax.set_xticks(x_locate + width)
ax.set_xticklabels(('NB', 'MaxEnt', 'SVM'))
ax.legend((rec[0], rec2[0]), ('Macroaverage Accuracy', 'Microaverage Accuracy'))
axes = plt.gca()
axes.set_ylim([0,1])
pdf.savefig()
plt.close()
"""
The following solution for creating pie charts (for example line 762 to 773 and the other equivalent code snippets) is
based on the code example of the matplotlib.
Link: http://matplotlib.org/examples/pie_and_polar_charts/pie_demo_features.html
"""
#pie chart to show the proportion between positive, neutral and negative sentiment assigned intellectually
pos = 0
neut = 0
neg = 0
with open(int_data, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=';')
for row in reader:
if row[0] == 'positiv':
pos += 1
elif row[0] | |
# Public API of this parsing-utilities module (alphabetical order).
__all__ = ['biblio_parser',
'build_institutions_dic',
'build_title_keywords',
'check_and_drop_columns',
'country_normalization',
'extend_author_institutions',
'getting_secondary_inst_list',
'merge_database',
'name_normalizer',
'normalize_journal_names',
'setting_secondary_inst_filter',
'upgrade_col_names',
]
#
# Globals used from BiblioAnalysis_Utils.BiblioGeneralGlobals: ALIAS_UK, CHANGE, COUNTRIES,
# Globals used from BiblioAnalysis_Utils.BiblioSpecificGlobals: BLACKLISTED_WORDS, COL_NAMES,
# DIC_INST_FILENAME, DIC_LOW_WORDS, DIC_OUTDIR_PARSING,
# INST_FILTER_LIST, REP_UTILS,
# NLTK_VALID_TAG_LIST, NOUN_MINIMUM_OCCURRENCES,
# RE_NUM_CONF, RE_YEAR_JOURNAL,
# SCOPUS, USECOLS_SCOPUS, WOS
# Functions used from BiblioAnalysis_Utils.BiblioGui: Select_multi_items
# Functions used from BiblioAnalysis_Utils.BiblioParsingScopus: biblio_parser_scopus
# Functions used from BiblioAnalysis_Utils.BiblioParsingWos: biblio_parser_wos
def build_title_keywords(df):
    '''Builds the corpus keywords from the article titles.

    Given the dataframe 'df' with one column 'Title':

            Title
     0  Experimental and CFD investigation of inert be...
     1  Impact of Silicon/Graphite Composite Electrode...

    the function 'build_title_keywords':

       1- Builds the set "keywords_TK" of the tokens appearing at least NOUN_MINIMUM_OCCURRENCES times
    in all the article titles of the corpus. The tokens are the words of the title with nltk tags
    belonging to the global list 'NLTK_VALID_TAG_LIST'.
       2- Adds two columns 'title_token' and 'kept_tokens' to the dataframe 'df'. The column 'title_token'
    contains the tokenized and lemmatized (using the nltk WordNetLemmatizer) title. The column
    'kept_tokens' contains the list of words common to the set "keywords_TK" and to the title tokens.
       3- Builds the list of tuples 'bag_of_words_occurrences'
    [(token_1, # occurrences token_1), (token_2, # occurrences token_2), ...] ordered by decreasing
    values of # occurrences token_i.
       4- Suppresses words pertaining to BLACKLISTED_WORDS from the bag of words.

    Args:
       df (dataframe): pub_id | Title

    Returns:
       df (dataframe): pub_id | title_token | kept_tokens where title_token is the list of tokens of
                       the title and kept_tokens the list of tokens with a frequency of occurrence
                       >= NOUN_MINIMUM_OCCURRENCES
       bag_of_words_occurrences (list of tuples): [(word_1, # occurrence_1), (word_2, # occurrence_2), ...]
    '''
    # Standard library imports
    import operator
    from collections import Counter

    # 3rd party imports
    import nltk
    import numpy as np

    # Local imports
    from BiblioAnalysis_Utils.BiblioSpecificGlobals import NLTK_VALID_TAG_LIST
    from BiblioAnalysis_Utils.BiblioSpecificGlobals import NOUN_MINIMUM_OCCURRENCES
    from BiblioAnalysis_Utils.BiblioSpecificGlobals import BLACKLISTED_WORDS

    def tokenizer(text):
        '''Tokenizes and lemmatizes the string 'text'. Only the words with nltk tags in the global
        NLTK_VALID_TAG_LIST are kept.

        ex 'Thermal stability of Mg2Si0.55Sn0.45 for thermoelectric applications'
        gives the list: ['thermal', 'stability', 'mg2si0.55sn0.45', 'thermoelectric', 'application']

        Args:
            text (string): string to tokenize

        Returns:
            The list valid_words_lemmatized
        '''
        tokenized = nltk.word_tokenize(text.lower())
        valid_words = [word for (word, pos) in nltk.pos_tag(tokenized)
                       if pos in NLTK_VALID_TAG_LIST]
        stemmer = nltk.stem.WordNetLemmatizer()
        valid_words_lemmatized = [stemmer.lemmatize(valid_word) for valid_word in valid_words]
        return valid_words_lemmatized

    df['title_token'] = df['Title'].apply(tokenizer)

    # Flatten all title-token lists into one array (list.sum concatenates),
    # then remove the blacklisted words from the bag of words.
    bag_of_words = np.array(df.title_token.sum())
    for blacklisted in BLACKLISTED_WORDS:
        bag_of_words = bag_of_words[bag_of_words != blacklisted]

    bag_of_words_occurrences = list(Counter(bag_of_words).items())
    bag_of_words_occurrences.sort(key=operator.itemgetter(1), reverse=True)

    # Keep only the tokens frequent enough to count as corpus keywords.
    keywords_TK = {x for x, y in bag_of_words_occurrences if y >= NOUN_MINIMUM_OCCURRENCES}

    df['kept_tokens'] = df['title_token'].apply(lambda x: list(keywords_TK.intersection(set(x))))

    return df, bag_of_words_occurrences
def country_normalization(country):
    '''
    Normalizes the country name for coherence seeking between wos and scopus corpuses.
    Returns the canonical country name, or '' when no normalization applies.
    '''
    # Local imports
    from BiblioAnalysis_Utils.BiblioGeneralGlobals import ALIAS_UK
    from BiblioAnalysis_Utils.BiblioGeneralGlobals import COUNTRIES

    # Already a canonical country name: keep it unchanged.
    if country in COUNTRIES:
        return country

    # Known UK aliases.
    if country in ALIAS_UK:
        return 'United Kingdom'
    # Substring matches, checked in the same order as before.
    if 'USA' in country:
        return 'United States'
    if ('china' in country) or ('China' in country):
        return 'China'

    # Exact-name aliases; anything else is normalized to the empty string.
    exact_aliases = {
        'Russia': 'Russian Federation',
        'U Arab Emirates': 'United Arab Emirates',
        'Vietnam': 'Viet Nam',
    }
    return exact_aliases.get(country, '')
def build_institutions_dic(rep_utils = None, dic_inst_filename = None):
    '''
    The `build_institutions_dic` function builds the dict 'inst_dic'
    giving the normalized names of institutions from a csv file `dic_inst_filename`.
    The name of the csv file is set in the `DIC_INST_FILENAME` global.

    Args:
        rep_utils (str): name of the folder where the csv file is stored
        dic_inst_filename (str): name of the csv file.

    Returns:
        `dict`: `inst_dic` as {raw_inst:norm_inst} where
                - raw_inst is a raw institution name
                - norm_inst is the normalized institution name.

    Note:
        The globals `REP_UTILS` and `DIC_INST_FILENAME` are used as defaults
        when the corresponding argument is None.
    '''
    # Standard library imports
    from pathlib import Path

    # 3rd party imports
    import pandas as pd

    # Local imports
    from BiblioAnalysis_Utils.BiblioSpecificGlobals import DIC_INST_FILENAME
    from BiblioAnalysis_Utils.BiblioSpecificGlobals import REP_UTILS

    # Fall back on the module-level defaults (identity check, per PEP 8).
    if dic_inst_filename is None:
        dic_inst_filename = DIC_INST_FILENAME
    if rep_utils is None:
        rep_utils = REP_UTILS

    # Setting the file path for dic_inst_filename file reading
    path_dic_inst = Path(__file__).parent / rep_utils / Path(dic_inst_filename)

    # Reading and cleaning the dic_inst_filename file:
    # each line is "raw_name : normalized_name".
    inst_dic = pd.read_csv(path_dic_inst, sep=':', header=None, encoding='latin1')
    inst_dic.sort_values([0], inplace=True)
    inst_dic[0] = inst_dic[0].str.strip()
    inst_dic[1] = inst_dic[1].str.strip()
    inst_dic = dict(zip(inst_dic[0], inst_dic[1]))

    return inst_dic
def setting_secondary_inst_filter(out_dir_parsing):
    '''The `setting_secondary_inst_filter` function allows building the affiliation filter
    "inst_filter_list" from the institutions list of the corpus using the `Select_multi_items` GUI.

    Args:
        out_dir_parsing (path): the corpus parsing path for reading the "DIC_OUTDIR_PARSING['I2']" file.

    Returns:
        (list): list of tuples (institution, country) selected by the user.

    Notes:
        The globals 'COL_NAMES' and 'DIC_OUTDIR_PARSING' are used.
        The function `Select_multi_items` is used from the `BiblioAnalysis_Utils` package.
    '''
    # Standard library imports
    from pathlib import Path

    # 3rd party imports
    import numpy as np
    import pandas as pd

    # Local imports
    from BiblioAnalysis_Utils.BiblioGui import Select_multi_items
    from BiblioAnalysis_Utils.BiblioSpecificGlobals import COL_NAMES
    from BiblioAnalysis_Utils.BiblioSpecificGlobals import DIC_OUTDIR_PARSING

    institutions_alias = COL_NAMES['auth_inst'][4]
    country_alias = COL_NAMES['country'][2]

    df_auth_inst = pd.read_csv(Path(out_dir_parsing) / Path(DIC_OUTDIR_PARSING['I2']),
                               sep = '\t')

    # Collect the raw ";"-separated institution strings, flatten them and deduplicate.
    raw_institutions_list = [auth_inst for auth_inst in df_auth_inst[institutions_alias]]
    institutions_list = list(np.concatenate([raw_inst.split(';') for raw_inst in raw_institutions_list]))
    institutions_list = sorted(set(institutions_list))

    # Present each "<institution>_<country>" entry as "country:institution" for selection.
    country_institution_list = sorted(x.split('_')[1] + ':' + x.split('_')[0]
                                      for x in institutions_list)

    selected_list = Select_multi_items(country_institution_list,
                                       mode='multiple',
                                       fact=2,
                                       win_widthmm=80,
                                       win_heightmm=100,
                                       font_size=16)

    # Convert the "country:institution" selections back to (institution, country) tuples.
    inst_filter_list = [(x.split(':')[1].strip(), x.split(':')[0].strip()) for x in selected_list]

    return inst_filter_list
def merge_database(database, filename, in_dir, out_dir):
    '''Merges several databases in one database.

    Args:
        database (string): database type (scopus or wos)
        filename (str): name of the merged database
        in_dir (str): name of the folder where the databases are stored
        out_dir (str): name of the folder where the merged databases will be stored

    Raises:
        Exception: if `database` is neither WOS nor SCOPUS.

    Notes:
        The USECOLS_SCOPUS global is used.
        `read_database_wos` is defined elsewhere in this package.
    '''
    # Standard library imports
    import os
    from pathlib import Path

    # 3rd party imports
    import pandas as pd

    # Local imports
    from BiblioAnalysis_Utils.BiblioSpecificGlobals import SCOPUS
    from BiblioAnalysis_Utils.BiblioSpecificGlobals import USECOLS_SCOPUS
    from BiblioAnalysis_Utils.BiblioSpecificGlobals import WOS

    list_data_base = []
    list_df = []
    if database == WOS:
        # WOS exports are ".txt" files; gather them recursively.
        for path, _, files in os.walk(in_dir):
            list_data_base.extend(Path(path) / Path(file) for file in files
                                  if file.endswith(".txt"))
        for file in list_data_base:
            list_df.append(read_database_wos(file))
    elif database == SCOPUS:
        # Scopus exports are ".csv" files; gather them recursively.
        for path, _, files in os.walk(in_dir):
            list_data_base.extend(Path(path) / Path(file) for file in files
                                  if file.endswith(".csv"))
        for file in list_data_base:
            df = pd.read_csv(file, usecols=USECOLS_SCOPUS)  # reads the database
            list_df.append(df)
    else:
        raise Exception(f"Sorry, unrecognized database {database} : should be {WOS} or {SCOPUS} ")

    result = pd.concat(list_df, ignore_index=True)
    result.to_csv(out_dir / Path(filename), sep='\t')
def name_normalizer(text):
    '''Normalizes the author name spelling according to these debatable rules:
        - replacing non-ascii letters by ascii ones
        - capitalizing the first name
        - capitalizing the surnames
        - suppressing commas and dots

    ex: name_normalizer(" <NAME>, E-kj. ")
    >>> "<NAME> E-KJ"

    Args:
        text (str): text to normalize

    Returns:
        The normalized text

    Notes:
        The CHANGE global is used.
    '''
    # Standard library imports
    import functools
    import re
    import unicodedata

    # Local imports
    from BiblioAnalysis_Utils.BiblioGeneralGlobals import CHANGE

    # NFD-decompose so that accents become combining marks, which the
    # ascii-encode step below then strips.
    nfd = functools.partial(unicodedata.normalize, 'NFD')

    text = text.translate(CHANGE)  # Translate special characters using the global CHANGE dict
    text = nfd(text). \
        encode('ascii', 'ignore'). \
        decode('utf-8'). \
        strip()

    # Raw strings below avoid invalid-escape warnings for "\s".
    re_minus = re.compile(r'(-[a-zA-Z]+)')  # Captures: "cCc-cC-ccc-CCc"
    for text_minus_texts in re.findall(re_minus, text):
        text = text.replace(text_minus_texts, '-' + text_minus_texts[1:].capitalize())

    re_apostrophe = re.compile(r"('[a-zA-Z]+)")  # Captures: "cCc'cC'ccc'cc'CCc"
    for text_minus_texts in re.findall(re_apostrophe, text):
        text = text.replace(text_minus_texts, "'" + text_minus_texts[1:].capitalize())

    re_minus = re.compile(r'([a-zA-Z]+-)')  # Captures: "cCc-"
    for text_minus_texts in re.findall(re_minus, text):
        text = text.replace(text_minus_texts, text_minus_texts[:-1].capitalize() + '-')

    re_apostrophe = re.compile(r"([a-zA-Z]+')")  # Captures: "cCc'"
    for text_minus_texts in re.findall(re_apostrophe, text):
        text = text.replace(text_minus_texts, text_minus_texts[:-1].capitalize() + "'")

    re_surname = r"[a-zA-Z]+\s"  # Captures: "cCccC "
    for text_minus_texts in re.findall(re_surname, text):
        text = text.replace(text_minus_texts, text_minus_texts.capitalize())

    re_minus_first_name = r'\s[a-zA-Z]+-[a-zA-Z]+$'  # Captures: "cCc-cC" in the first name
    for x in re.findall(re_minus_first_name, text):
        text = text.replace(x, x.upper())

    return text
def normalize_journal_names(database,df_corpus):
'''The `normalize_journal_names` function normalizes the journal names in the journals specific column
| |
%s' % ('one', "two"))"""
contents, tokens, tree = self.make_data(contents)
expected = contents
results = self.fstringify(contents, tokens, tree)
self.assertEqual(results, expected)
#@-others
#@+node:ekr.20200107174645.1: *3* class TestOrange (BaseTest)
class TestOrange(BaseTest):
"""
Tests for the Orange class.
**Important**: All unit tests assume that black_mode is False.
That is, unit tests assume that no blank lines
are ever inserted or deleted.
"""
#@+others
#@+node:ekr.20200115201823.1: *4* TestOrange.blacken
# Helper: format `contents` with black (string normalization disabled) so tests
# can compare orange's output against black's. Skips the test when black is
# missing or too old to expose FileMode attributes.
def blacken(self, contents, line_length=None):
"""Return the results of running black on contents"""
if not black:
self.skipTest('Can not import black')
# Suppress string normalization!
try:
mode = black.FileMode()
mode.string_normalization = False
if line_length is not None:
mode.line_length = line_length
except TypeError:
self.skipTest('old version of black')
return black.format_str(contents, mode=mode)
#@+node:ekr.20200228074455.1: *4* TestOrange.test_bug_1429
# Regression test for Leo bug #1429: the beautifier must leave this snippet
# unchanged (join/split disabled via max lengths of 0).
def test_bug_1429(self):
contents = r'''\
def get_semver(tag):
"""bug 1429 docstring"""
try:
import semantic_version
version = str(semantic_version.Version.coerce(tag, partial=True))
# tuple of major, minor, build, pre-release, patch
# 5.6b2 --> 5.6-b2
except(ImportError, ValueError) as err:
print('\n', err)
print("""*** Failed to parse Semantic Version from git tag '{0}'.
Expecting tag name like '5.7b2', 'leo-4.9.12', 'v4.3' for releases.
This version can't be uploaded to PyPi.org.""".format(tag))
version = tag
return version
'''
contents, tokens, tree = self.make_data(contents)
expected = contents.rstrip() + '\n'
results = self.beautify(contents, tokens, tree,
max_join_line_length=0, max_split_line_length=0)
self.assertEqual(results, expected)
#@+node:ekr.20210318055702.1: *4* TestOrange.test_bug_1851
# Regression test for Leo bug #1851: a trivial function definition must pass
# through the beautifier untouched (join/split disabled).
def test_bug_1851(self):
contents = r'''\
def foo(a1):
pass
'''
contents, tokens, tree = self.make_data(contents)
expected = contents.rstrip() + '\n'
results = self.beautify(contents, tokens, tree,
max_join_line_length=0, max_split_line_length=0)
self.assertEqual(results, expected)
#@+node:ekr.20200219114415.1: *4* TestOrange.test_at_doc_part
# Verify that Leo @doc parts (#@+at ... #@@c) survive beautification verbatim.
def test_at_doc_part(self):
line_length = 40 # For testing.
contents = """\
#@+at Line 1
# Line 2
#@@c
print('hi')
"""
contents, tokens, tree = self.make_data(contents)
expected = contents.rstrip() + '\n'
results = self.beautify(contents, tokens, tree,
max_join_line_length=line_length,
max_split_line_length=line_length,
)
self.assertEqual(results, expected)
#@+node:ekr.20200116102345.1: *4* TestOrange.test_backslash_newline
def test_backslash_newline(self):
"""
This test is necessarily different from black, because orange doesn't
delete semicolon tokens.
"""
# Backslash-continued lines (with and without trailing space) must be preserved.
contents = r"""
print(a);\
print(b)
print(c); \
print(d)
"""
contents, tokens, tree = self.make_data(contents)
expected = contents.rstrip() + '\n'
# expected = self.blacken(contents).rstrip() + '\n'
results = self.beautify(contents, tokens, tree)
self.assertEqual(results, expected)
#@+node:ekr.20200219145639.1: *4* TestOrange.test_blank_lines_after_function
# Blank-line handling: comments before and after a function must stay where
# they are (unit tests assume black_mode is False — no lines inserted/deleted).
def test_blank_lines_after_function(self):
contents = """\
# Comment line 1.
# Comment line 2.
def spam():
pass
# Properly indented comment.
# Comment line3.
# Comment line4.
a = 2
"""
contents, tokens, tree = self.make_data(contents)
expected = contents
results = self.beautify(contents, tokens, tree)
self.assertEqual(results, expected)
#@+node:ekr.20200220050758.1: *4* TestOrange.test_blank_lines_after_function_2
# Variant of test_blank_lines_after_function: trailing comment after the body.
def test_blank_lines_after_function_2(self):
contents = """\
# Leading comment line 1.
# Leading comment lines 2.
def spam():
pass
# Trailing comment line.
a = 2
"""
contents, tokens, tree = self.make_data(contents)
expected = contents
results = self.beautify(contents, tokens, tree)
self.assertEqual(results, expected)
#@+node:ekr.20200220053212.1: *4* TestOrange.test_blank_lines_after_function_3
# Variant with a nested def and trailing comments (snippet from leoAtFile.py).
def test_blank_lines_after_function_3(self):
# From leoAtFile.py.
contents = r"""\
def writeAsisNode(self, p):
print('1')
def put(s):
print('2')
# Trailing comment 1.
# Trailing comment 2.
print('3')
"""
contents, tokens, tree = self.make_data(contents)
expected = contents
results = self.beautify(contents, tokens, tree)
self.assertEqual(results, expected)
#@+node:ekr.20200210120455.1: *4* TestOrange.test_decorator
# Decorated defs (top level, nested, and with a docstring) must round-trip
# through the beautifier unchanged.
def test_decorator(self):
table = (
# Case 0.
"""\
@my_decorator(1)
def func():
pass
""",
# Case 1.
"""\
if 1:
@my_decorator
def func():
pass
""",
# Case 2.
'''\
@g.commander_command('promote')
def promote(self, event=None, undoFlag=True, redrawFlag=True):
"""Make all children of the selected nodes siblings of the selected node."""
''',
)
for i, contents in enumerate(table):
contents, tokens, tree = self.make_data(contents)
expected = contents
results = self.beautify(contents, tokens, tree)
if results != expected:
g.trace('Fail:', i)
self.assertEqual(results, expected)
#@+node:ekr.20200211094614.1: *4* TestOrange.test_dont_delete_blank_lines
# The beautifier must not delete blank lines inside a class body.
def test_dont_delete_blank_lines(self):
line_length = 40 # For testing.
contents = """\
class Test:
def test_func():
pass
a = 2
"""
contents, tokens, tree = self.make_data(contents)
expected = contents.rstrip() + '\n'
results = self.beautify(contents, tokens, tree,
max_join_line_length=line_length,
max_split_line_length=line_length,
)
self.assertEqual(results, expected)
#@+node:ekr.20200116110652.1: *4* TestOrange.test_function_defs
# Function definitions for which orange's output should match black's
# (expected values are computed by self.blacken).
def test_function_defs(self):
table = (
# Case 0.
"""\
def f1(a=2 + 5):
pass
""",
# Case 2
"""\
def f1():
pass
""",
# Case 3.
"""\
def f1():
pass
""",
# Case 4.
'''\
def should_kill_beautify(p):
"""Return True if p.b contains @killbeautify"""
return 'killbeautify' in g.get_directives_dict(p)
''',
)
for i, contents in enumerate(table):
contents, tokens, tree = self.make_data(contents)
expected = self.blacken(contents).rstrip() + '\n'
results = self.beautify(contents, tokens, tree)
self.assertEqual(results, expected)
#@+node:ekr.20200209152745.1: *4* TestOrange.test_indented_comment
# Indented comments must keep their position; failures are collected and
# reported together rather than aborting on the first mismatch.
def test_indented_comment(self):
line_length = 40 # For testing.
table = (
"""\
if 1:
pass
# An indented comment.
""",
"""\
table = (
# Indented comment.
)
"""
)
fails = 0
for contents in table:
contents, tokens, tree = self.make_data(contents)
expected = contents
if 0:
dump_contents(contents)
dump_tokens(tokens)
# dump_tree(tokens, tree)
results = self.beautify(contents, tokens, tree,
max_join_line_length=line_length,
max_split_line_length=line_length,
)
message = (
f"\n"
f" contents: {contents!r}\n"
f" expected: {expected!r}\n"
f" got: {results!r}")
if results != expected:
fails += 1
print(f"Fail: {fails}\n{message}")
elif 0:
print(f"Ok:\n{message}")
assert not fails, fails
#@+node:ekr.20200116104031.1: *4* TestOrange.test_join_and_strip_condition
# A multi-line parenthesized condition should be joined onto one line.
def test_join_and_strip_condition(self):
contents = """\
if (
a == b or
c == d
):
pass
"""
expected = """\
if (a == b or c == d):
pass
"""
contents, tokens, tree = self.make_data(contents)
expected = textwrap.dedent(expected)
# Black also removes parens, which is beyond our scope at present.
# expected = self.blacken(contents, line_length=40)
results = self.beautify(contents, tokens, tree)
self.assertEqual(results, expected)
#@+node:ekr.20200208041446.1: *4* TestOrange.test_join_leading_whitespace
# Joining continuation lines must respect the leading whitespace of the
# first line; each table entry is its own expected value.
def test_join_leading_whitespace(self):
line_length = 40 # For testing.
table = (
#1234567890x1234567890x1234567890x1234567890x
"""\
if 1:
print('4444',
'5555')
""",
"""\
if 1:
print('4444', '5555')\n""",
)
fails = 0
for contents in table:
contents, tokens, tree = self.make_data(contents)
if 0:
dump_contents(contents)
dump_tokens(tokens)
# dump_tree(tokens, tree)
expected = contents
# expected = self.blacken(contents, line_length=line_length)
results = self.beautify(contents, tokens, tree,
max_join_line_length=line_length,
max_split_line_length=line_length,
)
message = (
f"\n"
f" contents: {contents!r}\n"
f" expected: {expected!r}\n"
f" got: {results!r}")
if results != expected:
fails += 1
print(f"Fail: {fails}\n{message}")
elif 0:
print(f"Ok:\n{message}")
assert not fails, fails
#@+node:ekr.20200121093134.1: *4* TestOrange.test_join_lines
# Short continuation lines within the line-length budget should be joined.
def test_join_lines(self):
# Except where noted, all entries are expected values....
line_length = 40 # For testing.
table = (
#1234567890x1234567890x1234567890x1234567890x
"""print('4444',\n '5555')""",
"""print('4444', '5555')\n""",
)
fails = 0
for contents in table:
contents, tokens, tree = self.make_data(contents)
if 0:
dump_contents(contents)
dump_tokens(tokens)
# dump_tree(tokens, tree)
expected = contents
results = self.beautify(contents, tokens, tree,
max_join_line_length=line_length,
max_split_line_length=line_length,
)
message = (
f"\n"
f" contents: {contents!r}\n"
f" expected: {expected!r}\n"
f" orange: {results!r}")
if results != expected:
fails += 1
print(f"Fail: {fails}\n{message}")
elif 0:
print(f"Ok:\n{message}")
self.assertEqual(fails, 0)
#@+node:ekr.20200210051900.1: *4* TestOrange.test_join_suppression
# A multi-line call inside a class body is joined into a single line.
def test_join_suppression(self):
contents = """\
class T:
a = 1
print(
a
)
"""
expected = """\
class T:
a = 1
print(a)
"""
contents, tokens, tree = self.make_data(contents)
expected = textwrap.dedent(expected)
results = self.beautify(contents, tokens, tree)
self.assertEqual(results, expected)
#@+node:ekr.20200207093606.1: *4* TestOrange.test_join_too_long_lines
# Lines that would exceed the length budget after joining must NOT be joined;
# the table pairs each input with its expected output.
def test_join_too_long_lines(self):
# Except where noted, all entries are expected values....
line_length = 40 # For testing.
table = (
#1234567890x1234567890x1234567890x1234567890x
(
"""print('aaaaaaaaaaaa',\n 'bbbbbbbbbbbb', 'cccccccccccccccc')""",
"""print('aaaaaaaaaaaa',\n 'bbbbbbbbbbbb', 'cccccccccccccccc')\n""",
),
)
fails = 0
for contents, expected in table:
contents, tokens, tree = self.make_data(contents)
if 0:
dump_contents(contents)
dump_tokens(tokens)
# dump_tree(tokens, tree)
results = self.beautify(contents, tokens, tree,
max_join_line_length=line_length,
max_split_line_length=line_length,
)
message = (
f"\n"
f" contents: {contents!r}\n"
f" expected: {expected!r}\n"
f" got: {results!r}")
if results != expected:
fails += 1
print(f"Fail: {fails}\n{message}")
elif 0:
print(f"Ok:\n{message}")
assert not fails, fails
#@+node:ekr.20200108075541.1: *4* TestOrange.test_leo_sentinels
# Leo node sentinels before a def must be preserved by the beautifier.
def test_leo_sentinels_1(self):
# Careful: don't put a sentinel into the file directly.
# That would corrupt leoAst.py.
sentinel = '#@+node:ekr.20200105143308.54: ** test'
contents = f"""\
{sentinel}
def spam():
pass
"""
contents, tokens, tree = self.make_data(contents)
expected = contents.rstrip() + '\n'
results = self.beautify(contents, tokens, tree)
self.assertEqual(results, expected)
#@+node:ekr.20200209155457.1: *4* TestOrange.test_leo_sentinels_2
# Same as test_leo_sentinels_1, but the sentinel precedes a class definition.
def test_leo_sentinels_2(self):
# Careful: don't put a sentinel into the file directly.
# That would corrupt leoAst.py.
sentinel = '#@+node:ekr.20200105143308.54: ** test'
contents = f"""\
{sentinel}
class TestClass:
pass
"""
contents, tokens, tree = self.make_data(contents)
expected = contents.rstrip() + '\n'
results = self.beautify(contents, tokens, tree)
self.assertEqual(results, expected)
#@+node:ekr.20200108082833.1: *4* TestOrange.test_lines_before_class
# Statements preceding a class definition must pass through unchanged.
def test_lines_before_class(self):
contents = """\
a = 2
class aClass:
pass
"""
contents, tokens, tree = self.make_data(contents)
expected = contents
results = self.beautify(contents, tokens, tree)
self.assertEqual(results, expected)
#@+node:ekr.20200110014220.86: *4* TestOrange.test_multi_line_pet_peeves
def test_multi_line_pet_peeves(self):
contents = """\
if x == 4: pass
if x == 4 : pass
print (x, y); x, y = y, x
print (x , y) ; x | |
Space": 0xAAB7C1,
"Calmness": 0x68A895,
"Calthan Brown": 0x6D5044,
"Calypso": 0x3D7188,
"Calypso Berry": 0xC53A4B,
"Calypso Blue": 0x347D8B,
"Calypso Coral": 0xEE5C6C,
"Calypso Green": 0x2E5F60,
"Calypso Red": 0xDE6B66,
"Camaron Pink": 0xFE828C,
"Camarone": 0x206937,
"Cambridge Blue": 0xA3C1AD,
"Cambridge Leather": 0x8C633C,
"Camel": 0xC69F59,
"Camel Brown": 0xA56639,
"Camel Cardinal": 0xCC9944,
"Camel Cord": 0xE0CB82,
"Camel Fur": 0xBB6600,
"Camel Hair": 0xDBB8A4,
"Camel Hair Coat": 0xF5B784,
"Camel Hide": 0xC1AA91,
"Camel Red": 0xE5743B,
"Camel Spider": 0xAF8751,
"Camel Toe": 0xAC8A2A,
"Camel Train": 0xBAAE9D,
"Camel's Hump": 0x817667,
"Camelback": 0xC5AA85,
"Camelback Mountain": 0xD3B587,
"Camellia": 0xF6745F,
"Camellia Pink": 0xCD739D,
"Camellia Rose": 0xEB6081,
"Camelot": 0x803A4B,
"Camembert": 0xFBF3DF,
"Cameo": 0xF2DEBC,
"Cameo Appearance": 0xDFC1C3,
"Cameo Blue": 0x769DA6,
"Cameo Brown": 0xC08A80,
"Cameo Green": 0xDCE6E5,
"Cameo Peach": 0xEBCFC9,
"Cameo Pink": 0xEFBBCC,
"Cameo Role": 0xDDCAAF,
"Cameo Rose": 0xF7DFD7,
"Cameo Stone": 0xEBDFD8,
"Cameroon Green": 0x60746D,
"Camisole": 0xFCD9C7,
"Camo": 0x7F8F4E,
"Camo Beige": 0x8C8475,
"Camo Clay": 0x747F71,
"Camo Green": 0xA5A542,
"Camouflage": 0x3C3910,
"Camouflage Green": 0x4B6113,
"Camouflage Olive": 0xA28F5C,
"Campanelle Noodle": 0xFCF7DB,
"Campánula": 0x3272AF,
"Campanula Purple": 0x6C6D94,
"Campfire": 0xCE5F38,
"Campfire Ash": 0xDDD9CE,
"Campfire Blaze": 0xB67656,
"Campfire Smoke": 0xD5D1CB,
"Campground": 0xD0A569,
"Camping Tent": 0xB6AFA0,
"Camping Trip": 0x67786E,
"Can Can": 0xD08A9B,
"Canada Goose Eggs": 0xEAE2DD,
"Canadian Lake": 0x8F9AA4,
"Canadian Maple": 0xCAB266,
"Canadian Pancake": 0xEDD8C3,
"Canadian Pine": 0x2E7B52,
"Canadian Voodoo Grey": 0xB8B7A3,
"Canal Blue": 0x9CC2C5,
"Canal Street": 0x969281,
"Canaletto": 0x818C72,
"Canary": 0xFDFF63,
"Canary Diamond": 0xFFCE52,
"Canary Feather": 0xEFDE75,
"Canary Grass": 0xD0CCA9,
"Canary Green": 0xD6DEC9,
"Canary Island": 0xE9D4A9,
"Canary Wharf": 0x91A1B5,
"Canary Yellow": 0xFFDF01,
"Cancun Sand": 0xFBEDD7,
"Candela": 0xBAC4D5,
"Candelabra": 0xE1C161,
"Candid Blue": 0x6CC3E0,
"Candidate": 0xC3BC90,
"Candied Apple": 0xB95B6D,
"Candied Blueberry": 0x331166,
"Candied Ginger": 0xBFA387,
"Candied Snow": 0xD8FFF3,
"Candied Yam": 0xF4935B,
"Candied Yams": 0xF9A765,
"Candle Bark": 0xC3BDAA,
"Candle Flame": 0xFFF4A1,
"Candle Glow": 0xFFE8C3,
"Candle in the Wind": 0xF9EBBF,
"Candle Light": 0xDDC1A6,
"Candle Wax": 0xF2EACF,
"Candle Yellow": 0xE09B6E,
"Candlelight": 0xFCD917,
"Candlelight Dinner": 0xCEB3BE,
"Candlelight Ivory": 0xFCF4E2,
"Candlelight Peach": 0xF8A39D,
"Candlelight Yellow": 0xF7F0C7,
"Candlelit Beige": 0xF1EDE0,
"Candlestick Point": 0xFFF1D5,
"Candlewick": 0xF2EBD3,
"Candy": 0xFF9B87,
"Candy Apple Red": 0xFF0800,
"Candy Bar": 0xFFB7D5,
"Candy Cane": 0xF7BFC2,
"Candy Coated": 0xEF9FAA,
"Candy Corn": 0xFCFC5D,
"Candy Drop": 0xC25D6A,
"Candy Floss": 0xE8A7E2,
"Candy Grape Fizz": 0x7755EE,
"Candy Grass": 0x33AA00,
"Candy Green": 0x33CC00,
"Candy Heart Pink": 0xF5A2A1,
"Candy Mix": 0xF3DFE3,
"Candy Pink": 0xFF63E9,
"Candy Tuft": 0xF1D7E4,
"Candy Violet": 0x895D8B,
"Candyman": 0xFF9E76,
"Candytuft": 0xEDC9D8,
"Cane Sugar": 0xE3B982,
"Cane Sugar Glaze": 0xDDBB99,
"Cane Toad": 0x977042,
"Caneel Bay": 0x00849F,
"Canewood": 0xD7B69A,
"Cannery Park": 0xBCB09E,
"Cannoli Cream": 0xF0EFE2,
"Cannon Ball": 0x484335,
"Cannon Barrel": 0x3C4142,
"Cannon Black": 0x251706,
"Cannon Grey": 0x646C64,
"Cannon Pink": 0x8E5164,
"Canoe": 0xDDC49E,
"Canoe Blue": 0x1D5671,
"Canopy": 0x728F02,
"Cantaloupe": 0xFFD479,
"Cantaloupe Slice": 0xFEB079,
"Cantankerous Coyote": 0xAC8D74,
"Canteen": 0x5E5347,
"Canter Peach": 0xF6D3BB,
"Cantera": 0xCEC5AF,
"Canterbury Bells": 0xB9C3E6,
"Canterbury Cathedral": 0xB2AB94,
"Canton": 0x6DA29E,
"Canton Jade": 0xBAE7C7,
"Canvas": 0xBB8855,
"Canvas Cloth": 0xE6DFD2,
"Canvas Luggage": 0xE2D7C6,
"Canvas Satchel": 0xCCB88D,
"Canvas Tan": 0xDDD6C6,
"Canyon Blue": 0x607B8E,
"Canyon Clay": 0xCE8477,
"Canyon Cliffs": 0xECE3D1,
"Canyon Cloud": 0xAEAFBB,
"Canyon Dusk": 0xDDC3B7,
"Canyon Echo": 0xE5E1CC,
"Canyon Falls": 0x97987F,
"Canyon Iris": 0x49548F,
"Canyon Mist": 0xA7A4C0,
"Canyon Peach": 0xEEDACB,
"Canyon Rose": 0xAF6C67,
"Canyon Sand": 0xF2D6AA,
"Canyon Stone": 0x93625B,
"Canyon Sunset": 0xE1927A,
"Canyon Trail": 0xD6B8A9,
"Canyon Verde": 0x8A7E5C,
"Canyon View": 0xC3B39F,
"Canyon Wind": 0xE3E5DF,
"Canyonville": 0xF5DED1,
"Cǎo Lǜ Grass": 0x1FA774,
"Cape Cod": 0x4E5552,
"Cape Cod Bay": 0x557080,
"Cape Cod Blue": 0x91A2A6,
"Cape Honey": 0xFEE0A5,
"Cape Hope": 0xD8D6D7,
"Cape Jasmine": 0xFFB95A,
"Cape Lee": 0x50818B,
"Cape Palliser": 0x75482F,
"Cape Pond": 0x0092AD,
"Cape Verde": 0x01554F,
"Capella": 0xD9CED2,
"Caper": 0xAFC182,
"Caper Green": 0x847640,
"Cap<NAME>": 0x78728C,
"Capers": 0x695E4B,
"Capetown Cream": 0xFCEBCE,
"Capital Blue": 0x1A4157,
"Capital Grains": 0xDBD0A8,
"Capital Yellow": 0xE6BA45,
"Capitalino Cactus": 0x008F4C,
"Capocollo": 0xD9544D,
"Caponata": 0x822A10,
"Cappuccino": 0x633F33,
"Cappuccino Bombe": 0xB4897D,
"Cappuccino Froth": 0xC8B089,
"Capri": 0x00BFFF,
"Capri Breeze": 0x008799,
"Capri Cream": 0xF1F0D6,
"Capri Fashion Pink": 0xAC839C,
"Capri Isle": 0x4F5855,
"Capri Water Blue": 0xABE2D6,
"Capricious Purple": 0xBB00DD,
"Caps": 0x7E7A75,
"Capsella": 0x6D8A74,
"Capsicum Red": 0x76392E,
"Capstan": 0x007EB0,
"Captain Blue": 0x005171,
"Captain Kirk": 0x9B870C,
"Captain Nemo": 0x828080,
"Captains Blue": 0x557088,
"Captivated": 0x947CAE,
"Captivating Cream": 0xF4D9B1,
"Captive": 0x005B6A,
"Capture": 0x2CBAA3,
"Capulet Olive": 0x656344,
"Caput Mortuum": 0x592720,
"Caput Mortuum Grey Red": 0x6F585B,
"Carafe": 0x5D473A,
"Caraïbe": 0x795F4D,
"Carambar": 0x552233,
"Carambola": 0xEFEBD1,
"Caramel": 0xAF6F09,
"Caramel Apple": 0xB87A59,
"Caramel Bar": 0xCC8654,
"Caramel Brown": 0xB18775,
"Caramel Cafe": 0x864C24,
"Caramel Candy": 0xB3715D,
"Caramel Cloud": 0xD4AF85,
"Caramel Coating": 0xBB7711,
"Caramel Cream": 0xF4BA94,
"Caramel Crumb": 0xC39355,
"Caramel Cupcake": 0xB98C5D,
"Caramel Finish": 0xFFD59A,
"Caramel Ice": 0xEEC9AA,
"Caramel Infused": 0xCC7755,
"Caramel Kiss": 0xB08A61,
"Caramel Latte": 0x8C6342,
"Caramel Macchiato": 0xC58D4B,
"Caramel Milk": 0xDDC283,
"Caramel Powder": 0xEEBB99,
"Caramel Sauce": 0xB3804D,
"Caramel Sundae": 0xA9876A,
"Caramel Swirl": 0x8F6A4F,
"Caramelized": 0xBA947F,
"Caramelized Orange": 0xEF924A,
"Caramelized Pears": 0xE7D5AD,
"Caramelized Pecan": 0xA17B4D,
"Caramelized Walnut": 0x6E564A,
"<NAME>": 0xD69E6B,
"Caraquenian Crimson": 0x9C0013,
"<NAME>": 0x8C6E54,
"Caraway": 0xA19473,
"<NAME>": 0x6D563C,
"Caraway Seeds": 0xDFD5BB,
"Carbon": 0x333333,
"Carbon Copy": 0x545554,
"Carbon Dating": 0x565B58,
"Carbon Footprint": 0x7B808B,
"Card Table Green": 0x00512C,
"Cardamom": 0xAAAA77,
"Cardamom Green": 0x989057,
"Cardamom Spice": 0x837165,
"Cardboard": 0xC19A6C,
"Cardin Green": 0x1B3427,
"Cardinal": 0xC41E3A,
"Cardinal Mauve": 0x2C284C,
"Cardinal Pink": 0x8C055E,
"Cardinal Red": 0x9B365E,
"Cardoon": 0x9AAE8C,
"Cardueline Finch": 0x957B38,
"Carefree": 0xDCE9E9,
"Carefree Sky": 0xA6CDDE,
"Careys Pink": 0xC99AA0,
"Cargo": 0x8F755B,
"Cargo Green": 0xC8C5A7,
"Cargo Pants": 0xCDC4AE,
"Cargo River": 0xCFCDBB,
"Caribbean Blue": 0x1AC1DD,
"Caribbean Coast": 0x93C5DD,
"Caribbean Coral": 0xC07761,
"Caribbean Cruise": 0x3F9DA9,
"Caribbean Current": 0x006E6E,
"Caribbean Green": 0x00CC99,
"Caribbean Mist": 0xCADEEA,
"Caribbean Pleasure": 0xD5DCCE,
"Caribbean Sea": 0x00819D,
"Caribbean Sky": 0x819ECB,
"Caribbean Splash": 0x00697C,
"Caribbean Sunrise": 0xF5DAAA,
"Caribbean Swim": 0x126366,
"Caribbean Turquoise": 0x009D94,
"Caribe": 0x147D87,
"Caribou": 0x816D5E,
"Caribou Herd": 0xCDA563,
"Carissima": 0xE68095,
"Carla": 0xF5F9CB,
"Carley's Rose": 0xA87376,
"Carlisle": 0x45867C,
"Carmel": 0x915F3D,
"Carmel Mission": 0x927F76,
"Carmel Woods": 0x8D6B3B,
"Carmelite": 0xB98970,
"Carmen": 0x7C383F,
"Carmen Miranda": 0x903E2F,
"Carmim": 0xA13905,
"Carmine": 0x9D0216,
"Carmine Carnation": 0xAD4B53,
"Carmine Pink": 0xEB4C42,
"Carmine Red": 0xFF0038,
"Carmine Rose": 0xE35B8F,
"Carmoisine": 0xB31C45,
"Carnaby Tan": 0x5B3A24,
"Carnage Red": 0x940008,
"Carnal Brown": 0xBB8866,
"Carnal Pink": 0xEF9CB5,
"Carnation": 0xFD798F,
"Carnation Bloom": 0xF9C0BE,
"Carnation Bouquet": 0xF5C0D0,
"Carnation Coral": 0xEDB9AD,
"Carnation Festival": 0x915870,
"Carnation Pink": 0xFF7FA7,
"Carnation Rose": 0xCE94C2,
"Carnelian": 0xB31B1B,
"Carnival": 0xEB882C,
"Carnival Night": 0x006E7A,
"Carnivore": 0x991111,
"Caro": 0xFFCAC3,
"<NAME>": 0x855C4C,
"Carob Chip": 0x5A484B,
"Carol": 0x338DAE,
"Carol's Purr": 0x77A135,
"Carolina": 0xCBEFCB,
"Carolina Blue": 0x8AB8FE,
"Carolina Green": 0x008B6D,
"Carolina Parakeet": 0xD8DF80,
"Carolina Reaper": 0xFF1500,
"Carona": 0xFBA52E,
"Carotene": 0xFDB793,
"Carousel Pink": 0xF8DBE0,
"Carpaccio": 0xE34234,
"<NAME>": 0x905755,
"Carpet Moss": 0x00AA33,
"Carrageen Moss": 0x905D36,
"Carrara": 0xEEEBE4,
"Carrara Marble": 0xE8E7D7,
"Carriage": 0x6C6358,
"Carriage Door": 0x958D79,
"Carriage Green": 0x254D48,
"Carriage Red": 0x8C403D,
"Carriage Ride": 0x8A8DC4,
"Carriage Stone": 0x7E7265,
"Carriage Yellow": 0xFFB756,
"Carrier Pigeon Blue": 0x889398,
"Carroburg Crimson": 0xA82A70,
"Carrot": 0xFD6F3B,
"Carrot Cake": 0xBF6F31,
"Carrot Curl": 0xFE8C18,
"Carrot Flower": 0xCBD3C1,
"Carrot Orange": 0xED9121,
"Carrot Stick": 0xDF7836,
"Carte Blanche": 0xEEEEFF,
"Carter's Scroll": 0x405978,
"Carton": 0xBB9E7E,
"Cartwheel": 0x665537,
"Carved Wood": 0x937A62,
"Carving Party": 0xF0C39F,
"Casa Blanca": 0xF4ECD8,
"Casa De Oro": 0xCF6837,
"Casa del Mar": 0xCACFE6,
"Casa Talec": 0xC49CA5,
"Casa Verde": 0xABB790,
"Casablanca": 0xF0B253,
"Casal": 0x3F545A,
"Casandora Yellow": 0xFECE5A,
"Casandra": 0x7C4549,
"Cascade": 0xD4EDE6,
"Cascade Beige": 0xE7DBCA,
"Cascade Green": 0xA1C2B9,
"Cascade Tour": 0x697F8E,
"Cascade White": 0xECF2EC,
"Cascades": 0x273E3E,
"Cascading White": 0xF7F5F6,
"Cascara": 0xEE4433,
"Cashew": 0xA47149,
"Cashew Cheese": 0xFCF9BD,
"Cashew Nut": 0xEDCCB3,
"Cashmere": 0xD1B399,
"Cashmere Blue": 0xA5B8D0,
"Cashmere Rose": 0xCE879F,
"Cashmere Sweater": 0xFEF2D2,
"Casket": 0xA49186,
"Casper": 0xAAB5B8,
"Caspian Sea": 0x4F6F91,
"Caspian Tide": 0xAEC7DB,
"Cassandra's Curse": 0xBB7700,
"Cassava Cake": 0xE7C084,
"Cassia Buds": 0xE0CDDA,
"Cassiopeia": 0xAED0C9,
"Cassiterite Brown": 0x623C1F,
"Cast Iron": 0x64645A,
"Castaway": 0x6DBAC0,
"Castaway Beach": 0xD0C19F,
"Castaway Cove": 0x7A9291,
"Castaway Lagoon": 0x607374,
"Castellan Green": 0x455440,
"Castellina": 0xA27040,
"Caster Sugar": 0xFFFFE8,
"Castilian Pink": 0xD4B3AA,
"Casting Sea": 0x4586C7,
"Casting Shadow": 0x9DA7A0,
"Castle Beige": 0xE0D5CA,
"Castle Hill": 0x95827B,
"Castle In The Clouds": 0xEFDCCA,
"Castle in the Sky": 0xD1EAED,
"Castle Mist": 0xBDAEB7,
"Castle Moat": 0x8B6B47,
"Castle Path": 0xC5BAAA,
"Castle Ridge": 0xEADEC7,
"Castle Stone": 0x525746,
"Castle Wall": 0xC8C1AB,
"Castlegate": 0xA0A5A5,
"Castlerock": 0x5F5E62,
"Castleton Green": 0x00564F,
"Castlevania Heart": 0xA80020,
"Castor Grey": 0x646762,
"Castro": 0x44232F,
"Casual Blue": 0x498090,
"Casual Day": 0x95BAC2,
"Casual Elegance": 0xDFD5C8,
"Casual Grey": 0xA09D98,
"Casual Khaki": 0xD3C5AF,
"Cat Person": 0x636D70,
"Cat's Eye Marble": 0xD6A75D,
"Cat's Purr": 0x0071A0,
"Catachan Green": 0x475742,
"Catacomb Bone": 0xE2DCCC,
"Catacomb Walls": 0xDBD7D0,
"Catalan": 0x429395,
"Catalina": 0x72A49F,
"Catalina Blue": 0x062A78,
"Catalina Coast": 0x5C7884,
"Catalina Green": 0x859475,
"Catalina Tile": 0xEFAC73,
"Catarina Green": 0x90C4B4,
"Catawba": 0x703642,
"Catawba Grape": 0x5D3C43,
"Catch The Wave": 0xB5DCD8,
"Caterpillar": 0x66A545,
"Caterpillar Green": 0x146B47,
"Catfish": 0x657D82,
"Cathay Spice": 0x99642C,
"Cathedral": 0xACAAA7,
"Cathedral Glass": | |
# -*- coding: utf-8 -*-
'''
Connection module for Amazon KMS
.. versionadded:: beryllium
:configuration: This module accepts explicit kms credentials but can also utilize
    IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More Information available at::
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file::
kms.keyid: <KEY>
kms.key: <KEY>
A region may also be specified in the configuration::
kms.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
myprofile:
keyid: <KEY>
key: <KEY>
region: us-east-1
:depends: boto
'''
# keep lint from choking on _get_conn and _cache_id
# pylint: disable=E0602
from __future__ import absolute_import
# Import Python libs
import logging
from salt.utils.serializers import json
from distutils.version import LooseVersion as _LooseVersion # pylint: disable=import-error,no-name-in-module
# Import Salt libs
import salt.utils.compat
import salt.utils.odict as odict
log = logging.getLogger(__name__)
# Import third party libs
try:
    # pylint: disable=unused-import
    import boto
    # KMS support was added in boto 2.38.0; refuse to load against older
    # releases so __virtual__() can cleanly report the module unavailable.
    required_boto_version = '2.38.0'
    if (_LooseVersion(boto.__version__) <
            _LooseVersion(required_boto_version)):
        msg = 'boto_kms requires boto {0}.'.format(required_boto_version)
        log.debug(msg)
        raise ImportError()
    import boto.kms
    # pylint: enable=unused-import
    # boto's logger is very chatty; silence everything below CRITICAL.
    logging.getLogger('boto').setLevel(logging.CRITICAL)
    HAS_BOTO = True
except ImportError:
    # Missing or too-old boto: flag it so __virtual__() refuses to load.
    HAS_BOTO = False
def __virtual__():
    '''
    Only load this module when the boto libraries are importable and new
    enough (see the import guard above).
    '''
    return HAS_BOTO
def __init__(opts):
    # Resolve the module dunders (__salt__, __utils__, ...) for this module,
    # then attach the shared boto helper functions (_get_conn, _cache_id, ...)
    # that every public function below relies on.
    salt.utils.compat.pack_dunder(__name__)
    if HAS_BOTO:
        __utils__['boto.assign_funcs'](__name__, 'kms')
def create_alias(alias_name, target_key_id, region=None, key=None, keyid=None,
                 profile=None):
    '''
    Create a display name for a key.
    CLI example::
        salt myminion boto_kms.create_alias 'alias/mykey' key_id
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    try:
        conn.create_alias(alias_name, target_key_id)
    except boto.exception.BotoServerError as exc:
        # Service-side failure: report it in the standard salt-boto shape.
        r['result'] = False
        r['error'] = __utils__['boto.get_error'](exc)
    else:
        r['result'] = True
    return r
def create_grant(key_id, grantee_principal, retiring_principal=None,
                 operations=None, constraints=None, grant_tokens=None,
                 region=None, key=None, keyid=None, profile=None):
    '''
    Adds a grant to a key to specify who can access the key and under what
    conditions.
    CLI example::
        salt myminion boto_kms.create_grant 'alias/mykey' 'arn:aws:iam::1111111:/role/myrole' operations='["Encrypt","Decrypt"]'
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    if key_id.startswith('alias/'):
        # Fix: resolve the alias with the caller's credentials/region rather
        # than the module defaults, so profile-based calls work for aliases.
        key_id = _get_key_id(key_id, region=region, key=key, keyid=keyid,
                             profile=profile)
    r = {}
    try:
        r['grant'] = conn.create_grant(
            key_id,
            grantee_principal,
            retiring_principal=retiring_principal,
            operations=operations,
            constraints=constraints,
            grant_tokens=grant_tokens
        )
    except boto.exception.BotoServerError as e:
        r['error'] = __utils__['boto.get_error'](e)
    return r
def create_key(policy=None, description=None, key_usage=None, region=None,
               key=None, keyid=None, profile=None):
    '''
    Creates a master key.
    CLI example::
        salt myminion boto_kms.create_key '{"Statement":...}' "My master key"
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    try:
        # The policy document must be serialized to a JSON string for boto.
        response = conn.create_key(
            json.serialize(policy),
            description=description,
            key_usage=key_usage
        )
        r['key_metadata'] = response['KeyMetadata']
    except boto.exception.BotoServerError as e:
        r['error'] = __utils__['boto.get_error'](e)
    return r
def decrypt(ciphertext_blob, encryption_context=None, grant_tokens=None,
            region=None, key=None, keyid=None, profile=None):
    '''
    Decrypt ciphertext.
    CLI example::
        salt myminion boto_kms.decrypt encrypted_ciphertext
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    try:
        response = conn.decrypt(
            ciphertext_blob,
            encryption_context=encryption_context,
            grant_tokens=grant_tokens
        )
    except boto.exception.BotoServerError as exc:
        r['error'] = __utils__['boto.get_error'](exc)
    else:
        # Only the recovered plaintext is of interest to callers.
        r['plaintext'] = response['Plaintext']
    return r
def key_exists(key_id, region=None, key=None, keyid=None, profile=None):
    '''
    Check for the existence of a key.
    Returns ``{'result': True|False}``; other service errors are reported
    under ``'error'``.
    CLI example::
        salt myminion boto_kms.key_exists 'alias/mykey'
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    try:
        # describe_key is only an existence probe here; its result is
        # discarded. (Previously it was bound to a local named ``key``,
        # shadowing the credential parameter.)
        conn.describe_key(key_id)
        # TODO: add to context cache
        r['result'] = True
    except boto.exception.BotoServerError as e:
        if isinstance(e, boto.kms.exceptions.NotFoundException):
            # A missing key is an expected outcome, not an error.
            r['result'] = False
            return r
        r['error'] = __utils__['boto.get_error'](e)
    return r
def _get_key_id(alias, region=None, key=None, keyid=None, profile=None):
    '''
    From an alias, get a key_id.
    '''
    metadata = describe_key(
        alias, region=region, key=key, keyid=keyid, profile=profile
    )['key_metadata']
    return metadata['KeyId']
def describe_key(key_id, region=None, key=None, keyid=None, profile=None):
    '''
    Get detailed information about a key.
    CLI example::
        salt myminion boto_kms.describe_key 'alias/mykey'
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    try:
        # TODO: add to context cache
        response = conn.describe_key(key_id)
        r['key_metadata'] = response['KeyMetadata']
    except boto.exception.BotoServerError as exc:
        r['error'] = __utils__['boto.get_error'](exc)
    return r
def disable_key(key_id, region=None, key=None, keyid=None, profile=None):
    '''
    Mark key as disabled.
    Returns ``{'result': True|False}`` (with ``'error'`` on failure).
    CLI example::
        salt myminion boto_kms.disable_key 'alias/mykey'
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    try:
        # The API response carries nothing useful; the unused local it was
        # bound to previously shadowed the ``key`` credential parameter.
        conn.disable_key(key_id)
        r['result'] = True
    except boto.exception.BotoServerError as e:
        r['result'] = False
        r['error'] = __utils__['boto.get_error'](e)
    return r
def disable_key_rotation(key_id, region=None, key=None, keyid=None,
                         profile=None):
    '''
    Disable key rotation for specified key.
    Returns ``{'result': True|False}`` (with ``'error'`` on failure).
    CLI example::
        salt myminion boto_kms.disable_key_rotation 'alias/mykey'
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    try:
        # Drop the previous unused assignment which shadowed the ``key``
        # credential parameter.
        conn.disable_key_rotation(key_id)
        r['result'] = True
    except boto.exception.BotoServerError as e:
        r['result'] = False
        r['error'] = __utils__['boto.get_error'](e)
    return r
def enable_key(key_id, region=None, key=None, keyid=None, profile=None):
    '''
    Mark key as enabled.
    Returns ``{'result': True|False}`` (with ``'error'`` on failure).
    CLI example::
        salt myminion boto_kms.enable_key 'alias/mykey'
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    try:
        # Drop the previous unused assignment which shadowed the ``key``
        # credential parameter.
        conn.enable_key(key_id)
        r['result'] = True
    except boto.exception.BotoServerError as e:
        r['result'] = False
        r['error'] = __utils__['boto.get_error'](e)
    return r
def enable_key_rotation(key_id, region=None, key=None, keyid=None,
                        profile=None):
    '''
    Enable key rotation for specified key.
    (Docstring previously said "Disable" — copy/paste error.)
    Returns ``{'result': True|False}`` (with ``'error'`` on failure).
    CLI example::
        salt myminion boto_kms.enable_key_rotation 'alias/mykey'
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    try:
        # Drop the previous unused assignment which shadowed the ``key``
        # credential parameter.
        conn.enable_key_rotation(key_id)
        r['result'] = True
    except boto.exception.BotoServerError as e:
        r['result'] = False
        r['error'] = __utils__['boto.get_error'](e)
    return r
def encrypt(key_id, plaintext, encryption_context=None, grant_tokens=None,
            region=None, key=None, keyid=None, profile=None):
    '''
    Encrypt plaintext into cipher text using specified key.
    CLI example::
        salt myminion boto_kms.encrypt 'alias/mykey' 'myplaindata' '{"aws:username":"myuser"}'
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    try:
        response = conn.encrypt(
            key_id,
            plaintext,
            encryption_context=encryption_context,
            grant_tokens=grant_tokens
        )
    except boto.exception.BotoServerError as exc:
        r['error'] = __utils__['boto.get_error'](exc)
    else:
        # Only the opaque ciphertext blob is returned to the caller.
        r['ciphertext'] = response['CiphertextBlob']
    return r
def generate_data_key(key_id, encryption_context=None, number_of_bytes=None,
                      key_spec=None, grant_tokens=None, region=None, key=None,
                      keyid=None, profile=None):
    '''
    Generate a secure data key.
    CLI example::
        salt myminion boto_kms.generate_data_key 'alias/mykey' number_of_bytes=1024 key_spec=AES_128
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    try:
        # The whole service response (plaintext + encrypted key material)
        # is handed back unchanged.
        r['data_key'] = conn.generate_data_key(
            key_id,
            encryption_context=encryption_context,
            number_of_bytes=number_of_bytes,
            key_spec=key_spec,
            grant_tokens=grant_tokens
        )
    except boto.exception.BotoServerError as exc:
        r['error'] = __utils__['boto.get_error'](exc)
    return r
def generate_data_key_without_plaintext(
    key_id, encryption_context=None, number_of_bytes=None, key_spec=None,
    grant_tokens=None, region=None, key=None, keyid=None, profile=None
):
    '''
    Generate a secure data key without a plaintext copy of the key.
    CLI example::
        salt myminion boto_kms.generate_data_key_without_plaintext 'alias/mykey' number_of_bytes=1024 key_spec=AES_128
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    try:
        # Hand the service response back unchanged (encrypted key only).
        r['data_key'] = conn.generate_data_key_without_plaintext(
            key_id,
            encryption_context=encryption_context,
            number_of_bytes=number_of_bytes,
            key_spec=key_spec,
            grant_tokens=grant_tokens
        )
    except boto.exception.BotoServerError as exc:
        r['error'] = __utils__['boto.get_error'](exc)
    return r
def generate_random(number_of_bytes=None, region=None, key=None, keyid=None,
                    profile=None):
    '''
    Generate a random string.
    CLI example::
        salt myminion boto_kms.generate_random number_of_bytes=1024
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    try:
        response = conn.generate_random(number_of_bytes)
    except boto.exception.BotoServerError as exc:
        r['error'] = __utils__['boto.get_error'](exc)
    else:
        r['random'] = response['Plaintext']
    return r
def get_key_policy(key_id, policy_name, region=None, key=None, keyid=None,
                   profile=None):
    '''
    Get the policy for the specified key.
    CLI example::
        salt myminion boto_kms.get_key_policy 'alias/mykey' mypolicy
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    try:
        response = conn.get_key_policy(key_id, policy_name)
        # Deserialize into an ordered mapping so statement order is stable.
        r['key_policy'] = json.deserialize(
            response['Policy'],
            object_pairs_hook=odict.OrderedDict
        )
    except boto.exception.BotoServerError as exc:
        r['error'] = __utils__['boto.get_error'](exc)
    return r
def get_key_rotation_status(key_id, region=None, key=None, keyid=None,
                            profile=None):
    '''
    Get status of whether or not key rotation is enabled for a key.
    CLI example::
        salt myminion boto_kms.get_key_rotation_status 'alias/mykey'
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    try:
        response = conn.get_key_rotation_status(key_id)
    except boto.exception.BotoServerError as exc:
        r['error'] = __utils__['boto.get_error'](exc)
    else:
        r['result'] = response['KeyRotationEnabled']
    return r
def list_grants(key_id, limit=None, marker=None, region=None, key=None,
                keyid=None, profile=None):
    '''
    List grants for the specified key.
    CLI example::
        salt myminion boto_kms.list_grants 'alias/mykey'
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    if key_id.startswith('alias/'):
        # Fix: resolve the alias with the caller's credentials/region rather
        # than the module defaults, so profile-based calls work for aliases.
        key_id = _get_key_id(key_id, region=region, key=key, keyid=keyid,
                             profile=profile)
    r = {}
    try:
        grants = conn.list_grants(
            key_id,
            limit=limit,
            marker=marker
        )
        # TODO: handle limit/marker automatically
        r['grants'] = grants['Grants']
    except boto.exception.BotoServerError as e:
        r['error'] = __utils__['boto.get_error'](e)
    return r
def list_key_policies(key_id, limit=None, marker=None, region=None, key=None,
                      keyid=None, profile=None):
    '''
    List key_policies for the specified key.
    CLI example::
        salt myminion boto_kms.list_key_policies 'alias/mykey'
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    if key_id.startswith('alias/'):
        # Fix: resolve the alias with the caller's credentials/region rather
        # than the module defaults, so profile-based calls work for aliases.
        key_id = _get_key_id(key_id, region=region, key=key, keyid=keyid,
                             profile=profile)
    r = {}
    try:
        key_policies = conn.list_key_policies(
            key_id,
            limit=limit,
            marker=marker
        )
        # TODO: handle limit, marker and truncation automatically.
        r['key_policies'] = key_policies['PolicyNames']
    except boto.exception.BotoServerError as e:
        r['error'] = __utils__['boto.get_error'](e)
    return r
def put_key_policy(key_id, policy_name, policy, region=None, key=None,
                   keyid=None, profile=None):
    '''
    Attach a key policy to the specified key.
    CLI example::
        salt myminion boto_kms.put_key_policy 'alias/mykey' default '{"Statement":...}'
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    try:
        # The policy document must be serialized to a JSON string for boto.
        conn.put_key_policy(key_id, policy_name, json.serialize(policy))
    except boto.exception.BotoServerError as exc:
        r['result'] = False
        r['error'] = __utils__['boto.get_error'](exc)
    else:
        r['result'] = True
    return r
def re_encrypt(ciphertext_blob, destination_key_id,
source_encryption_context=None,
destination_encryption_context=None, grant_tokens=None,
region=None, key=None, keyid=None, profile=None):
'''
Reencrypt encrypted | |
from flask import Flask, render_template, jsonify, json, url_for, request, redirect, Response, flash, abort, make_response, send_file
import requests
import io
import os
import csv
import datetime
import investmentportfolio
print ('Running Portfolio Analyze')
# Flask application instance used by all route decorators below.
app = Flask(__name__)
# On IBM Cloud, get the port number from the environment variable VCAP_APP_PORT
# When running this app on the local machine, default the port to 8080
port = int(os.getenv('VCAP_APP_PORT', 8080))
# Bind on all interfaces so the container/platform can route traffic in.
host='0.0.0.0'
#======================================MAIN PAGES======================================
@app.route('/')
def run():
    """
    Load the site page: renders the single-page front end
    (templates/index.html).
    """
    return render_template('index.html')
#======================================DATABASE MANAGEMENT==============================
@app.route('/api/upload', methods=['POST'])
def portfolio_from_csv():
    """
    Loads a portfolio in Algo Risk Service (ARS) format into the Investment
    Portfolio service.

    The POSTed body is a JSON array of comma-separated rows whose first row
    is the header. Rows are grouped by their PORTFOLIO column; each group is
    created as a portfolio and its holdings uploaded in chunks of 100.
    Returns the last Investment Portfolio service response (empty string when
    the upload defined no portfolios — previously this raised
    UnboundLocalError).
    """
    data = json.loads(request.data)
    data = [row.split(',') for row in data]
    headers = data[0]
    # Group each holding by its PORTFOLIO column; a single file may contain
    # multiple portfolios (e.g. funds referenced by the user portfolio).
    portfolios = {}
    unique_id_col = headers.index("UNIQUE ID")
    name_col = headers.index("NAME")
    pos_units_col = headers.index("POSITION UNITS")
    portfolio_col = headers.index("PORTFOLIO")
    for d in data[1:]:
        hldg = {
            "name": d[name_col],
            "instrumentId": d[unique_id_col],
            "quantity": d[pos_units_col]
        }
        # Columns beyond the sixth are carried over verbatim as holding
        # metadata (stripping stray carriage returns from CSV line ends).
        if len(headers) > 5:
            for meta in headers[6:]:
                hldg[meta.replace('\r', '')] = d[headers.index(meta)].replace('\r', '')
        if d[portfolio_col] not in portfolios:
            portfolios[d[portfolio_col]] = [hldg]
        else:
            portfolios[d[portfolio_col]].append(hldg)
    # Send each portfolio and its holdings to the investment portfolio
    # service. 'req' holds the most recent service response so the route
    # always has something to return.
    req = ''
    for key, value in portfolios.items():
        if key == 'User Portfolio' or key == 'Sample Portfolio':
            portfolio_type = 'User Portfolio'
        else:
            # Look-through portfolios are hidden from the main drop-down menu.
            portfolio_type = 'look through portfolio'
        my_portfolio = {
            "timestamp": '{:%Y-%m-%dT%H:%M:%S.%fZ}'.format(datetime.datetime.now()),
            'closed': False,
            'data': {'type': portfolio_type},
            'name': key
        }
        # Create the portfolio document, then its holdings.
        try:
            req = investmentportfolio.Create_Portfolio(my_portfolio)
        except Exception:
            print("Unable to create portfolio for " + str(key) + ".")
        try:
            # The service limits upload size, so send holdings in chunks.
            for h in range(0, len(value), 100):
                hldgs = value[h:h + 100]
                req = investmentportfolio.Create_Portfolio_Holdings(str(key), hldgs)
        except Exception:
            print("Unable to create portfolio holdings for " + str(key) + ".")
    return req
#Returns list of 'look through' portfolios
@app.route('/api/look_through_portfolios',methods=['GET'])
def get_look_through_portfolios():
    '''
    Returns the available user portfolio names in the Investment Portfolio service.
    Uses type='user_portfolio' to specify.
    '''
    portfolio_names = []
    res = investmentportfolio.Get_Portfolios_by_Selector('type','User Portfolio') #Filters out look-through portfolios
    try:
        for portfolios in res['portfolios']:
            portfolio_names.append(portfolios['name'])
        #returns the portfolio names as list
        return Response(json.dumps(portfolio_names), mimetype='application/json')
    except:
        # NOTE(review): the bare except also hides KeyErrors from a malformed
        # service response; either way the caller sees the message below.
        return "No portfolios found."
#Deletes all look through holdings and portfolios for cleanup
@app.route('/api/look_through_delete',methods=['GET'])
def get_look_through_delete():
    '''
    Deletes all portfolios and respective holdings that are of type 'look through'
    '''
    portfolios = investmentportfolio.Get_Portfolios_by_Selector('type','look through portfolio')['portfolios']
    for p in portfolios:
        holdings = investmentportfolio.Get_Portfolio_Holdings(p['name'],False)
        # delete all holdings first (each chunked holdings document needs its
        # own timestamp/_rev pair for the delete call)
        for h in holdings['holdings']:
            timestamp = h['timestamp']
            rev = h['_rev']
            investmentportfolio.Delete_Portfolio_Holdings(p['name'],timestamp,rev)
        # once the holdings are gone, delete the portfolio document itself
        investmentportfolio.Delete_Portfolio(p['name'],p['timestamp'],p['_rev'])
    return "Portfolios deleted successfully."
#======================================LOOK THROUGH CALCULATIONS==============================
#Returns list of 'look through' portfolios with the additional portfolio
def get_universe(portfolio):
    '''
    Expand a portfolio into the full universe of securities it touches.

    For every holding flagged HAS_LOOKTHROUGH == 'TRUE', the fund's own
    holdings are fetched from the Investment Portfolio service and appended
    (holdings with a blank TICKER are dropped). Returns a new list; the
    caller's `portfolio` list is left unmodified (the original implementation
    extended the caller's list in place via aliasing).
    '''
    # Copy so that += below cannot mutate the caller's list.
    universe = list(portfolio)
    look_throughs = [item["TICKER"] for item in portfolio if item["HAS_LOOKTHROUGH"] == 'TRUE']
    for l in look_throughs:
        # Get the fund's individual holdings; they were uploaded in chunks,
        # so flatten the chunked structure back into a flat list.
        fund = investmentportfolio.Get_Portfolio_Holdings(l, False)['holdings']
        fund = [item['holdings'] for item in fund]
        fund = [item for sublist in fund for item in sublist]
        universe += [item for item in fund if item['TICKER'] != '']
    return universe
#Returns an augmented universe with effective portfolio value per security (in case there's a significant difference in processing time with the above)
def get_expanded_universe(portfolio,portfolio_NAV):
    '''
    Expand `portfolio` with the holdings of each look-through fund, annotating
    every item with 'portfolio_value' (effective dollar exposure) and a
    'user_portfolio' flag (True for directly-held positions).

    NOTE(review): this mutates the holding dicts of `portfolio` in place and
    extends the same list object — callers must not rely on `portfolio`
    remaining unchanged after this call.
    '''
    #portfolio object as input
    universe = portfolio
    for p in portfolio:
        # Direct holdings: exposure is simply units * price.
        p.update({'portfolio_value':float(p['quantity'])*float(p['PRICE']),'user_portfolio':True})
    look_throughs = [item["TICKER"] for item in portfolio if item["HAS_LOOKTHROUGH"] == 'TRUE']
    for l in look_throughs:
        #Get fund's NAV from user portfolio (that's where the data lives) and our exposure to that fund (in $)
        fund_NAV = [float(item['FUND_NAV']) for item in portfolio if item['TICKER'] == l][0]
        exposure_to_fund = ([float(item['quantity']) * float(item['PRICE']) for item in portfolio if item['TICKER'] == l][0])/portfolio_NAV
        #Get fund's individual holdings
        fund = investmentportfolio.Get_Portfolio_Holdings(l,False)['holdings']
        fund = [item['holdings'] for item in fund] #since we loaded the data in chunks originally
        fund = [item for sublist in fund for item in sublist] #flatten the list
        #calculate effective dollar exposure to each fund based on market value in parent portfolio
        for f in fund:
            #errors in csv file formats can cause issues here
            if f['HAS_LOOKTHROUGH'] == 'FALSE':
                try:
                    f.update({'portfolio_value':((float(f['quantity']) *float(f['PRICE']))/ fund_NAV) * (exposure_to_fund*portfolio_NAV),'user_portfolio':False})
                except:
                    # Best-effort: rows with missing/non-numeric fields are
                    # skipped and flagged on stdout for manual inspection.
                    print('look at ' + str(f['name']))
        universe += fund
    return universe
@app.route('/api/search-universe/<portfolio>',methods=['GET','POST'])
def search_universe(portfolio):
    '''
    Returns the total list of securities touched by an investment portfolio (e.g. including look-throughs).
    '''
    # A POST body may override the portfolio named in the URL path.
    if request.method == 'POST':
        req = request.get_json(silent=True)
        portfolio = req['portfolio']
    portfolio = investmentportfolio.Get_Portfolio_Holdings(portfolio,False)['holdings'] # client portfolio
    portfolio = [item['holdings'] for item in portfolio] #since we loaded the data in chunks originally
    portfolio = [item for sublist in portfolio for item in sublist] #flatten the list'
    universe = get_universe(portfolio)
    # Display strings of the form "Name (TICKER)".
    universe = [item['name'] + ' (' + item['TICKER'] + ')' for item in universe]
    return Response(json.dumps(universe), mimetype='application/json')
@app.route('/api/portfolio-composition',methods=['POST'])
def portfolio_composition():
    '''
    Returns a list of aggregations (e.g. geography, sector) and their
    portfolio value per member, currently in dollar-value terms.

    POST body: {"portfolio": <name>, "aggregations": [<holding keys>]}
    '''
    if request.method == 'POST':
        req = request.get_json(silent=True)
        portfolio = investmentportfolio.Get_Portfolio_Holdings(req['portfolio'],False)['holdings'] # client portfolio
        # Holdings were uploaded in chunks of 100; flatten them back out.
        portfolio = [item['holdings'] for item in portfolio]
        portfolio = [item for sublist in portfolio for item in sublist]
        aggregations = req["aggregations"] # aggregations to compute
        NAV = sum(float(item['quantity'])*float(item['PRICE']) for item in portfolio)
        # Fix: get_expanded_universe() requires the portfolio NAV as its
        # second argument; it was previously called with only one argument,
        # which raised TypeError at runtime.
        universe = get_expanded_universe(portfolio, NAV)
        exposures = {"NAV":NAV}
        for a in aggregations:
            values = {}
            # Unique member values for this aggregation key.
            unique_a = {item[a]:item[a] for item in universe}.values()
            for u in unique_a:
                values[u] = sum([item['portfolio_value'] for item in universe if item[a]==u])
            exposures[a] = values
        return Response(json.dumps(exposures), mimetype='application/json')
@app.route('/api/portfolio-analyze/<portfolio>',methods=['GET','POST'])
def portfolio_analyze(portfolio):
    '''
    Returns data compatible with the Portfolio.Analyze() v1.0 front-end GUI:
    NAV, per-holding contributions, "sin stock" exposures, average ESG scores
    (portfolio and benchmarks), and composition aggregations.
    '''
    # A POST body may override the portfolio named in the URL path.
    if request.method == 'POST':
        req = request.get_json(silent=True)
        portfolio = req['portfolio']
    portfolio_name = portfolio #persist name
    portfolio = investmentportfolio.Get_Portfolio_Holdings(portfolio,False)['holdings'] # client portfolio
    portfolio = [item['holdings'] for item in portfolio] #since we loaded the data in chunks originally
    portfolio = [item for sublist in portfolio for item in sublist] #flatten the list'
    aggregations = ["geography","Asset Class","sector","has_Tobacco","has_Alcohol","has_Gambling","has_Military","has_Fossil Fuels","esg_Controversy","esg_Environmental","esg_Governance","esg_Social","esg_Sustainability"]
    NAV = sum(float(item['quantity'])*float(item['PRICE']) for item in portfolio)
    response = {
        "NAV":NAV,
        'sin':{},
        'esg':{portfolio_name:{}},
        'search':[], # search universe
        'portfolio':[{'name':item['name'],'value ($USD)':(float(item['quantity'])*float(item['PRICE'])),'Portfolio Contribution (%)':((float(item['quantity'])*float(item['PRICE']))/NAV)*100,'Industry Sector':item['sector'],'Asset Class':item['Asset Class'],'Geography':item['geography']} for item in portfolio],
        'composition':{}
    }
    universe = get_expanded_universe(portfolio,NAV)
    response['search'] = list(set([item['name'] + ' (' + item['TICKER'] + ')' for item in universe]))
    #hard-coded benchmarks for now, as it's possible a user would want to make benchmark choices static...
    #benchmarks = ['IVV','HYG','LQD']
    benchmarks = ['IVV']
    for b in benchmarks:
        response['esg'][b] = {}
    #Calculate data for response
    for a in aggregations:
        #sin stocks - just need true
        if 'has_' in a:
            #we omit the parent funds in the portfolio (has_lookthrough=true) to avoid double counting the exposure
            response['sin'][a] = sum([item['portfolio_value'] for item in universe if item['HAS_LOOKTHROUGH']=='FALSE' if item[a]=='TRUE'])
        #esg
        elif 'esg_' in a:
            #compute average ESG for the portfolio (and benchmarks!)
            response['esg'][portfolio_name][a] = sum([(item['portfolio_value']/NAV)*float(item[a]) for item in universe if item['HAS_LOOKTHROUGH']=='FALSE'])
        #regular aggregations
        else:
            values = {}
            #get unique entries for the given aggregation (keep an eye out for python3 quirks)
            unique_a = {item[a]:item[a] for item in universe}.values()
            for u in unique_a:
                values[u] = sum([item['portfolio_value'] for item in universe if item['HAS_LOOKTHROUGH']=='FALSE' if item[a]==u])
            response['composition'][a] = values
    #get ESG data for benchmarks (same expansion as above, per benchmark)
    for b in benchmarks:
        portfolio = investmentportfolio.Get_Portfolio_Holdings(b,False)['holdings']
        portfolio = [item['holdings'] for item in portfolio] #since we loaded the data in chunks originally
        portfolio = [item for sublist in portfolio for item in sublist] #flatten the list'
        b_NAV = sum(float(item['quantity'])*float(item['PRICE']) for item in portfolio)
        b_universe = get_expanded_universe(portfolio,b_NAV)
        for a in aggregations:
            if 'esg_' in a:
                #compute average ESG for the portfolio (and benchmarks!)
                response['esg'][b][a] = sum([(item['portfolio_value']/b_NAV)*float(item[a]) for item in b_universe if item['HAS_LOOKTHROUGH']=='FALSE'])
    #create world investment json for the D3 element
    create_world_json(response['composition']["geography"])
    return Response(json.dumps(response), mimetype='application/json')
#Returns list of 'look through' portfolios (returns results)
@app.route('/api/search/<portfolio>/<security>',methods=['GET','POST'])
def search(portfolio,security):
'''
Returns details around the true presence of a given security [by ticker for now] in a portfolio.
'''
if request.method == 'POST':
req = request.get_json(silent=True)
portfolio = req["portfolio"]
security = req["security"] # security to check
portfolio = investmentportfolio.Get_Portfolio_Holdings(portfolio,False)['holdings'] # client portfolio
portfolio = [item['holdings'] for item in portfolio] #since we loaded | |
<reponame>pipesanta/WS8552-python<gh_stars>0
import serial
import time
class WS8552_FingerPrintReader(object):
# define ACK_SUCCESS 0x00 //Operation successfully
# define ACK_FAIL 0x01 // Operation failed
# define ACK_FULL 0x04 // Fingerprint database is full
# define ACK_NOUSER 0x05 //No such user
# define ACK_FIN_EXIST 0x07 // already exists
# define ACK_TIMEOUT 0x08 // Acquisition timeout
def __init__(self, port, baudrate):
    super(WS8552_FingerPrintReader, self).__init__()
    # Short read timeout so polling reads return quickly when the reader
    # has nothing to say; restored after blocking operations.
    self.defaultTimeout= 0.3
    self.ser = serial.Serial(port, baudrate, timeout=self.defaultTimeout, parity=serial.PARITY_NONE, bytesize=serial.EIGHTBITS)
    # When True, every request/response frame is dumped to stdout.
    self.debug = True
def info(self):
    # Dump the pyserial port configuration for troubleshooting.
    print(self.ser)
def prepareForDaemon(self):
    # Switch to non-blocking reads (timeout=0) so a background daemon can
    # poll the port without stalling.
    self.ser.timeout = 0
    print("timeout {}".format(self.ser.timeout))
def read_response(self):
    '''
    Drain one response frame from the serial port byte by byte until a read
    times out (returns ''), then validate the generic frame format.
    Returns the list of raw bytes on success, False when validation fails.
    NOTE(review): comparing serial.read() against '' assumes Python 2 string
    semantics — confirm before running under Python 3 (reads return bytes).
    '''
    all_output = []
    output = ''
    i = 0
    # Prime the loop with the first byte; later iterations read inside the
    # loop so the timeout (empty read) terminates it.
    output = self.ser.read()
    while output != '':
        if i == 0:
            pass
        else:
            output = self.ser.read()
        if output != '':
            all_output.insert(i, output)
        i += 1
    self.ser.flushOutput()
    validResponse = self.validateGenericResponse(all_output)
    if not(validResponse):
        if(self.debug): print("response not valid => --{}--".format(all_output))
        return False
    self.printResponse(all_output)
    return all_output
def readResponseUntil(self, bytesAtEnd, size):
    '''
    Read from the serial port until `bytesAtEnd` is seen or `size` bytes
    have been read (delegates to pyserial read_until), trace the frame,
    and return the raw buffer.
    '''
    output = self.ser.read_until(bytesAtEnd, size)
    self.printResponse(output)
    return output
def validateGenericResponse(self, response):
    # A well-formed frame is at least 8 bytes long and is delimited by the
    # 0xF5 marker at both ends.
    if len(response) < 8:
        return False
    has_header = ord(response[0]) == 0xF5   # 0xF5 mandatory frame header
    has_trailer = ord(response[-1]) == 0xF5  # 0xF5 mandatory frame trailer
    return has_header and has_trailer
def printRequest(self, request):
    # Trace an outgoing command frame (ints) when debug is enabled.
    if not self.debug:
        return
    data = [hex(byte) for byte in request]
    print("[debug] [{} bytes] WRITING COMMAND: {} ".format(len(data), data))
def printResponse(self, response):
    # Trace an incoming response frame (raw byte strings) when debug is on.
    if not self.debug:
        return
    data = [hex(ord(byte)) for byte in response]
    print("[debug] [{} bytes] RESPONSE: {}".format(len(data), data))
def send_request(self, cmd, parameters):
    '''
    Build and transmit one 8-byte command frame:
    0xF5 | CMD | P1 | P2 | P3 | 0x00 | CHK | 0xF5
    where CHK is the XOR of bytes 1..5 (CMD through the zero byte).
    '''
    packet = [0xF5, cmd, parameters[0], parameters[1], parameters[2], 0x00]
    checksum = 0x00
    for value in packet[1:6]:
        checksum = checksum ^ value
    packet.append(checksum)   # CHK
    packet.append(0xF5)       # end of command
    self.printRequest(packet)
    self.ser.write(packet)
    self.ser.flushInput()
def printOnDebug(self, msj):
    # Emit the message only while debug tracing is enabled.
    if self.debug:
        print(msj)
# Enable the module into a dormant state (Both command and response are 8 bytes)
def enableDormantState(self):
    '''
    Put the module into a dormant state (command and response are 8 bytes).
    NOTE(review): the actual transfer is commented out below, so this is
    currently a logging-only stub that returns None.
    '''
    if(self.debug): print("## ENABLING DORMAN STATE ##")
    # self.send_request(0X2C, [0x00, 0x00, 0x00])
    # response = self.read_response()
    # return response
# Read the fingerprint add mode (Both command and response are 8 bytes)
def readFingerprintAddMode(self):
    """Query whether the module allows re-registering an existing fingerprint.

    Returns "ALLOW_REPEAT" or "PROHIBIT_REPEAT" on success, and False on any
    failure: no/empty response, ACK failure, or an unrecognized mode byte
    (these paths previously fell through and returned an implicit None,
    inconsistent with the sibling methods that return False).
    """
    if self.debug:
        print("Reading fingerprint add mode")
    self.send_request(0x2D, [0x00, 0x00, 0x01])
    response = self.readResponseUntil([0xF5], 8)
    if not response:
        return False
    ackByte = ord(response[4])
    if ackByte == 0x01:
        print("ACK FAILED")
        return False
    if ackByte == 0x00:
        mode = ord(response[3])
        if mode == 0x00:
            return "ALLOW_REPEAT"
        if mode == 0x01:
            return "PROHIBIT_REPEAT"
    return False
# Set the fingerprint add mode (Both command and response are 8 bytes)
def setFingerprintAddMode(self, mode):
    ''' 0x00 ALLOW, 0x01 PROHIBIT '''
    if self.debug:
        print("setting fingerprint add mode: {}".format(mode))
    self.send_request(0x2D, [0x00, mode, 0x00])
    response = self.readResponseUntil([0xF5], 8)
    # Guard against a short/empty reply before indexing into it (previously
    # an empty response raised IndexError here).
    if len(response) < 8:
        return False
    successByte = ord(response[4])
    if successByte == 0x01:
        # Error message previously named the wrong method.
        print("setFingerprintAddMode operation failed")
        return False
    return successByte == 0x00
# To ensure the effectiveness, user must input a fingerprint three times, the host is required to send command to the
# DSP module three times.
def addFingerprint(self, time, userid, userPrivilege):
    '''
    Range of user number is 1 - 0xFFF;
    Range of User privilege is 1, 2, 3, its meaning is defined by secondary developers themselves.
    '''
    self.printOnDebug("Adding fingerprint #{} with userId {}, with privilege level {}".format(time, userid, userPrivilege))
    if userPrivilege not in (1, 2, 3):
        self.printOnDebug("User privilege is not allowed")
        return False
    # Enrollment blocks until a finger is pressed, so lift the read timeout.
    self.ser.timeout = None
    # Split the 16-bit user id into its high and low bytes.
    bits = format(userid, '016b')
    highByte = int(bits[:8], 2)
    lowByte = int(bits[8:], 2)
    self.send_request(time, [highByte, lowByte, userPrivilege])
    response = self.readResponseUntil([0xF5], 8)
    self.ser.timeout = self.defaultTimeout
    if len(response) == 0:
        self.printOnDebug("DATA RESPONSE IS EMPTY")
        return False
    ackByte = ord(response[4])
    if len(response) == 8:
        if ackByte == 0x01:
            self.printOnDebug("FAILED ACK")
            return "ACK_FAIL"
        if ackByte == 0x06:
            self.printOnDebug("FAILED ACK")
            return "ACK_USER_EXIST"
        if ackByte == 0x00:
            return True
    return False
# Delete specified user (Both command and response are 8 bytes)
def deleteUser(self, userId):
    """Remove the fingerprint record stored under `userId` (command 0x04).

    Returns True when the module ACKs, False otherwise (debug logging
    distinguishes a failed ACK from a missing user).
    """
    self.printOnDebug("DELETING USER {}".format(userId))
    bits = format(userId, '016b')
    self.send_request(0x04, [int(bits[:8], 2), int(bits[8:], 2), 0x00])
    response = self.readResponseUntil([0xF5], 8)
    if len(response) != 8:
        return False
    ackByte = ord(response[4])
    if ackByte == 0x01:
        self.printOnDebug("FAILED ACK")
    if ackByte == 0x05:
        self.printOnDebug("NO SUCH USER")
    return ackByte == 0x00
# Delete all users (Both command and response are 8 bytes)
def deleteAllUSers(self):
    """Wipe every enrolled fingerprint (command 0x05); True on ACK success."""
    if self.debug:
        print("DELETING ALL USERS")
    self.send_request(0x05, [0x00, 0x00, 0x00])
    response = self.readResponseUntil([0xF5], 8)
    if len(response) != 8:
        return False
    return ord(response[4]) == 0x00
# Acquire the total number of users (Both command and response are 8 bytes)
def getTotalUserNumber(self):
    """Ask the module how many users are enrolled (command 0x09).

    The count comes back as a 16-bit big-endian value in reply bytes 2 and 3.
    Returns the integer count, or False when the reply is malformed.
    """
    if self.debug:
        print("Calling getTotalUserNumber")
    self.send_request(0x09, [0x00, 0x00, 0x00])
    result = self.readResponseUntil([0xf5], 8)
    if len(result) != 8:
        return False
    # Recombine the high and low count bytes into a single integer.
    return (ord(result[2]) << 8) | ord(result[3])
# Compare 1:1 (Both command and response are 8 bytes)
def compareOneToOne(self, userId):
    """Match the finger on the sensor against one stored user (command 0x0B).

    Returns True on a match, an error string for a known failure code, and
    False for a malformed reply or an unknown status byte.
    """
    if self.debug:
        print("COMPARING 1:1 UserID: {}".format(userId))
    # Matching blocks until a finger is pressed, so lift the read timeout.
    self.ser.timeout = None
    bits = format(userId, '016b')
    self.send_request(0x0B, [int(bits[:8], 2), int(bits[8:], 2), 0x00])
    response = self.readResponseUntil([0xF5], 8)
    # Restore the timeout on every path: it was previously leaked (left at
    # None) when the response was malformed, making later reads block forever.
    self.ser.timeout = self.defaultTimeout
    if len(response) != 8:
        self.printOnDebug("RESPONSE HAS NOT 8 BYTES")
        return False
    switcher = {0: True, 1: "ACK FAIL", 5: "NOT SUCH USER"}
    # Unknown status bytes now map to False instead of an implicit None.
    return switcher.get(ord(response[4]), False)
# Compare 1: N (Both command and response are 8 bytes)
def compareOneToMany(self):
    """Match the finger on the sensor against the whole database (command 0x0C).

    Returns [userId, privilege] on success, a one-element status list for a
    known error code, and False for a malformed reply or unknown status byte.
    """
    if self.debug:
        print("COMPARING 1: N")
    self.ser.timeout = None  # matching blocks until a finger is pressed
    self.send_request(0x0C, [0x00, 0x00, 0x00])
    response = self.readResponseUntil([0xF5], 8)
    self.ser.timeout = self.defaultTimeout
    # Guard before indexing: a short/empty reply used to raise IndexError here.
    if len(response) < 8:
        return False
    byte4 = ord(response[4])
    if byte4 == 0x05:
        self.printOnDebug("NO SUCH USER")
        return ["NO SUCH USER"]
    if byte4 == 0x08:
        self.printOnDebug("ACK TIMEOUT")
        return ["ACK TIMEOUT"]
    if byte4 in (0x01, 0x02, 0x03):
        # Bytes 2/3 hold the matched user id (big-endian); byte 4 carries
        # the matched user's privilege level.
        userId = (ord(response[2]) << 8) | ord(response[3])
        return [userId, byte4]
    return False
# Acquire user privilege (Both command and response are 8 bytes)
def getUserPrivilege(self, userId):
    """Fetch the privilege level (1-3) stored for `userId`; False on failure."""
    if self.debug:
        print("Getting privileges of user with id: {}".format(userId))
    bits = format(userId, '016b')
    self.send_request(0x0A, [int(bits[:8], 2), int(bits[8:], 2), 0x00])
    response = self.readResponseUntil([0xF5], 8)
    if len(response) != 8:
        self.printOnDebug("response size is {}, 8 was expected ".format(len(response)))
        return False
    byte4 = ord(response[4])
    if byte4 == 0x05:
        self.printOnDebug("NOT SUCH USER")
        return False
    if byte4 in (0x01, 0x02, 0x03):
        return byte4
    return False
# Acquire DSP module version number (command = 8 bytes, and response > 8 bytes)
def getDspModuleVersionNumber(self):
    """Request the firmware version string (command 0x26).

    The reply is an 8-byte header followed by a data package; the payload
    length is a 16-bit big-endian value in header bytes 2-3, and the
    version characters start one byte into the data package.
    Returns the version as a string, or False on failure.
    """
    if self.debug:
        print("getting DSP module version number")
    self.send_request(0x26, [0x00, 0x00, 0x00])
    response = self.read_response()
    if not response:
        return False
    header = response[0:7]
    dataPackage = response[8:]
    ackByte = ord(header[4])
    if ackByte == 0x01:
        if self.debug:
            print("ACK FAIL")
        return False
    if ackByte == 0x00:
        lenSize = (ord(header[2]) << 8) | ord(header[3])
        payload = dataPackage[1:1 + lenSize]
        return "".join(chr(ord(b)) for b in payload)
    return False
# Read comparison level (Both command and response are 8 bytes)
def getComparisonLevel(self):
    """Read the module's matching strictness level (0-9); False on failure."""
    if self.debug:
        print("Reading Comparison level")
    self.ser.timeout = None
    self.send_request(0x28, [0x00, 0x00, 0x01])
    response = self.readResponseUntil([0xF5], 8)
    self.ser.timeout = self.defaultTimeout
    if len(response) != 8:
        self.printOnDebug("Response size is {}. 8 was expected".format(len(response)))
        return False
    ackByte = ord(response[4])
    if ackByte == 0x00:
        # The level itself travels in byte 3.
        return ord(response[3])
    if ackByte == 0x01:
        self.printOnDebug("ACK FAILED")
    return False
# Set comparison level (Both command and response are 8 bytes)
def setComparisonLevel(self, level):
    """Set the matching strictness level (0-9); True when the module ACKs.

    Rejects out-of-range levels up front, and tolerates a short/empty reply
    by returning False (indexing `response[4]` previously raised IndexError
    when the module did not answer).
    """
    if level not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
        self.printOnDebug("[Error] {} level is not allowed".format(level))
        return False
    self.ser.timeout = None
    self.printOnDebug("Setting comparison level. {}".format(level))
    self.send_request(0x28, [0x00, level, 0x00])
    response = self.readResponseUntil([0xF5], 8)
    self.ser.timeout = self.defaultTimeout
    if len(response) < 8:
        return False
    return ord(response[4]) == 0x00
# Acquire and upload images
# Acquire and upload images (Command = 8 bytes, response > 8 bytes)
def getAndUploadImages(self):
self.printOnDebug("Getting uploaded images... ")
self.ser.timeout = 3
self.send_request(0x24, [0x00, 0x00, 0x00])
response = self.read_response()
if not(response): return False
headerResponse = response[0:7]
dataPackage = response[8:]
responseAsString = []
ackByte = ord(headerResponse[4])
if( ackByte == 0x01 ):
self.printOnDebug("ACK FAIL")
return False
elif(ackByte == | |
<filename>MultiQubit_PulseGenerator/NQB/nqb_tomo_functions.py
'''
-------------------------------------------------
- Suite of functions for tomography -
-------------------------------------------------
Generalized MLE code written by <NAME> (<EMAIL>),
based on 1-2QB code written by <NAME> (<EMAIL>)
with input from EQuS team and LL team.
'''
import numpy as np
from scipy.linalg import lu
import scipy
from scipy.optimize import minimize
import matplotlib
matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
from matplotlib.ticker import MaxNLocator
import MainDataHelpers.n_qubit_bharath.pulse_schemes as ps
import itertools
import time
from collections import Counter
# --- Module-level bookkeeping shared with the MLE minimizer callback ---
converge_vals = {}   # reserved for convergence diagnostics
iter_density = []    # parameter vector recorded at every iteration
iter_number = 0      # running iteration counter
TGLOBAL = 0          # wall-clock timestamp of the latest iteration
METHOD = 0           # minimizer method selector (set elsewhere)
N = 0                # number of qubits (set elsewhere)
def callbackIter(Xk):
    """scipy.optimize callback: record Xk and the iteration timestamp."""
    global iter_density, TGLOBAL, METHOD, iter_number, N
    iter_density.append(Xk)
    TGLOBAL = time.time()
    iter_number += 1
    # Lightweight progress indicator every tenth iteration.
    if iter_number % 10 == 0:
        print(iter_number)
def MLE_NQB_sim(n, measurements, paulis, variances, density_matrix,
tolerance, random_initialization, max_iter_num, nlvl=2,
plot_convergence=False, verbose=False, directory_path=""):
"""Maximum likelihood estimation for n_qubit n_level qubit state tomography.
For illustrative purposes, all examples are done with 3-qubit systems:
Detailed description of the entire MLE process:
The expectation value of
m_<III>
m_<IIA>
m_<IAI>
m_<IAA>
m_<AII>
m_<AIA>
m_<AAI>
m_<AAA>
for a 8x8 Matrix
constructed as the tensor product of two paulis, can be found via:
[m_<III>, m_<IIA>, m_<IAI>, m_<IAA>, m_<AII>, m_<AIA>, m_<AAI>, m_<AAA> ]
= betas^{-1}*[p_000^A, p_001^A, p_010^A, p_011^A, p_100^A, p_101^A, p_110^A, p_111^A]
and the objective function L to be minimized to enfore PSD is given by:
L = sum_{A}(m_<A> - Tr(A*rho_T) )**2
where rho_T is the cholesky decomposition of a 8x8 matrix, and A is the
tensor product of three pauli matrices. E.g. for the '9 pulse scheme'
typically used in Labber:
A = [XXX = sx @ sx @ sx YYY = sy @ sy @ sy ZZZ = sz @ sz @ sz
XXI = sx @ sx @ Id YYI = sy @ sy @ Id ZZI = sz @ sz @ Id
XIX = sx @ Id @ sx YIY = sy @ Id @ sy ZIZ = sz @ Id @ sz
XII = sx @ Id @ Id YII = sy @ Id @ Id ZII = sz @ Id @ Id
IXX = Id @ sx @ sx IYY = Id @ sy @ sy IZZ = Id @ sz @ sz
IXI = Id @ sx @ Id IYI = Id @ sy @ Id IZI = Id @ sz @ Id
IIX = Id @ Id @ sx IIY = Id @ Id @ sy IIZ = Id @ Id @ sz
XXY = sx @ sx @ sy XXZ = sx @ sx @ sz ZZY = sz @ sz @ sy
XYX = sx @ sy @ sx XZX = sx @ sz @ sx ZYZ = sz @ sy @ sz
XYY = sx @ sy @ sy XZZ = sx @ sz @ sz ZYY = sz @ sy @ sy
YXX = sy @ sx @ sx ZXX = sz @ sx @ sx YZZ = sy @ sz @ sz
YXY = sy @ sx @ sy ZXZ = sz @ sx @ sz YZY = sy @ sz @ sy
YYX = sy @ sy @ sx ZZX = sz @ sz @ sx YYZ = sy @ sy @ sz
XYI = sx @ sy @ Id XZI = sx @ sz @ Id ZYI = sz @ sy @ Id
XIY = sx @ Id @ sy XIZ = sx @ Id @ sz ZIY = sz @ Id @ sy
YIX = sy @ Id @ sx ZIX = sz @ Id @ sx YIZ = sy @ Id @ sz
YXI = sy @ sx @ Id ZXI = sz @ sx @ Id YZI = sy @ sz @ Id
IXY = Id @ sx @ sy IXZ = Id @ sx @ sz IZY = Id @ sz @ sy
IYX = Id @ sy @ sx IZX = Id @ sz @ sx IYZ = Id @ sy @ sz
XYZ = sx @ sy @ sz
XZY = sx @ sz @ sy
YXZ = sy @ sx @ sz
YZX = sy @ sz @ sx
ZXY = sz @ sx @ sy
ZYX = sz @ sy @ sx]
where @ corresponds to the tensor product. The corresponding pulse sequence
to get the above list of A's is:
Pulses = ['Y2m-Y2m-Y2m', ...]
If you feed this function the pulse scheme in the format outlined above,
it automatically generates the corresponding 4x4 matrices
Parameters
----------
n: int
number of qubits
probvectors : array
Array of arrays of probabilities of the form
[p_000, p_001, p_010, p_011, p_100, p_101, p_110, p_111],
for each three qubit state constructed as rho*(A @ B @ C),
where A, B, and C are three Pauli operators
betas : array
Array of arrays of betas for each qubit. Each beta corresponds to a
matrix of the form
beta[0][0] = beta_I for Q1_|0>
beta[0][1] = beta_Z for Q1_|0>
beta[1][0] = beta_I for Q1_|1>
beta[1][1] = beta_Z for Q1_|1>
pulse_scheme : Array of strings
The pulse scheme used to generate the data. See the supplementary
file 'pulse_schemes' for more descriptions
verbose : bool
If true will return not just minimal t values, but also more results
from the minimizer
PARAM RUNDOWN:
n = # qubits,
nlvl = # levels in system (default=2)
measurements = list of expectation measurements,
paulis = list of paulis (in same order as measurements),
variances =
density_matrix = IDEAL form of output (what you are hoping for),
tolerance = set to None if you don't care (can set specific value to increase speed),
random_initialization = True (slower, use random initial guess), False (faster, use density matrix as initial guess)
max_iter_num =
plot_convergence = True (make MLE convergence plots), False (don't make MLE convergence plots)
verbose = verbose output (can leave as False),
directory_path = if you want to save plots in a particular directory
Returns
-------
Array of t's
Array of 64 t's that minimize the cost function.
"""
# just guessing all t parameters are 1/(nlvl^2)^n
if random_initialization:
t_guess = np.ones((nlvl**2)**n)/((nlvl)**2**n)
#set initial guess to be desired density matrix cholesky decomposition
else:
try:
t_guess = getTsFromCholesky(np.linalg.cholesky(density_matrix))
except Exception as e: # if error, slightly perturb matrix
t_guess = getTsFromCholesky(np.linalg.cholesky(density_matrix+ 1e-14*np.eye(np.shape(density_matrix)[0])))
consts = ({'type': 'eq',
'fun': lambda t: PSDconstraint_NQB(t)})
# Now do some array juggling to make entry[0] in measuredEvals correspond
# to the first 8x8 pauli matrix in Paulis. Since some pulses (e.g. 'I-I-I')
# gives access to m_<ZZZ>, m_<ZZI>, m_<ZIZ>, m_<ZII>, m_<IZZ>, m_<IZI>, m_<IIZ>
# (which is then output from getEvalsfromProbs_3QB),
# the arrays need to be reshaped correctly:
# Now input measured expectation values and paulis to the minimizer:
if plot_convergence: # create convergence plots
# paramters to keep track of during MLE process for plotting
global iter_density # keep track of predicted params
global TGLOBAL # keep track of wall clock time
global iter_number # keep track of number of iterations
global N # number of qubits
N = n
start_time = time.time()
TGLOBAL = time.time()
# normal MLE approach
result = minimize(MLE_Functional_NQB,
t_guess,
args=(n,nlvl, measurements, paulis,variances),
constraints=consts,
options={'maxiter': max_iter_num, 'disp': False},
#tol=tolerance,
callback=callbackIter)
iter_number = 0
total_time = time.time()-start_time
num_iterations = len(iter_density)
print(n, "_", tolerance, "_", random_initialization, ": ", total_time)
# convergence plot
infidelities = []
for i in iter_density:
fidelity = np.trace(np.matmul(getRhofromt_NQB(n,nlvl,i) , density_matrix))
infidelities.append(fidelity)
f = open("./"+directory_path+"times.txt", "a")
f.write("\n"+str(n)+" "+str(tolerance)+" "+str(random_initialization)+" "+str(num_iterations)+" "+str(total_time))
fig = plt.figure()
ax = fig.gca()
plt.title("Tolerance="+str(tolerance)+" Random Init="+str(random_initialization), fontsize=10)
plt.suptitle(str(n)+" Qubit Fidelity Convergence",fontsize=16, fontweight='bold')
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.xlabel("Iteration Number, K")
plt.ylabel("Fidelity")
plt.plot(range(1,len(infidelities)+1),infidelities)
plt.plot(range(1,len(infidelities)+1),infidelities, 'ro')
plt.xlim([0, max_iter_num])
plt.ylim([0, 1.05])
plt.plot([1 for n in range(max_iter_num)], ls='--', alpha=0.5, c='grey')
fig.savefig("./"+directory_path+str(n)+"_"+str(tolerance)+"_"+str(random_initialization)+".png")
plt.close()
iter_density = []
else:
start_time = time.time()
# normal MLE approach
result = minimize(MLE_Functional_NQB,
t_guess,
args=(n,nlvl, measurements, paulis,variances),
constraints=consts,
options={'maxiter': max_iter_num, 'disp': False},
| |
<reponame>PlaytikaResearch/abexp
# MIT License
#
# Copyright (c) 2021 Playtika Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import pandas as pd
from scipy.stats import sem, t, ttest_ind, ttest_ind_from_stats
from sklearn.utils import resample
from statsmodels.api import add_constant
from statsmodels.formula.api import logit
from statsmodels.stats.proportion import proportion_confint, proportions_ztest
class FrequentistAnalyzer:
"""
This class provides tools to perform analysis after A/B test experiments with frequentist statistical approach. It
handles both the case of means comparison and conversions comparison with closed-form-solutions. It also includes
bootstrapping and homogeneity checks of the observed samples.
"""
def __init__(self):
    # Stateless analyzer: no configuration or state to initialize.
    pass
def compare_mean_stats(self, mean_contr, mean_treat, std_contr, nobs_contr,
nobs_treat, alpha=0.05):
"""
Compare means from statistics. Compare the mean of the control group versus the mean of the treatment group.
The result is computed with t-test (closed-form solution) given the groups statistics. It assumes that sample
data are normally distributed.
Parameters
----------
mean_contr : float
Mean of the control group.
mean_treat : float
Mean of the treatment group.
std_contr : float > 0
Standard deviation of the control group. It assumes that control and treatment group have the same standard
deviation.
nobs_contr : int > 0
Number of observations in the control group
nobs_treat : int > 0
Number of observations in the treatment group
alpha : float in interval (0,1)
Significance level, default 0.05. It is the probability of a type I error, that is wrong rejections if the
Null Hypothesis is true.
Returns
-------
p_value : float in interval (0,1)
p-value for the statistical test.
ci_contr : tuple
confidence interval for the control group.
ci_treat : tuple
confidence interval for the treatment group.
"""
# Compute p-value via t-test
_, p_value = ttest_ind_from_stats(mean1=mean_contr, std1=std_contr, nobs1=nobs_contr,
mean2=mean_treat, std2=std_contr, nobs2=nobs_treat)
# Define confidence level, degrees of freedom and standard error
confidence_level = 1 - alpha
df_contr = nobs_contr - 1
df_treat = nobs_treat - 1
se = std_contr * np.sqrt(1/nobs_contr + 1/nobs_treat)
# Compute confidence intervals
ci_contr = t.interval(confidence_level, df_contr, mean_contr, se)
ci_treat = t.interval(confidence_level, df_treat, mean_treat, se)
return p_value, ci_contr, ci_treat
def compare_mean_obs(self, obs_contr, obs_treat, alpha=0.05):
"""
Compare means from observed samples. Compare the mean of the control group versus the mean of the treatment
group. The result is computed with t-test (closed-form solution) given the observed samples of the two groups.
It assumes that sample data are normally distributed.
Parameters
----------
obs_contr : array_like
Observation of the control sample. It contains the value to be analyzed per each sample.
obs_treat : array_like
Observation of the treatment sample. It contains the value to be analyzed per each sample.
alpha : float in interval (0,1)
Significance level, default 0.05. It is the probability of a type I error, that is wrong rejections if the
Null Hypothesis is true.
Returns
-------
p_value : float in interval (0,1)
p-value for the statistical test.
ci_contr : tuple
confidence interval for the control group.
ci_treat : tuple
confidence interval for the treatment group.
"""
# Compute p_value via t-test
_, p_value = ttest_ind(obs_contr, obs_treat)
# Define confidence level and degrees of freedom
confidence_level = 1 - alpha
df_contr = len(obs_contr) - 1
df_treat = len(obs_treat) - 1
# Compute confidence intervals
ci_contr = t.interval(confidence_level, df_contr, np.mean(obs_contr), sem(obs_contr))
ci_treat = t.interval(confidence_level, df_treat, np.mean(obs_treat), sem(obs_treat))
return p_value, ci_contr, ci_treat
def compare_conv_stats(self, conv_contr, conv_treat, nobs_contr, nobs_treat, alpha=0.05):
    """Compare control vs treatment conversion rates from summary counts.

    Uses a two-sided z-test on proportions (closed-form), assuming normally
    distributed data.

    Parameters
    ----------
    conv_contr, conv_treat : int > 0
        Number of conversions in the control and treatment groups.
    nobs_contr, nobs_treat : int > 0
        Total number of observations in each group.
    alpha : float in (0, 1)
        Significance level (type-I error probability), default 0.05.

    Returns
    -------
    p_value : float in (0, 1)
    ci_contr, ci_treat : list
        Confidence intervals for the control and treatment conversion rates.
    """
    # statsmodels expects the counts and totals as parallel arrays.
    counts = np.array([conv_contr, conv_treat])
    totals = np.array([nobs_contr, nobs_treat])
    _, p_value = proportions_ztest(counts, totals, alternative='two-sided')
    # proportion_confint returns vectorized lower/upper bounds.
    lows, highs = proportion_confint(counts, totals, alpha=alpha)
    ci_contr = [lows[0], highs[0]]
    ci_treat = [lows[1], highs[1]]
    return p_value, ci_contr, ci_treat
def compare_conv_obs(self, obs_contr, obs_treat, alpha=0.05):
    """Compare control vs treatment conversion rates from raw observations.

    Each observation array is a boolean (0/1) vector marking whether the
    i-th sample converted.  The result is computed by delegating to
    compare_conv_stats (two-sided z-test).

    Parameters
    ----------
    obs_contr, obs_treat : array_like
        Boolean (0/1) conversion indicators for the control/treatment samples.
    alpha : float in (0, 1)
        Significance level (type-I error probability), default 0.05.

    Returns
    -------
    p_value : float in (0, 1)
    ci_contr, ci_treat : tuple
        Confidence intervals for the control and treatment conversion rates.
    """
    # BUGFIX: forward `alpha` — it was previously dropped, so the confidence
    # intervals always used the default 0.05 regardless of the caller's choice.
    return self.compare_conv_stats(conv_contr=np.sum(obs_contr), conv_treat=np.sum(obs_treat),
                                   nobs_contr=len(obs_contr), nobs_treat=len(obs_treat),
                                   alpha=alpha)
def check_homogeneity(self, df, group, cat_cols, verbose=False):
"""
Check variables homogeneity of the samples considered in the experiment. The goal is to verify homogeneity
between control and treatment groups. It performs univariate logistic regression per each variable of the input
samples where the dependent variable is the group variation.
Parameters
----------
df : pandas DataFrame of shape (n_samples, n_variables)
Input samples to be checked.
group : array-like of shape (n_samples,)
Groups variation of each sample (either 0 or 1).
cat_cols : list
List of the column names to be considered as categorical variables.
verbose : bool
Print detailed information of the logistic regression.
Returns
-------
stats : pandas DataFrame
Statistics of the logistic regression (coefficients, p-values, etc.)
"""
# Select continuous variables
cont_cols = [c for c in list(df.columns) if c not in cat_cols]
# Change type to string for categorical variables
for col in cat_cols:
df[col] = df[col].astype(str)
# Adapt categorical variables names for formula
formula_cat_cols = ["C(" + col + ", Treatment('" + str(df[col].value_counts().idxmax()) + "'))"
for col in cat_cols]
# Select columns names
formula_cols = cont_cols + formula_cat_cols
cols = cont_cols + cat_cols
stats = []
for col, formula_col in zip(cols, formula_cols):
# Define formula
formula = "group ~ " + str(formula_col)
# Add intercept to the model
dfcol = add_constant(df[col])
# Fit the | |
##
## Module & Package Import
##
import json
import os
import datetime
import statistics
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
from flask import Flask, Blueprint, request, render_template, jsonify, flash, redirect
from dotenv import load_dotenv
import gspread
from gspread.exceptions import SpreadsheetNotFound
from oauth2client.service_account import ServiceAccountCredentials
##
## Credential & API Setup
##
# Load environment variables from a local .env file (used for the Plotly
# credentials read below).
load_dotenv()
# Google spreadsheet credentials setup
# NOTE(review): this module authenticates and opens the "timetracker" sheet at
# import time, so importing it performs network I/O and may raise on failure.
CREDENTIALS_FILEPATH = os.path.join(os.path.dirname(__file__), "..", "auth", "google_api_credentials.json")
scope = [
    "https://spreadsheets.google.com/feeds",
    "https://www.googleapis.com/auth/spreadsheets", #> Allows read/write access to the user's sheets and their properties.
    "https://www.googleapis.com/auth/drive.file", #> Per-file access to files created or opened by the app.
    'https://www.googleapis.com/auth/drive' #> without this, it does not fetch the data
]
credentials = ServiceAccountCredentials.from_json_keyfile_name(CREDENTIALS_FILEPATH, scope)
client = gspread.authorize(credentials)
# First worksheet of the "timetracker" spreadsheet; shared by all helpers below.
sheet = client.open("timetracker").sheet1
# plotly credential setup
PLOTLY_USER_NAME = os.environ.get("plotly_user_name")
PLOTLY_API_KEY = os.environ.get("plotly_api_key")
plotly.tools.set_credentials_file(username=PLOTLY_USER_NAME, api_key=PLOTLY_API_KEY)
##
## Define Functions
##
# google spreadsheet actions
def get_records():
    """Return the worksheet handle together with all of its rows as dicts."""
    return sheet, sheet.get_all_records()
def create_records(a, b, c, d, e):
    """Insert or update one tracking row in the spreadsheet.

    a: date string; b: hours worked; c: appended into the column after the
    hour (presumably the day-of-week string, judging by the sheet columns
    read elsewhere — TODO confirm); d, e: year and month numbers.
    If date `a` already exists, only the two cells to the right of it are
    updated; otherwise a complete new row is appended.
    """
    sheet, rows = get_records()
    existing_dates = [row["date"] for row in rows]
    if a not in existing_dates:
        return sheet.append_row([a, float(b), c, int(d), int(e)])
    cell = sheet.find(a)
    sheet.update_cell(cell.row, cell.col + 1, float(b))
    return sheet.update_cell(cell.row, cell.col + 2, c)
# formula to be used for calculation
def day_of_week(d):
    """Map a 'YYYY-MM-DD' date string to a weekday abbreviation ("Mon".."Sun").

    Replaces an 18-line if/elif chain with a table lookup; raises ValueError
    on a malformed date string, like the original int()/date() calls did.
    """
    yyyy, mm, dd = (int(part) for part in d.split('-'))
    # datetime.date.weekday(): Monday == 0 ... Sunday == 6
    names = ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")
    return names[datetime.date(yyyy, mm, dd).weekday()]
def dow_week(a):
    """Return True when `a` is a weekday abbreviation (Mon-Fri), else False.

    The membership test already yields a bool, so the explicit
    if/else returning True/False literals was redundant.
    """
    return a in ("Mon", "Tue", "Wed", "Thu", "Fri")
def list_total(rows):
    """Sum `rows` after coercing each entry to float; returns 0 for an empty list.

    The original manual accumulator shadowed the builtin `sum`; the builtin
    does the same job (and also returns int 0 for an empty input).
    """
    return sum(float(r) for r in rows)
#def month_id():
# c_year = datetime.datetime.now().year
# c_month = datetime.datetime.now().month
# month_id = str(c_year) + str("_") + str(c_month)
# return month_id
# Calculate - average/total work hour - YTD
def total_hour_ytd(i_year):
    """Total hours logged in year `i_year`, rounded to one decimal place."""
    sheet, rows = get_records()
    hours = [r["hour"] for r in rows if int(r["yyyy"]) == int(i_year)]
    return round(list_total(hours), 1)
def avg_hour_ytd(i_year):
    """Average daily hours for year `i_year`, over weekdays with nonzero hours.

    Returns 0 when the year has no qualifying weekday entries — this case
    previously raised ZeroDivisionError, crashing the chart builders that
    iterate over every year since 2009.
    """
    sheet, rows = get_records()
    rows_year = [r for r in rows if int(r["yyyy"]) == int(i_year)]
    total = list_total([r["hour"] for r in rows_year])
    # Only weekdays with actual logged hours count toward the average.
    workdays = [r for r in rows_year
                if dow_week(r["dayofweek"]) == True and r["hour"] != 0]
    if len(workdays) == 0:
        return 0
    return round(total / len(workdays), 1)
# Calculate - average/total work hour - MTD
def total_hour_mtd(i_year, i_month):
    """Total hours logged in month `i_month` of `i_year`, rounded to one decimal."""
    sheet, rows = get_records()
    hours = [r["hour"] for r in rows
             if int(r["yyyy"]) == int(i_year) and int(r["mm"]) == int(i_month)]
    return round(list_total(hours), 1)
def avg_hour_mtd(i_year, i_month):
    """Average daily hours for `i_month` of `i_year`, over nonzero weekdays.

    Returns 0 when the month has no qualifying weekday entries — this case
    previously raised ZeroDivisionError, crashing the monthly chart builder.
    """
    sheet, rows = get_records()
    rows_month = [r for r in rows
                  if int(r["yyyy"]) == int(i_year) and int(r["mm"]) == int(i_month)]
    total = list_total([r["hour"] for r in rows_month])
    # Only weekdays with actual logged hours count toward the average.
    workdays = [r for r in rows_month
                if dow_week(r["dayofweek"]) == True and r["hour"] != 0]
    if len(workdays) == 0:
        return 0
    return round(total / len(workdays), 1)
# Work-Life balance status evaluation
def evaluate_hour(hr):
    """Classify a daily work-hour figure into a work-life-balance bucket.

    <= 8 -> "SAFE", <= 9 -> "WATCH", <= 10 -> "WARNING", above -> "DANGER".
    """
    threshold_watch = 8 #> user setup
    threshold_warning = 9 #> user setup
    threshold_danger = 10 #> user setup
    # Ordered guard clauses: each earlier return makes the lower bound of
    # the next check implicit, so compound conditions are unnecessary.
    if hr <= float(threshold_watch):
        return "SAFE"
    if hr <= float(threshold_warning):
        return "WATCH"
    if hr <= float(threshold_danger):
        return "WARNING"
    return "DANGER"
# Create plotly chart - ytd avg
def chart_ytd_avg():
    """Build and publish the 'Yearly Average Work Hour' bar chart on Plotly.

    One bar per year from `start_year` through the current year, with the
    current year highlighted in blue and dashed guide lines at the
    watch/warning/danger thresholds.  Returns the Plotly publish response.

    Fixes/cleanup: removed a redundant get_records() network call whose
    result was unused (avg_hour_ytd fetches its own data), dropped the
    unused c_month, and replaced the manual while-loops with ranges.
    """
    c_year = datetime.datetime.now().year
    start_year = 2009 #> user setup
    threshold_watch = 8 #> user setup
    threshold_warning = 9 #> user setup
    threshold_danger = 10 #> user setup
    year_span = list(range(start_year, c_year + 1))
    avg_span = [avg_hour_ytd(year) for year in year_span]
    color_basic = 'rgba(204,204,204,1)'
    color_highlight = 'rgba(26, 118, 255, 1)'
    # Highlight only the current (last) year's bar.
    colorlist = [color_highlight if year == c_year else color_basic
                 for year in year_span]
    data = [go.Bar(
        x=year_span,
        y=avg_span,
        text=avg_span,
        textposition='outside',
        marker=dict(color=colorlist)
    )]
    def threshold_line(level, color):
        # Horizontal dashed guide line spanning the whole x-range.
        return {
            'type': 'line',
            'x0': int(start_year - 1),
            'y0': level,
            'x1': int(c_year + 1),
            'y1': level,
            'line': {
                'color': color,
                'width': 4,
                'dash': 'dashdot'
            },
        }
    layout = {
        'title': {
            'text': 'Yearly Average Work Hour',
            'xref': 'paper',
            'x': 0,
        },
        'xaxis': {
            'title': 'Year',
        },
        'yaxis': {
            'title': 'Daily Work Hour',
            'autorange': True,
        },
        'shapes': [
            threshold_line(threshold_watch, 'green'),
            threshold_line(threshold_warning, 'yellow'),
            threshold_line(threshold_danger, 'red'),
        ],
    }
    fig = {
        'data': data,
        'layout': layout,
    }
    return py.plot(fig, filename='chart_ytd_avg')
# Create plotly chart - mtd average
def chart_mtd_avg():
    """Build and publish this year's 'Monthly Average Work Hour' chart on Plotly.

    One bar per month from January through the current month, the current
    month highlighted in blue, with dashed threshold guide lines.  Returns
    the Plotly publish response.

    Fixes/cleanup: removed a redundant get_records() network call whose
    result was unused, and replaced the manual while-loops with ranges.
    """
    c_year = datetime.datetime.now().year
    c_month = datetime.datetime.now().month
    threshold_watch = 8 #> user setup
    threshold_warning = 9 #> user setup
    threshold_danger = 10 #> user setup
    month_span = list(range(1, c_month + 1))
    avg_span = [avg_hour_mtd(c_year, month) for month in month_span]
    color_basic = 'rgba(204,204,204,1)'
    color_highlight = 'rgba(26, 118, 255, 1)'
    # Highlight only the current (last) month's bar.
    colorlist = [color_highlight if month == c_month else color_basic
                 for month in month_span]
    data = [go.Bar(
        x=month_span,
        y=avg_span,
        text=avg_span,
        textposition='outside',
        marker=dict(color=colorlist)
    )]
    def threshold_line(level, color):
        # Horizontal dashed guide line spanning all twelve months.
        return {
            'type': 'line',
            'x0': 0,
            'y0': level,
            'x1': 12,
            'y1': level,
            'line': {
                'color': color,
                'width': 4,
                'dash': 'dashdot'
            },
        }
    layout = {
        'title': {
            'text': str(c_year) + ' Monthly Average Work Hour',
            'xref': 'paper',
            'x': 0,
        },
        'xaxis': {
            'title': str(c_year) + ' Months',
        },
        'yaxis': {
            'title': 'Daily Work Hour',
            'autorange': True,
        },
        'shapes': [
            threshold_line(threshold_watch, 'green'),
            threshold_line(threshold_warning, 'yellow'),
            threshold_line(threshold_danger, 'red'),
        ],
    }
    fig = {
        'data': data,
        'layout': layout,
    }
    return py.plot(fig, filename='chart_mtd_avg')
# Create plotly chart - ytd total
def chart_ytd_total():
    """Build and publish a Plotly bar chart of yearly total work hours.

    Draws one bar per year from ``start_year`` through the current year, with
    the current year highlighted, plus dash-dot benchmark lines for OECD 2017
    reference totals (good/mid/bad examples).

    Returns:
        The response returned by ``py.plot`` (chart URL).
    """
    # Kept for its side effects; sheet/rows are unused here.
    # NOTE(review): confirm get_records() is actually required by total_hour_ytd.
    sheet, rows = get_records()
    c_year = datetime.datetime.now().year
    start_year = 2009  #> user setup
    ytd_tot_benchmark1 = 1356  #> user setup: Good example - Germany 2017 (Source: OECD Statistics)
    ytd_tot_benchmark2 = 1780  #> user setup: Mid example - US 2017 (Source: OECD Statistics)
    ytd_tot_benchmark3 = 2024  #> user setup: Bad example - S.Korea 2017 (Source: OECD Statistics)
    # Years covered so far: [start_year, ..., c_year].
    year_span = list(range(start_year, c_year + 1))
    # Total work hours for each year.
    tot_span = [total_hour_ytd(i) for i in year_span]
    # Grey bars for past years, blue highlight for the current year.
    color_basic = 'rgba(204,204,204,1)'
    color_highlight = 'rgba(26, 118, 255, 1)'
    colorlist = [color_basic] * (c_year - start_year) + [color_highlight]
    data = [go.Bar(
        x=year_span,
        y=tot_span,
        text=tot_span,
        textposition='outside',
        marker=dict(color=colorlist)
    )]

    def benchmark_line(y, color):
        # Horizontal dash-dot rule spanning one year past each end of the data.
        return {
            'type': 'line',
            'x0': start_year - 1,
            'y0': y,
            'x1': c_year + 1,
            'y1': y,
            'line': {
                'color': color,
                'width': 4,
                'dash': 'dashdot'
            },
        }

    layout = {
        'title': {
            'text': 'Yearly Total Work Hour',
            'xref': 'paper',
            'x': 0,
        },
        'xaxis': {
            'title': 'Year',
        },
        'yaxis': {
            'title': 'Total Work Hour',
            'autorange': True,
        },
        'shapes': [
            benchmark_line(ytd_tot_benchmark1, 'green'),
            benchmark_line(ytd_tot_benchmark2, 'yellow'),
            benchmark_line(ytd_tot_benchmark3, 'red'),
        ]
    }
    fig = {
        'data': data,
        'layout': layout,
    }
    response = py.plot(fig, filename='chart_ytd_total')
    return response
# Create plotly chart - mtd total
def chart_mtd_total():
sheet, rows = get_records()
c_year = datetime.datetime.now().year
c_month = datetime.datetime.now().month
month_span =[]
month_inc = 1
while True:
month_span.append(month_inc)
if month_inc == c_month:
break
else:
month_inc = month_inc +1
tot_span = | |
1)))
proportion[I] = (quadrant[I] - (np.pi / 2 - theta.repeat(data.shape[1], 1)[I])) \
/ (theta.repeat(data.shape[1], 1)[I])
# %%Finish Proportion Calculation
section[flats] = FLAT_ID_INT
proportion[flats] = FLAT_ID
section[section == 8] = 0 # Fence-post error correction
proportion = (1 + adjust[section]) / 2.0 - adjust[section] * proportion
return section, proportion
def _mk_adjacency_matrix(self, section, proportion, flats, elev, mag, dX, dY):
"""
Calculates the adjacency of connectivity matrix. This matrix tells
which pixels drain to which.
For example, the pixel i, will recieve area from np.nonzero(A[i, :])
at the proportions given in A[i, :]. So, the row gives the pixel
drain to, and the columns the pixels drained from.
"""
shp = section.shape
mat_data = np.row_stack((proportion, 1 - proportion))
NN = np.prod(shp)
i12 = np.arange(NN).reshape(shp)
j1 = - np.ones_like(i12)
j2 = - np.ones_like(i12)
# make the connectivity for the non-flats/pits
j1, j2 = self._mk_connectivity(section, i12, j1, j2)
j = np.row_stack((j1, j2))
i = np.row_stack((i12, i12))
# connectivity for flats/pits
if self.drain_pits:
pit_i, pit_j, pit_prop, flats, mag = \
self._mk_connectivity_pits(i12, flats, elev, mag, dX, dY)
j = np.concatenate([j.ravel(), pit_j]).astype('int64')
i = np.concatenate([i.ravel(), pit_i]).astype('int64')
mat_data = np.concatenate([mat_data.ravel(), pit_prop])
elif self.drain_flats:
j1, j2, mat_data, flat_i, flat_j, flat_prop = \
self._mk_connectivity_flats(
i12, j1, j2, mat_data, flats, elev, mag)
j = np.concatenate([j.ravel(), flat_j]).astype('int64')
i = np.concatenate([i.ravel(), flat_j]).astype('int64')
mat_data = np.concatenate([mat_data.ravel(), flat_prop])
# This prevents no-data values, remove connections when not present,
# and makes sure that floating point precision errors do not
# create circular references where a lower elevation cell drains
# to a higher elevation cell
I = ~np.isnan(mat_data) & (j != -1) & (mat_data > 1e-8) \
& (elev.ravel()[j] <= elev.ravel()[i])
mat_data = mat_data[I]
j = j[I]
i = i[I]
# %%Make the matrix and initialize
# What is A? The row i area receives area contributions from the
# entries in its columns. If all the entries in my columns have
# drained, then I can drain.
A = sps.csc_matrix((mat_data.ravel(),
np.row_stack((j.ravel(), i.ravel()))),
shape=(NN, NN))
normalize = np.array(A.sum(0) + 1e-16).squeeze()
A = np.dot(A, sps.diags(1/normalize, 0))
return A
def _mk_connectivity(self, section, i12, j1, j2):
"""
Helper function for _mk_adjacency_matrix. Calculates the drainage
neighbors and proportions based on the direction. This deals with
non-flat regions in the image. In this case, each pixel can only
drain to either 1 or two neighbors.
"""
shp = np.array(section.shape) - 1
facets = self.facets
for ii, facet in enumerate(facets):
e1 = facet[1]
e2 = facet[2]
I = section[1:-1, 1:-1] == ii
j1[1:-1, 1:-1][I] = i12[1 + e1[0]:shp[0] + e1[0],
1 + e1[1]:shp[1] + e1[1]][I]
j2[1:-1, 1:-1][I] = i12[1 + e2[0]:shp[0] + e2[0],
1 + e2[1]:shp[1] + e2[1]][I]
# Now do the edges
# left edge
slc0 = [slice(1, -1), slice(0, 1)]
for ind in [0, 1, 6, 7]:
e1 = facets[ind][1]
e2 = facets[ind][2]
I = section[slc0] == ind
j1[slc0][I] = i12[1 + e1[0]:shp[0] + e1[0], e1[1]][I.ravel()]
j2[slc0][I] = i12[1 + e2[0]:shp[0] + e2[0], e2[1]][I.ravel()]
# right edge
slc0 = [slice(1, -1), slice(-1, None)]
for ind in [2, 3, 4, 5]:
e1 = facets[ind][1]
e2 = facets[ind][2]
I = section[slc0] == ind
j1[slc0][I] = i12[1 + e1[0]:shp[0] + e1[0],
shp[1] + e1[1]][I.ravel()]
j2[slc0][I] = i12[1 + e2[0]:shp[0] + e2[0],
shp[1] + e2[1]][I.ravel()]
# top edge
slc0 = [slice(0, 1), slice(1, -1)]
for ind in [4, 5, 6, 7]:
e1 = facets[ind][1]
e2 = facets[ind][2]
I = section[slc0] == ind
j1[slc0][I] = i12[e1[0], 1 + e1[1]:shp[1] + e1[1]][I.ravel()]
j2[slc0][I] = i12[e2[0], 1 + e2[1]:shp[1] + e2[1]][I.ravel()]
# bottom edge
slc0 = [slice(-1, None), slice(1, -1)]
for ind in [0, 1, 2, 3]:
e1 = facets[ind][1]
e2 = facets[ind][2]
I = section[slc0] == ind
j1[slc0][I] = i12[shp[0] + e1[0],
1 + e1[1]:shp[1] + e1[1]][I.ravel()]
j2[slc0][I] = i12[shp[0] + e2[0],
1 + e2[1]:shp[1] + e2[1]][I.ravel()]
# top-left corner
slc0 = [slice(0, 1), slice(0, 1)]
for ind in [6, 7]:
e1 = facets[ind][1]
e2 = facets[ind][2]
if section[slc0] == ind:
j1[slc0] = i12[e1[0], e1[1]]
j2[slc0] = i12[e2[0], e2[1]]
# top-right corner
slc0 = [slice(0, 1), slice(-1, None)]
for ind in [4, 5]:
e1 = facets[ind][1]
e2 = facets[ind][2]
if section[slc0] == ind:
j1[slc0] = i12[e1[0], shp[1] + e1[1]]
j2[slc0] = i12[e2[0], shp[1] + e2[1]]
# bottom-left corner
slc0 = [slice(-1, None), slice(0, 1)]
for ind in [0, 1]:
e1 = facets[ind][1]
e2 = facets[ind][2]
if section[slc0] == ind:
j1[slc0] = i12[shp[0] + e1[0], e1[1]]
j2[slc0] = i12[shp[0] + e2[0], e2[1]]
# bottom-right corner
slc0 = [slice(-1, None), slice(-1, None)]
for ind in [2, 3]:
e1 = facets[ind][1]
e2 = facets[ind][2]
if section[slc0] == ind:
j1[slc0] = i12[e1[0] + shp[0], shp[1] + e1[1]]
j2[slc0] = i12[e2[0] + shp[0], shp[1] + e2[1]]
return j1, j2
    def _mk_connectivity_pits(self, i12, flats, elev, mag, dX, dY):
        """
        Helper function for _mk_adjacency_matrix. This is a more general
        version of _mk_adjacency_flats which drains pits and flats to nearby
        but non-adjacent pixels. The slope magnitude (and flats mask) is
        updated for these pits and flats so that the TWI can be computed.
        """
        e = elev.data.ravel()
        # Source pixels, receiver pixels, and slope weights for the new links.
        pit_i = []
        pit_j = []
        pit_prop = []
        # Pits for which no downhill receiver could be found.
        warn_pits = []
        # Candidate pits: flat pixels with positive elevation.
        # NOTE(review): assumes elev <= 0 marks no-data — TODO confirm.
        pits = i12[flats & (elev > 0)]
        # Process pits from lowest to highest elevation.
        I = np.argsort(e[pits])
        for pit in pits[I]:
            # find drains
            pit_area = np.array([pit], 'int64')
            drain = None
            epit = e[pit]
            # Grow the pit region outward until the border contains at least
            # one pixel strictly lower than the pit, or iterations run out.
            for it in range(self.drain_pits_max_iter):
                border = get_border_index(pit_area, elev.shape, elev.size)
                eborder = e[border]
                emin = eborder.min()
                if emin < epit:
                    drain = border[eborder < epit]
                    break
                # Absorb the lowest border pixels and keep expanding.
                pit_area = np.concatenate([pit_area, border[eborder == emin]])
            if drain is None:
                warn_pits.append(pit)
                continue
            ipit, jpit = np.unravel_index(pit, elev.shape)
            Idrain, Jdrain = np.unravel_index(drain, elev.shape)
            # filter by drain distance in coordinate space
            if self.drain_pits_max_dist:
                dij = np.sqrt((ipit - Idrain)**2 + (jpit-Jdrain)**2)
                b = dij <= self.drain_pits_max_dist
                if not b.any():
                    warn_pits.append(pit)
                    continue
                drain = drain[b]
                Idrain = Idrain[b]
                Jdrain = Jdrain[b]
            # calculate real distances
            dx = [_get_dX_mean(dX, ipit, idrain) * (jpit - jdrain)
                  for idrain, jdrain in zip(Idrain, Jdrain)]
            dy = [dY[make_slice(ipit, idrain)].sum() for idrain in Idrain]
            dxy = np.sqrt(np.array(dx)**2 + np.array(dy)**2)
            # filter by drain distance in real space
            if self.drain_pits_max_dist_XY:
                b = dxy <= self.drain_pits_max_dist_XY
                if not b.any():
                    warn_pits.append(pit)
                    continue
                drain = drain[b]
                dxy = dxy[b]
            # calculate magnitudes
            # Slope from the pit down to each receiver (positive by construction).
            s = (e[pit]-e[drain]) / dxy
            # connectivity info
            # TODO proportion calculation (_mk_connectivity_flats used elev?)
            # The pit links to every surviving receiver with its slope as weight.
            pit_i += [pit for i in drain]
            pit_j += drain.tolist()
            pit_prop += s.tolist()
            # update pit magnitude and flats mask
            mag[ipit, jpit] = np.mean(s)
            flats[ipit, jpit] = False
        if warn_pits:
            warnings.warn("Warning %d pits had no place to drain to in this "
                          "chunk" % len(warn_pits))
        # Note: returning flats and mag here is not strictly necessary
        return (np.array(pit_i, 'int64'),
                np.array(pit_j, 'int64'),
                np.array(pit_prop, 'float64'),
                flats,
                mag)
def _mk_connectivity_flats(self, i12, j1, j2, mat_data, flats, elev, mag):
"""
Helper function for _mk_adjacency_matrix. This calcualtes the
connectivity for flat regions. Every pixel in the flat will drain
to a random pixel in the flat. This accumulates all the area in the
flat region to a single pixel. All that area is then drained from
that pixel to the surroundings on the flat. If the border of the
flat has a single pixel with a much lower elevation, all the area will
go towards that pixel. If the border has pixels with similar elevation,
then the area will be distributed amongst all the border pixels
proportional to their elevation.
"""
nn, mm = flats.shape
NN = np.prod(flats.shape)
# Label the flats
assigned, n_flats = spndi.label(flats, FLATS_KERNEL3)
flat_ids, flat_coords, flat_labelsf = _get_flat_ids(assigned)
flat_j = [None] * n_flats
flat_prop = [None] * n_flats
flat_i = [None] * n_flats
# Temporary array to find the flats
edges = np.zeros_like(flats)
# %% Calcute the flat drainage
warn_flats = []
for ii in xrange(n_flats):
ids_flats = flat_ids[flat_coords[ii]:flat_coords[ii+1]]
edges[:] = 0
j = ids_flats % mm
i = ids_flats // mm
for iii in [-1, 0, 1]:
for jjj in [-1, 0, | |
'''
Reads a HUD HMIS XML 3.0 Document into memory and parses its contents storing them into a postgresql database.
This is a log database, so it holds everything and doesn't worry about deduplication.
'''
import os#, sys
from .reader import Reader
from zope.interface import implementer
from lxml import etree
import dateutil.parser
from .dbobjects import * # @UnusedWildImport
@implementer(Reader)
class HMISXML30Reader:
# print "Base.metadata before create in hmisxmlreader3: ", Base.metadata
# Base.metadata.create_all(pg_db_engine)
# print "Base.metadata after create in hmisxmlreader3: ", Base.metadata
''' Implements reader interface '''
#implements (Reader)
''' Define XML namespaces '''
hmis_namespace = "http://www.hudhdx.info/Resources/Vendors/3_0/HUD_HMIS.xsd"
airs_namespace = "http://www.hudhdx.info/Resources/Vendors/3_0/AIRS_3_0_mod.xsd"
nsmap = {"hmis" : hmis_namespace, "airs" : airs_namespace}
def __init__(self, xml_file, db):
''' Put XML file into local object '''
self.xml_file = xml_file
#if settings.DEBUG:
# print "does self.xml_file exist?", os.path.exists(self.xml_file)
''' Instantiate database object '''
#dbo = DB()
self.session = db.Session()
def read(self):
''' Takes an XML instance file and reads it into memory as a node tree '''
#print '** Raw XML:', self.xml_file
#if settings.DEBUG:
# print "does self.xml_file still exist?", os.path.exists(self.xml_file)
tree = etree.parse(self.xml_file)
#print '** Node tree:', tree
#self.xml_file.close()
return tree
def process_data(self, tree):
''' Shreds the XML document into the database and return the source ids '''
root_element = tree.getroot()
source_ids = parse_source(self, root_element)
return source_ids
''' Parse each table (other readers use these, so they're stand-alone methods)'''
    def parse_source(self, root_element):
        ''' Loop through all sources and then traverse the tree for each export '''
        ''' There can be multiple sources with multiple exports inside each source '''
        xpSources = '/hmis:Sources/hmis:Source'
        source_list = root_element.xpath(xpSources, namespaces = self.nsmap)
        if source_list is not None:
            source_ids = []
            for item in source_list:
                # Fresh column/value accumulator for each Source row.
                self.parse_dict = {}
                ''' Element paths '''
                xpSourceVersion = '../../@hmis:version'
                xpSourceIDIDNum = 'hmis:SourceID/hmis:IDNum'
                xpSourceIDIDStr = 'hmis:SourceID/hmis:IDStr'
                xpSourceDelete = 'hmis:SourceID/@hmis:Delete'
                xpSourceDeleteOccurredDate = 'hmis:SourceID/@hmis:DeleteOccurredDate'
                xpSourceDeleteEffective = 'hmis:SourceID/@hmis:DeleteEffective'
                xpSourceSoftwareVendor = 'hmis:SoftwareVendor'
                xpSourceSoftwareVersion = 'hmis:SoftwareVersion'
                xpSourceContactEmail = 'hmis:SourceContactEmail'
                xpSourceContactExtension = 'hmis:SourceContactExtension'
                xpSourceContactFirst = 'hmis:SourceContactFirst'
                xpSourceContactLast = 'hmis:SourceContactLast'
                xpSourceContactPhone = 'hmis:SourceContactPhone'
                xpSourceName = 'hmis:SourceName'
                #xp_source_exports = 'hmis:Export'
                ''' Map elements to database columns '''
                existence_test_and_add(self, 'schema_version', item.xpath(xpSourceVersion, namespaces = self.nsmap), 'attribute_text')
                existence_test_and_add(self, 'source_id_id_num', item.xpath(xpSourceIDIDNum, namespaces = self.nsmap), 'text')
                existence_test_and_add(self, 'source_id_id_str', item.xpath(xpSourceIDIDStr, namespaces = self.nsmap), 'text')
                existence_test_and_add(self, 'source_id_id_delete', item.xpath(xpSourceDelete, namespaces = self.nsmap), 'attribute_text')
                existence_test_and_add(self, 'source_id_id_delete_occurred_date', item.xpath(xpSourceDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
                existence_test_and_add(self, 'source_id_id_delete_effective_date', item.xpath(xpSourceDeleteEffective, namespaces = self.nsmap), 'attribute_date')
                existence_test_and_add(self, 'software_vendor', item.xpath(xpSourceSoftwareVendor, namespaces = self.nsmap), 'text')
                existence_test_and_add(self, 'software_version', item.xpath(xpSourceSoftwareVersion, namespaces = self.nsmap), 'text')
                existence_test_and_add(self, 'source_contact_email', item.xpath(xpSourceContactEmail, namespaces = self.nsmap), 'text')
                existence_test_and_add(self, 'source_contact_extension', item.xpath(xpSourceContactExtension, namespaces = self.nsmap), 'text')
                existence_test_and_add(self, 'source_contact_first', item.xpath(xpSourceContactFirst, namespaces = self.nsmap), 'text')
                existence_test_and_add(self, 'source_contact_last', item.xpath(xpSourceContactLast, namespaces = self.nsmap), 'text')
                existence_test_and_add(self, 'source_contact_phone', item.xpath(xpSourceContactPhone, namespaces = self.nsmap), 'text')
                existence_test_and_add(self, 'source_name', item.xpath(xpSourceName, namespaces = self.nsmap), 'text')
                # Prefer the string id over the numeric id for source_id.
                source_id_str = item.xpath(xpSourceIDIDStr, namespaces = self.nsmap)
                source_id_num = item.xpath(xpSourceIDIDNum, namespaces = self.nsmap)
                if source_id_str:
                    #source_id = source_id_str[0].text
                    existence_test_and_add(self, 'source_id', source_id_str, 'text')
                elif source_id_num:
                    #source_id = source_id_num[0].text
                    existence_test_and_add(self, 'source_id', source_id_num, 'text')
                ''' Shred to database '''
                # keep a list of source ids as they are discovered
                source_id = shred(self, self.parse_dict, Source)
                if source_id != None:
                    source_ids.append(source_id)
                #print "self.source_index_id is: ", self.source_index_id
#                ''' Parse all exports for this specific source '''
#                parse_export(self, item)
            return source_ids
def parse_export(self, element):
''' loop through all exports and traverse the tree '''
''' Element paths '''
xpExport = 'hmis:Export'
xpExportIDIDNum = 'hmis:ExportID/hmis:IDNum'
xpExportIDIDStr = 'hmis:ExportID/hmis:IDStr'
xpExportDelete = 'hmis:ExportID/@hmis:delete'
xpExportDeleteOccurredDate = 'hmis:ExportID/@hmis:deleteOccurredDate'
xpExportDeleteEffective = 'hmis:ExportID/@hmis:deleteEffective'
xpExportExportDate = 'hmis:ExportDate'
xpExportPeriodStartDate = 'hmis:ExportPeriod/hmis:StartDate'
xpExportPeriodEndDate = 'hmis:ExportPeriod/hmis:EndDate'
itemElements = element.xpath(xpExport, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
test = item.xpath(xpExportIDIDNum, namespaces = self.nsmap)
if len(test) is 0:
test = item.xpath(xpExportIDIDStr, namespaces = self.nsmap)
self.export_id = test
existence_test_and_add(self, 'export_id', test, 'text')
else:
self.export_id = test
existence_test_and_add(self, 'export_id', test, 'text')
existence_test_and_add(self, 'export_id_id_num', item.xpath(xpExportIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'export_id_id_str', item.xpath(xpExportIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'export_id_delete', item.xpath(xpExportDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'export_id_delete_occurred_date', item.xpath(xpExportDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'export_id_delete_effective_date', item.xpath(xpExportDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'export_date', item.xpath(xpExportExportDate, namespaces = self.nsmap), 'element_date')
existence_test_and_add(self, 'export_period_start_date', item.xpath(xpExportPeriodStartDate, namespaces = self.nsmap), 'element_date')
existence_test_and_add(self, 'export_period_end_date', item.xpath(xpExportPeriodEndDate, namespaces = self.nsmap), 'element_date')
''' Shred to database '''
shred(self, self.parse_dict, Export)
''' Create source to export link '''
record_source_export_link(self)
''' Parse sub-tables '''
parse_household(self, item)
parse_region(self, item)
parse_agency(self, item)
parse_person(self, item)
parse_service(self, item)
parse_site(self, item)
parse_site_service(self, item)
return
def parse_household(self, element):
''' Element paths '''
xpHousehold = 'hmis:Household'
xpHouseholdIDIDNum = 'hmis:HouseholdID/hmis:IDNum'
xpHouseholdIDIDStr = 'hmis:HouseholdID/hmis:IDStr'
xpHeadOfHouseholdIDUnhashed = 'hmis:HeadOfHouseholdID/hmis:Unhashed'
xpHeadOfHouseholdIDHashed = 'hmis:HeadOfHouseholdID/hmis:Hashed'
itemElements = element.xpath(xpHousehold, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'household_id_num', item.xpath(xpHouseholdIDIDNum, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'household_id_str', item.xpath(xpHouseholdIDIDStr, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'head_of_household_id_unhashed', item.xpath(xpHeadOfHouseholdIDUnhashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'head_of_household_id_hashed', item.xpath(xpHeadOfHouseholdIDHashed, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, Household)
''' Parse sub-tables '''
parse_members(self, item)
def parse_members(self, element):
''' Element paths '''
xpMembers = 'hmis:Members'
xpMember = 'hmis:Member'
xpPersonIDUnhashed = 'hmis:PersonID/hmis:Unhashed'
xpPersonIDHashed = 'hmis:PersonID/hmis:Hashed'
xpRelationshipToHeadOfHousehold = 'hmis:RelationshipToHeadOfHousehold'
test = element.xpath(xpMembers, namespaces = self.nsmap)
if len(test) > 0:
itemElements = test[0].xpath(xpMember, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'person_id_unhashed', item.xpath(xpPersonIDUnhashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'person_id_hashed', item.xpath(xpPersonIDHashed, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'relationship_to_head_of_household', item.xpath(xpRelationshipToHeadOfHousehold, namespaces = self.nsmap), 'text')
''' Foreign Keys '''
existence_test_and_add(self, 'household_index_id', self.household_index_id, 'no_handling')
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, Members)
    def parse_region(self, element):
        # Shreds each hmis:Region element (type/description plus their
        # dateCollected/dateEffective/dataCollectionStage attributes)
        # into the Region table.
        ''' Element paths '''
        xpRegion = 'hmis:Region'
        xpRegionIDIDNum = 'hmis:RegionID/hmis:IDNum'
        xpRegionIDIDStr = 'hmis:RegionID/hmis:IDStr'
        xpSiteServiceID = 'hmis:SiteServiceID'
        xpRegionType = 'hmis:RegionType'
        xpRegionTypeDateCollected = 'hmis:RegionType/@hmis:dateCollected'
        xpRegionTypeDateEffective = 'hmis:RegionType/@hmis:dateEffective'
        xpRegionTypeDataCollectionStage = 'hmis:RegionType/@hmis:dataCollectionStage'
        xpRegionDescription = 'hmis:RegionDescription'
        xpRegionDescriptionDateCollected = 'hmis:RegionDescription/@hmis:dateCollected'
        xpRegionDescriptionDateEffective = 'hmis:RegionDescription/@hmis:dateEffective'
        xpRegionDescriptionDataCollectionStage = 'hmis:RegionDescription/@hmis:dataCollectionStage'
        itemElements = element.xpath(xpRegion, namespaces = self.nsmap)
        if itemElements is not None:
            for item in itemElements:
                # Fresh column/value accumulator for each Region row.
                self.parse_dict = {}
                ''' Map elements to database columns '''
                existence_test_and_add(self, 'region_id_id_num', item.xpath(xpRegionIDIDNum, namespaces = self.nsmap), 'text')
                existence_test_and_add(self, 'region_id_id_str', item.xpath(xpRegionIDIDStr, namespaces = self.nsmap), 'text')
                existence_test_and_add(self, 'site_service_id', item.xpath(xpSiteServiceID, namespaces = self.nsmap), 'text')
                existence_test_and_add(self, 'region_type', item.xpath(xpRegionType, namespaces = self.nsmap), 'text')
                existence_test_and_add(self, 'region_type_date_collected', item.xpath(xpRegionTypeDateCollected, namespaces = self.nsmap), 'attribute_date')
                existence_test_and_add(self, 'region_type_date_effective', item.xpath(xpRegionTypeDateEffective, namespaces = self.nsmap), 'attribute_date')
                existence_test_and_add(self, 'region_type_data_collection_stage', item.xpath(xpRegionTypeDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
                existence_test_and_add(self, 'region_description', item.xpath(xpRegionDescription, namespaces = self.nsmap), 'text')
                existence_test_and_add(self, 'region_description_date_collected', item.xpath(xpRegionDescriptionDateCollected, namespaces = self.nsmap), 'attribute_date')
                existence_test_and_add(self, 'region_description_date_effective', item.xpath(xpRegionDescriptionDateEffective, namespaces = self.nsmap), 'attribute_date')
                existence_test_and_add(self, 'region_description_data_collection_stage', item.xpath(xpRegionDescriptionDataCollectionStage, namespaces = self.nsmap), 'attribute_text')
                ''' Foreign Keys '''
                existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
                ''' Shred to database '''
                shred(self, self.parse_dict, Region)
def parse_agency(self, element):
''' Element paths '''
xpAgency = 'hmis:Agency'
xpAgencyDelete = './@delete'
xpAgencyDeleteOccurredDate = './@deleteOccurredDate'
xpAgencyDeleteEffective = './@deleteEffective'
xpAirsKey = 'airs:Key'
xpAirsName = 'airs:Name'
xpAgencyDescription = 'airs:AgencyDescription'
xpIRSStatus = 'airs:IRSStatus'
xpSourceOfFunds = 'airs:SourceOfFunds'
#xpRecordOwner = '@hmis:RecordOwner'
xpRecordOwner = './@RecordOwner'
#xpFEIN = '@hmis:FEIN'
xpFEIN = './@FEIN'
xpYearInc = './@YearInc'
xpAnnualBudgetTotal = './@AnnualBudgetTotal'
xpLegalStatus = './@LegalStatus'
xpExcludeFromWebsite = './@ExcludeFromWebsite'
xpExcludeFromDirectory = './@ExcludeFromDirectory'
itemElements = element.xpath(xpAgency, namespaces = self.nsmap)
if itemElements is not None:
for item in itemElements:
self.parse_dict = {}
''' Map elements to database columns '''
existence_test_and_add(self, 'agency_delete', item.xpath(xpAgencyDelete, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'agency_delete_occurred_date', item.xpath(xpAgencyDeleteOccurredDate, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'agency_delete_effective_date', item.xpath(xpAgencyDeleteEffective, namespaces = self.nsmap), 'attribute_date')
existence_test_and_add(self, 'airs_key', item.xpath(xpAirsKey, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'airs_name', item.xpath(xpAirsName, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'agency_description', item.xpath(xpAgencyDescription, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'irs_status', item.xpath(xpIRSStatus, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'source_of_funds', item.xpath(xpSourceOfFunds, namespaces = self.nsmap), 'text')
existence_test_and_add(self, 'fein', item.xpath(xpFEIN, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'record_owner', item.xpath(xpRecordOwner, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'year_inc', item.xpath(xpYearInc, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'annual_budget_total', item.xpath(xpAnnualBudgetTotal, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'legal_status', item.xpath(xpLegalStatus, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'exclude_from_website', item.xpath(xpExcludeFromWebsite, namespaces = self.nsmap), 'attribute_text')
existence_test_and_add(self, 'exclude_from_directory', item.xpath(xpExcludeFromDirectory, namespaces = self.nsmap), 'attribute_text')
''' Foreign Keys '''
existence_test_and_add(self, 'export_index_id', self.export_index_id, 'no_handling')
''' Shred to database '''
shred(self, self.parse_dict, Agency)
''' Parse sub-tables '''
parse_agency_service(self, item)
parse_aka(self, item)
# SBB20100907 | |
<reponame>larryhastings/pyweek24
# json-map, a tiled JSON map renderer for pyglet
# Copyright (C) 2014 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
A Tiled JSON map renderer for pyglet.
These classes use the JSON format as generated by Tiled JSON plugin.
`pyglet.resource` framework is used to load all the elements of the map, so
any path information must be removed from the tileset.
"""
import os
import json
import pyglet
from pyglet.graphics import OrderedGroup
from pyglet.sprite import Sprite
from pyglet import gl
__all__ = ['Map', "TileLayer", "ObjectGroup",]
def calculate_columns(image_width, tile_width, margin, spacing):
# works fine for rows too!
image_width -= margin * 2
if image_width < tile_width:
return 0
if image_width == tile_width:
return 1
columns = ((image_width - tile_width) // (tile_width + spacing)) + 1
assert ((columns * tile_width) + (spacing * (columns - 1))) == image_width
return columns
def get_texture_sequence(filename, tilewidth=32, tileheight=32, margin=1, spacing=1, nearest=False):
    """Returns a texture sequence of a grid generated from a tile set.

    The image is loaded through ``pyglet.resource``; ``nearest`` switches
    the texture to GL_NEAREST filtering (useful for pixel art).
    """
    image = pyglet.resource.image(filename)
    # Crop the outer margin once so the grid maths below only deal with spacing.
    region = image.get_region(margin, margin,
                              image.width - margin * 2,
                              image.height - margin * 2)
    # we've already thrown away the margins
    rows = calculate_columns(region.height, tileheight, margin=0, spacing=spacing)
    cols = calculate_columns(region.width, tilewidth, margin=0, spacing=spacing)
    grid = pyglet.image.ImageGrid(region, rows, cols,
                                  row_padding=spacing,
                                  column_padding=spacing)
    texture = grid.get_texture_sequence()
    if nearest:
        # Disable interpolation when the texture is scaled.
        gl.glTexParameteri(texture.target, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)
        gl.glTexParameteri(texture.target, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    return texture
class BaseLayer(object):
    """
    Base layer.

    Takes care of the "visible" flag: sprite storage and the rendering
    group are only created for visible layers.
    """
    # Class-level counter: each visible layer gets the next OrderedGroup
    # so layers render in creation order.
    groups = 0

    def __init__(self, data, map):
        self.data = data
        self.map = map
        if not self.data["visible"]:
            return
        self.sprites = {}
        self.group = OrderedGroup(BaseLayer.groups)
        BaseLayer.groups += 1
class TileLayer(BaseLayer):
    """
    Tile layer.

    Provides a pythonic interface to the tile layer, including:

    - Iterate through the tiles.
    - Check if one coordinate exists in the layer.
    - Get one tile of layer.
    """
    def __iter__(self):
        # NOTE(review): iterates the layer *dict* (its keys), not the tile
        # gids in data["data"] — confirm this is the intended behavior.
        return iter(self.data)
    def __contains__(self, index):
        # index must be an (x, y) tuple of tile coordinates.
        if type(index) != tuple:
            raise TypeError("tuple expected")
        x, y = index
        # NOTE(review): `in` tests membership among the *values* of
        # data["data"] (the gid list), not whether the flat index is in
        # range — looks suspicious for "coordinate exists"; confirm.
        return int(x+y*self.map.data["width"]) in self.data["data"]
    def __getitem__(self, index):
        # Return the gid stored at tile coordinate (x, y).
        if type(index) != tuple:
            raise TypeError("tuple expected")
        x, y = index
        return self.data["data"][int(x+y*self.map.data["width"])]
    def set_viewport(self, x, y, w, h):
        # Create sprites for tiles inside the viewport (in pixels) and
        # delete sprites that scrolled out of view.
        tw = self.map.data["tilewidth"]
        th = self.map.data["tileheight"]
        # Simple float-friendly range generator (x/y may be floats).
        def yrange(f, t, s):
            while f < t:
                yield f
                f += s
        in_use = set()
        for j in yrange(y, y+h+th, th):
            py = j//th
            for i in yrange(x, x+w+tw, tw):
                px = i//tw
                coordinate = (px, py)
                in_use.add(coordinate)
                # Sprites are cached; only build one per newly-visible tile.
                if coordinate not in self.sprites:
                    try:
                        texture = self.map.get_texture(self[coordinate])
                        # Flip y: Tiled counts rows from the top, pyglet
                        # places sprites from the bottom.
                        sprite = Sprite(texture,
                                        x=(px*tw),
                                        y=h-(py*th)-th,
                                        batch=self.map.batch,
                                        group=self.group,
                                        usage="static",
                                        )
                    except (KeyError, IndexError):
                        # No texture for this gid/coordinate: remember the
                        # miss so we don't retry every frame.
                        sprite = None
                    self.sprites[coordinate] = sprite
        # Cull sprites that are no longer inside the viewport.
        unused_keys = set(self.sprites) - in_use
        for key in unused_keys:
            o = self.sprites.pop(key)
            if o is not None:
                o.delete()
class ObjectGroup(BaseLayer):
    """
    Object Group Layer.

    Only tile based objects will be drawn (not shape based).

    Provides a pythonic interface to the object layer, including:

    - Iterate through the objects.
    - Check if one coordinate or an object name exists in the layer.
    - Get one object based on its coordinates or its name.

    Also it is possible to get a list of objects of the same type with
    `ObjectGroup.get_by_type(type)`.
    """
    def __init__(self, data, map):
        super(ObjectGroup, self).__init__(data, map)
        # Viewport height from the last set_viewport call (used to flip y).
        self.h = 0
        self.objects = []
        # Lookup tables: by object name, by object type, by tile coordinate.
        self._index = {}
        self._index_type = {}
        self._xy_index = {}
        for obj in data["objects"]:
            self.objects.append(obj)
            name = obj.get("name", "?")
            if name not in self._index:
                self._index[name] = []
            otype = obj.get("type", "?")
            if otype not in self._index_type:
                self._index_type[otype] = []
            # Pixel position -> tile coordinate.
            x = int(obj["x"])//self.map.data["tilewidth"]
            y = int(obj["y"])//self.map.data["tileheight"]-1
            coordinate = (x, y)
            if coordinate not in self._xy_index:
                self._xy_index[coordinate] = []
            self._index[name].append(obj)
            self._index_type[otype].append(obj)
            self._xy_index[coordinate].append(obj)
        # XXX: is this useful AT ALL?
        self.objects.sort(key=lambda obj: obj["x"]+obj["y"]*self.map.data["width"])
    def __iter__(self):
        return iter(self.objects)
    def __contains__(self, name):
        # A tuple is treated as a tile coordinate, anything else as a name.
        if isinstance(name, tuple):
            x, y = name
            return (int(x), int(y)) in self._xy_index
        return name in self._index
    def __getitem__(self, name):
        if isinstance(name, tuple):
            x, y = name
            # XXX: if there are several objects, expect the first one
            return self._xy_index[int(x), int(y)][0]
        return self._index[name]
    def get_by_type(self, otype):
        """Return the list of objects whose "type" equals ``otype``."""
        return self._index_type[otype]
    def set_viewport(self, x, y, w, h):
        """Create sprites for tile-objects inside the viewport; cull the rest."""
        self.h = h
        tw = self.map.data["tilewidth"]
        th = self.map.data["tileheight"]
        in_use = set()
        for obj in self.objects:
            if x-tw < obj["x"] < x+w+tw and y-th < obj["y"] < y+h+th:
                if not obj["visible"]:
                    continue
                # Only tile-based objects (with a gid) are drawn.
                gid = obj.get("gid")
                if gid:
                    coordinate = (obj["x"], obj["y"])
                    in_use.add(coordinate)
                    # BUG FIX: build the sprite only once per coordinate
                    # (matches TileLayer). Previously a new Sprite was
                    # created on every viewport update, leaking the old
                    # sprite in the batch.
                    # NOTE(review): like TileLayer, the cached y position
                    # assumes h is stable between calls — confirm.
                    if coordinate not in self.sprites:
                        try:
                            texture = self.map.get_texture(gid)
                            tileoffset = self.map.get_tileoffset(gid)
                            sprite = Sprite(texture,
                                            x=obj["x"]+tileoffset[0],
                                            y=self.h-obj["y"]+tileoffset[1],
                                            batch=self.map.batch,
                                            group=self.group,
                                            usage="static",
                                            )
                        except (IndexError, KeyError):
                            # Remember failed lookups so they aren't retried.
                            sprite = None
                        self.sprites[coordinate] = sprite
        # Cull sprites that left the viewport.
        unused_keys = set(self.sprites) - in_use
        for key in unused_keys:
            o = self.sprites.pop(key)
            # BUG FIX: failed lookups are stored as None; calling delete()
            # on them raised AttributeError (TileLayer already guards this).
            if o is not None:
                o.delete()
class Tileset(object):
    """Manages a tileset and it's used internally by TileLayer."""
    def __init__(self, data, nearest=False):
        """Build the texture grid for one tileset.

        :param data: dict with the tileset section of the Tiled JSON map.
        :param nearest: use nearest-neighbour filtering for the textures.
        """
        self.data = data
        # used to convert coordinates of the grid
        self.columns = calculate_columns(self.data["imagewidth"], self.data["tilewidth"], spacing=self.data["spacing"], margin=self.data["margin"])
        self.rows = calculate_columns(self.data["imageheight"], self.data["tileheight"], spacing=self.data["spacing"], margin=self.data["margin"])
        # the image will be accessed using pyglet resources
        self.image = os.path.basename(self.data["image"])
        # BUG FIX: `nearest` was hard-coded to False here, so the constructor
        # argument was silently ignored; forward it instead.
        self.texture = get_texture_sequence(self.image, self.data["tilewidth"],
                                            self.data["tileheight"],
                                            self.data["margin"],
                                            self.data["spacing"],
                                            nearest=nearest,
                                            )
    def __getitem__(self, index):
        return self.texture[index]
    def __len__(self):
        return len(self.texture)
class Map(object):
"""
Load, manage and render Tiled JSON files.
Maps can created providing the JSON data to this class or using `Map.load_json()`
and after that a viewport must be set with `Map.set_viewport()`.
"""
def __init__(self, data, nearest=False):
self.data = data
self.tilesets = {} # the order is not important
self.layers = []
self.tilelayers = {}
self.objectgroups = {}
for tileset in data["tilesets"]:
self.tilesets[tileset["name"]] = Tileset(tileset, nearest)
for layer in data["layers"]:
# TODO: test this!
if layer['name'] in (self.tilelayers, self.objectgroups):
raise ValueError("Duplicated layer name %s" % layer["name"])
if layer["type"] == "tilelayer":
self.layers.append(TileLayer(layer, self))
self.tilelayers[layer["name"]] = self.layers[-1]
elif layer["type"] == "objectgroup":
self.layers.append(ObjectGroup(layer, self))
self.objectgroups[layer["name"]] = self.layers[-1]
else:
raise ValueError("unsupported layer type %s, skipping" % layer["type"])
self.batch = pyglet.graphics.Batch()
# viewport
self.x = 0
self.y = 0
self.w = 0
self.h = 0
# focus
self.fx = None
self.fy = None
# useful (size in pixels)
self.p_width = self.data["width"]*self.data["tilewidth"]
self.p_height = self.data["height"]*self.data["tileheight"]
# build a texture index converting pyglet indexing of the texture grid
# to tiled coordinate system
self.tileoffset_index = {}
self.texture_index = {}
for tileset in self.tilesets.values():
for y in range(tileset.rows):
for x in range(tileset.columns):
self.texture_index[x+y*tileset.columns+tileset.data["firstgid"]] = \
tileset[(tileset.rows-1-y),x]
# TODO: test this!
if "tileoffset" in tileset.data:
self.tileoffset_index[x+y*tileset.columns+tileset.data["firstgid"]] = \
(tileset.data["tileoffset"]["x"], tileset.data["tileoffset"]["y"])
    def invalidate(self):
        """Forces a batch update of the map."""
        # Re-run set_viewport with the current viewport; force=True bypasses
        # the "viewport unchanged" early exit.
        self.set_viewport(self.x, self.y, self.w, self.h, True)
def set_viewport(self, x, y, w, h, force=False):
"""
Sets the map viewport to the screen coordinates.
Optionally the force flag can be used to update the batch even if the
viewport didn't change (this should be used via `Map.invalidate()`).
"""
# x and y can be floats
vx = max(x, 0)
vy = max(y, 0)
vx = min(vx, (self.p_width)-w)
vy = min(vy, (self.p_height)-h)
vw = int(w)
vh = int(h)
if not any([force, vx!=self.x, vy!=self.y, vw!=self.w, vh!=self.h]):
return
self.x = vx
self.y = vy
self.w = vw
self.h = vh
for layer in self.layers:
if layer.data["visible"]:
layer.set_viewport(self.x, self.y, self.w, self.h)
def set_focus(self, x, y=None):
"""Sets the focus in (x, y) world coordinates."""
if y == None:
y = int(x[1])
x = int(x[0])
else:
x = int(x)
y = int(y)
if self.fx == x and self.fy == y:
return
self.fx = x
self.fy = y
vx = max(x-(self.w//2), 0)
vy = max(y-(self.h//2), 0)
if | |
return _get_compression_parameter(self._params, lib.ZSTD_c_ldmHashLog)
    @property
    def ldm_min_match(self):
        # Minimum match size for long-distance matching (ZSTD_c_ldmMinMatch).
        return _get_compression_parameter(self._params, lib.ZSTD_c_ldmMinMatch)
    @property
    def ldm_bucket_size_log(self):
        # Log size of each bucket in the long-distance-matching hash table.
        return _get_compression_parameter(
            self._params, lib.ZSTD_c_ldmBucketSizeLog
        )
    @property
    def ldm_hash_rate_log(self):
        # Frequency of inserting/looking up entries in the LDM hash table.
        return _get_compression_parameter(
            self._params, lib.ZSTD_c_ldmHashRateLog
        )
    @property
    def threads(self):
        # Number of worker threads used for compression (0 = single-threaded).
        return _get_compression_parameter(self._params, lib.ZSTD_c_nbWorkers)
    def estimated_compression_context_size(self):
        """Estimated size in bytes needed to compress with these parameters."""
        # Delegates directly to the zstd library for the estimate.
        return lib.ZSTD_estimateCCtxSize_usingCCtxParams(self._params)
def estimate_decompression_context_size():
    """Estimate the memory size requirements for a decompressor instance.
    :return:
        Integer number of bytes.
    """
    return lib.ZSTD_estimateDCtxSize()
def _set_compression_parameter(params, param, value):
    """Set a single parameter on a ZSTD_CCtx_params struct, raising ZstdError on failure."""
    zresult = lib.ZSTD_CCtxParams_setParameter(params, param, value)
    if lib.ZSTD_isError(zresult):
        raise ZstdError(
            "unable to set compression context parameter: %s"
            % _zstd_error(zresult)
        )
def _get_compression_parameter(params, param):
    """Read a single parameter from a ZSTD_CCtx_params struct, raising ZstdError on failure."""
    # Out-parameter for the C call.
    result = ffi.new("int *")
    zresult = lib.ZSTD_CCtxParams_getParameter(params, param, result)
    if lib.ZSTD_isError(zresult):
        raise ZstdError(
            "unable to get compression context parameter: %s"
            % _zstd_error(zresult)
        )
    return result[0]
class ZstdCompressionWriter(object):
    """Writable compressing stream wrapper.
    ``ZstdCompressionWriter`` is a write-only stream interface for writing
    compressed data to another stream.
    This type conforms to the ``io.RawIOBase`` interface and should be usable
    by any type that operates against a *file-object* (``typing.BinaryIO``
    in Python type hinting speak). Only methods that involve writing will do
    useful things.
    As data is written to this stream (e.g. via ``write()``), that data
    is sent to the compressor. As compressed data becomes available from
    the compressor, it is sent to the underlying stream by calling its
    ``write()`` method.
    Both ``write()`` and ``flush()`` return the number of bytes written to the
    object's ``write()``. In many cases, small inputs do not accumulate enough
    data to cause a write and ``write()`` will return ``0``.
    Calling ``close()`` will mark the stream as closed and subsequent I/O
    operations will raise ``ValueError`` (per the documented behavior of
    ``io.RawIOBase``). ``close()`` will also call ``close()`` on the underlying
    stream if such a method exists and the instance was constructed with
    ``closefd=True``
    Instances are obtained by calling :py:meth:`ZstdCompressor.stream_writer`.
    Typically usage is as follows:
    >>> cctx = zstandard.ZstdCompressor(level=10)
    >>> compressor = cctx.stream_writer(fh)
    >>> compressor.write(b"chunk 0\\n")
    >>> compressor.write(b"chunk 1\\n")
    >>> compressor.flush()
    >>> # Receiver will be able to decode ``chunk 0\\nchunk 1\\n`` at this point.
    >>> # Receiver is also expecting more data in the zstd *frame*.
    >>>
    >>> compressor.write(b"chunk 2\\n")
    >>> compressor.flush(zstandard.FLUSH_FRAME)
    >>> # Receiver will be able to decode ``chunk 0\\nchunk 1\\nchunk 2``.
    >>> # Receiver is expecting no more data, as the zstd frame is closed.
    >>> # Any future calls to ``write()`` at this point will construct a new
    >>> # zstd frame.
    Instances can be used as context managers. Exiting the context manager is
    the equivalent of calling ``close()``, which is equivalent to calling
    ``flush(zstandard.FLUSH_FRAME)``:
    >>> cctx = zstandard.ZstdCompressor(level=10)
    >>> with cctx.stream_writer(fh) as compressor:
    ...     compressor.write(b'chunk 0')
    ...     compressor.write(b'chunk 1')
    ...     ...
    .. important::
       If ``flush(FLUSH_FRAME)`` is not called, emitted data doesn't
       constitute a full zstd *frame* and consumers of this data may complain
       about malformed input. It is recommended to use instances as a context
       manager to ensure *frames* are properly finished.
    If the size of the data being fed to this streaming compressor is known,
    you can declare it before compression begins:
    >>> cctx = zstandard.ZstdCompressor()
    >>> with cctx.stream_writer(fh, size=data_len) as compressor:
    ...     compressor.write(chunk0)
    ...     compressor.write(chunk1)
    ...     ...
    Declaring the size of the source data allows compression parameters to
    be tuned. And if ``write_content_size`` is used, it also results in the
    content size being written into the frame header of the output data.
    The size of chunks being ``write()`` to the destination can be specified:
    >>> cctx = zstandard.ZstdCompressor()
    >>> with cctx.stream_writer(fh, write_size=32768) as compressor:
    ...     ...
    To see how much memory is being used by the streaming compressor:
    >>> cctx = zstandard.ZstdCompressor()
    >>> with cctx.stream_writer(fh) as compressor:
    ...     ...
    ...     byte_size = compressor.memory_size()
    The total number of bytes written so far are exposed via ``tell()``:
    >>> cctx = zstandard.ZstdCompressor()
    >>> with cctx.stream_writer(fh) as compressor:
    ...     ...
    ...     total_written = compressor.tell()
    ``stream_writer()`` accepts a ``write_return_read`` boolean argument to
    control the return value of ``write()``. When ``False`` (the default),
    ``write()`` returns the number of bytes that were ``write()``'en to the
    underlying object. When ``True``, ``write()`` returns the number of bytes
    read from the input that were subsequently written to the compressor.
    ``True`` is the *proper* behavior for ``write()`` as specified by the
    ``io.RawIOBase`` interface and will become the default value in a future
    release.
    """
    def __init__(
        self,
        compressor,
        writer,
        source_size,
        write_size,
        write_return_read,
        closefd=True,
    ):
        self._compressor = compressor
        self._writer = writer
        self._write_size = write_size
        self._write_return_read = bool(write_return_read)
        self._closefd = bool(closefd)
        self._entered = False
        # _closing suppresses flushing the inner stream while close() runs.
        self._closing = False
        self._closed = False
        self._bytes_compressed = 0
        # Reusable output buffer shared by write() and flush().
        self._dst_buffer = ffi.new("char[]", write_size)
        self._out_buffer = ffi.new("ZSTD_outBuffer *")
        self._out_buffer.dst = self._dst_buffer
        self._out_buffer.size = len(self._dst_buffer)
        self._out_buffer.pos = 0
        zresult = lib.ZSTD_CCtx_setPledgedSrcSize(compressor._cctx, source_size)
        if lib.ZSTD_isError(zresult):
            raise ZstdError(
                "error setting source size: %s" % _zstd_error(zresult)
            )
    def __enter__(self):
        if self._closed:
            raise ValueError("stream is closed")
        if self._entered:
            raise ZstdError("cannot __enter__ multiple times")
        self._entered = True
        return self
    def __exit__(self, exc_type, exc_value, exc_tb):
        # close() flushes the frame; exceptions are never suppressed.
        self._entered = False
        self.close()
        self._compressor = None
        return False
    def memory_size(self):
        """Size in bytes of the underlying compression context."""
        return lib.ZSTD_sizeof_CCtx(self._compressor._cctx)
    def fileno(self):
        # Delegate to the inner stream when it exposes a file descriptor.
        f = getattr(self._writer, "fileno", None)
        if f:
            return f()
        else:
            raise OSError("fileno not available on underlying writer")
    def close(self):
        """Finish the zstd frame and mark the stream closed (idempotent)."""
        if self._closed:
            return
        try:
            self._closing = True
            self.flush(FLUSH_FRAME)
        finally:
            # Mark closed even if the final flush raised.
            self._closing = False
            self._closed = True
        # Call close() on underlying stream as well.
        f = getattr(self._writer, "close", None)
        if self._closefd and f:
            f()
    @property
    def closed(self):
        return self._closed
    def isatty(self):
        return False
    def readable(self):
        return False
    def readline(self, size=-1):
        raise io.UnsupportedOperation()
    def readlines(self, hint=-1):
        raise io.UnsupportedOperation()
    def seek(self, offset, whence=None):
        raise io.UnsupportedOperation()
    def seekable(self):
        return False
    def truncate(self, size=None):
        raise io.UnsupportedOperation()
    def writable(self):
        return True
    def writelines(self, lines):
        raise NotImplementedError("writelines() is not yet implemented")
    def read(self, size=-1):
        raise io.UnsupportedOperation()
    def readall(self):
        raise io.UnsupportedOperation()
    def readinto(self, b):
        raise io.UnsupportedOperation()
    def write(self, data):
        """Send data to the compressor and possibly to the inner stream."""
        if self._closed:
            raise ValueError("stream is closed")
        total_write = 0
        data_buffer = ffi.from_buffer(data)
        in_buffer = ffi.new("ZSTD_inBuffer *")
        in_buffer.src = data_buffer
        in_buffer.size = len(data_buffer)
        in_buffer.pos = 0
        out_buffer = self._out_buffer
        out_buffer.pos = 0
        # Keep feeding until the compressor consumed all input, draining the
        # output buffer to the inner stream whenever it fills.
        while in_buffer.pos < in_buffer.size:
            zresult = lib.ZSTD_compressStream2(
                self._compressor._cctx,
                out_buffer,
                in_buffer,
                lib.ZSTD_e_continue,
            )
            if lib.ZSTD_isError(zresult):
                raise ZstdError(
                    "zstd compress error: %s" % _zstd_error(zresult)
                )
            if out_buffer.pos:
                self._writer.write(
                    ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
                )
                total_write += out_buffer.pos
                self._bytes_compressed += out_buffer.pos
                out_buffer.pos = 0
        # Return bytes consumed or bytes emitted, per write_return_read.
        if self._write_return_read:
            return in_buffer.pos
        else:
            return total_write
    def flush(self, flush_mode=FLUSH_BLOCK):
        """Evict data from compressor's internal state and write it to inner stream.
        Calling this method may result in 0 or more ``write()`` calls to the
        inner stream.
        This method will also call ``flush()`` on the inner stream, if such a
        method exists.
        :param flush_mode:
           How to flush the zstd compressor.
           ``zstandard.FLUSH_BLOCK`` will flush data already sent to the
           compressor but not emitted to the inner stream. The stream is still
           writable after calling this. This is the default behavior.
           See documentation for other ``zstandard.FLUSH_*`` constants for more
           flushing options.
        :return:
           Integer number of bytes written to the inner stream.
        """
        if flush_mode == FLUSH_BLOCK:
            flush = lib.ZSTD_e_flush
        elif flush_mode == FLUSH_FRAME:
            flush = lib.ZSTD_e_end
        else:
            raise ValueError("unknown flush_mode: %r" % flush_mode)
        if self._closed:
            raise ValueError("stream is closed")
        total_write = 0
        out_buffer = self._out_buffer
        out_buffer.pos = 0
        # Empty input: we only want the compressor to emit buffered data.
        in_buffer = ffi.new("ZSTD_inBuffer *")
        in_buffer.src = ffi.NULL
        in_buffer.size = 0
        in_buffer.pos = 0
        while True:
            zresult = lib.ZSTD_compressStream2(
                self._compressor._cctx, out_buffer, in_buffer, flush
            )
            if lib.ZSTD_isError(zresult):
                raise ZstdError(
                    "zstd compress error: %s" % _zstd_error(zresult)
                )
            if out_buffer.pos:
                self._writer.write(
                    ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
                )
                total_write += out_buffer.pos
                self._bytes_compressed += out_buffer.pos
                out_buffer.pos = 0
            # zresult == 0 means the flush/end operation completed.
            if not zresult:
                break
        # Propagate flush() to the inner stream, except while closing.
        f = getattr(self._writer, "flush", None)
        if f and not self._closing:
            f()
        return total_write
    def tell(self):
        # Total compressed bytes emitted to the inner stream so far.
        return self._bytes_compressed
class ZstdCompressionObj(object):
"""A compressor conforming to the API in Python's standard library.
This type implements an API similar to compression types in Python's
standard library such as ``zlib.compressobj`` and ``bz2.BZ2Compressor``.
This enables existing code targeting the standard library API to swap
in this type to achieve zstd compression.
.. important::
The design of this API is not ideal for optimal performance.
The reason performance is not optimal is because the API is | |
from lowest to highest amplitude value
sort = np.argsort(np.array(amps_fit)[indices_blended])
return indices_blended[sort]
def remove_components(params_fit, remove_indices):
    """Remove parameters of Gaussian fit components.
    Parameters
    ----------
    params_fit : list
        Parameter vector in the form of [amp1, ..., ampN, fwhm1, ..., fwhmN, mean1, ..., meanN].
    remove_indices : int, list, np.ndarray
        Indices of Gaussian fit components, whose parameters should be removed from params_fit.
    Returns
    -------
    params_fit : list
        Updated list from which the parameters of the selected Gaussian fit components were removed.
    """
    ncomps = number_of_components(params_fit)
    amps, fwhms, offsets = split_params(params_fit, ncomps)
    # Normalize remove_indices to a plain Python list.
    if isinstance(remove_indices, np.ndarray):
        indices = list(remove_indices)
    elif isinstance(remove_indices, list):
        indices = remove_indices
    else:
        indices = [remove_indices]
    # Drop the selected components from each parameter group.
    kept = [list(np.delete(np.array(group), indices))
            for group in (amps, fwhms, offsets)]
    return kept[0] + kept[1] + kept[2]
def get_best_fit(vel, data, errors, params_fit, dct, first=False,
                 best_fit_list=None, signal_ranges=None, signal_mask=None,
                 force_accept=False, params_min=None, params_max=None,
                 noise_spike_mask=None):
    """Determine new best fit for spectrum.
    If this is the first fit iteration for the spectrum a new best fit is assigned and its parameters are returned in best_fit_list.
    If it is not the first fit iteration, the new fit is compared to the current best fit supplied in best_fit_list. If the new fit is preferred (decided via the AICc criterion), the parameters of the new fit are returned in best_fit_list. Otherwise, the old best_fit_list is returned.
    Parameters
    ----------
    vel : numpy.ndarray
        Velocity channels (unitless).
    data : numpy.ndarray
        Original data of spectrum.
    errors : numpy.ndarray
        Root-mean-square noise values.
    params_fit : list
        Parameter vector in the form of [amp1, ..., ampN, fwhm1, ..., fwhmN, mean1, ..., meanN].
    dct : dict
        Dictionary containing parameter settings for the improved fitting.
    first : bool
        Default is 'False'. If set to 'True', the new fit will be assigned as best fit and returned in best_fit_list.
    best_fit_list : list
        List containing parameters of the current best fit for the spectrum. It is of the form [{0} params_fit, {1} params_errs, {2} ncomps_fit, {3} best_fit, {4} residual, {5} rchi2, {6} aicc, {7} new_fit, {8} params_min, {9} params_max, {10} pvalue, {11} quality_control]
    signal_ranges : list
        Nested list containing info about ranges of the spectrum that were estimated to contain signal. The goodness-of-fit calculations are only performed for the spectral channels within these ranges.
    signal_mask : numpy.ndarray
        Boolean array containing the information of signal_ranges.
    force_accept : bool
        Experimental feature. Default is 'False'. If set to 'True', the new fit will be forced to become the best fit.
    params_min : list
        List of minimum limits for parameters: [min_amp1, ..., min_ampN, min_fwhm1, ..., min_fwhmN, min_mean1, ..., min_meanN]
    params_max : list
        List of maximum limits for parameters: [max_amp1, ..., max_ampN, max_fwhm1, ..., max_fwhmN, max_mean1, ..., max_meanN]
    noise_spike_mask : numpy.ndarray
        Mask of channels flagged as noise spikes; forwarded to the residual normality check.
    Returns
    -------
    best_fit_list : list
        List containing parameters of the chosen best fit for the spectrum. It is of the form [{0} params_fit, {1} params_errs, {2} ncomps_fit, {3} best_fit, {4} residual, {5} rchi2, {6} aicc, {7} new_fit, {8} params_min, {9} params_max, {10} pvalue, {11} quality_control]
    """
    if not first:
        # Reset the new_fit flag of the incumbent fit; keep its quality log.
        best_fit_list[7] = False
        quality_control = best_fit_list[11]
    else:
        quality_control = []
    ncomps_fit = number_of_components(params_fit)
    params_fit, params_errs, ncomps_fit = perform_least_squares_fit(
        vel, data, errors, params_fit, dct, params_min=params_min, params_max=params_max)
    # check if fit components satisfy mandatory criteria
    if ncomps_fit > 0:
        refit = True
        # check_params_fit may drop/adjust components; repeat until stable.
        while refit:
            params_fit, params_errs, ncomps_fit, params_min, params_max, quality_control, refit = check_params_fit(
                vel, data, errors, params_fit, params_errs, dct,
                quality_control, signal_ranges=signal_ranges)
        best_fit = func(vel, *params_fit).ravel()
    else:
        # No surviving components: the model is a flat zero spectrum.
        best_fit = data * 0
    rchi2, aicc = goodness_of_fit(
        data, best_fit, errors, ncomps_fit, mask=signal_mask, get_aicc=True)
    residual = data - best_fit
    pvalue = check_residual_for_normality(residual, errors, mask=signal_mask,
                                          noise_spike_mask=noise_spike_mask)
    # return the list of best fit results if there was no old list of best fit results for comparison
    if first:
        new_fit = True
        return [params_fit, params_errs, ncomps_fit, best_fit, residual, rchi2,
                aicc, new_fit, params_min, params_max, pvalue, quality_control]
    # return new best_fit_list if the AICc value is smaller
    aicc_old = best_fit_list[6]
    # Require a non-negligible AICc improvement (atol=0.1) unless forced.
    if ((aicc < aicc_old) and not np.isclose(aicc, aicc_old, atol=1e-1)) or force_accept:
        new_fit = True
        return [params_fit, params_errs, ncomps_fit, best_fit, residual, rchi2,
                aicc, new_fit, params_min, params_max, pvalue, quality_control]
    # return old best_fit_list if the aicc value is higher
    best_fit_list[7] = False
    return best_fit_list
def check_for_negative_residual(vel, data, errors, best_fit_list, dct,
                                signal_ranges=None, signal_mask=None,
                                force_accept=False, get_count=False,
                                get_idx=False, noise_spike_mask=None):
    """Check for negative residual features and try to refit them.
    We define negative residual features as negative peaks in the residual that were introduced by the fit. These negative peaks have to have a minimum negative signal-to-noise ratio of dct['snr_negative'].
    In case of a negative residual feature, we try to replace the Gaussian fit component that is causing the feature with two narrower components. We only accept this solution if it yields a better fit as determined by the AICc value.
    Parameters
    ----------
    vel : numpy.ndarray
        Velocity channels (unitless).
    data : numpy.ndarray
        Original data of spectrum.
    errors : numpy.ndarray
        Root-mean-square noise values.
    best_fit_list : list
        List containing parameters of the current best fit for the spectrum. It is of the form [{0} params_fit, {1} params_errs, {2} ncomps_fit, {3} best_fit, {4} residual, {5} rchi2, {6} aicc, {7} new_fit, {8} params_min, {9} params_max, {10} pvalue]
    dct : dict
        Dictionary containing parameter settings for the improved fitting.
    signal_ranges : list
        Nested list containing info about ranges of the spectrum that were estimated to contain signal. The goodness-of-fit calculations are only performed for the spectral channels within these ranges.
    signal_mask : numpy.ndarray
        Boolean array containing the information of signal_ranges.
    force_accept : bool
        Experimental feature. Default is 'False'. If set to 'True', the new fit will be forced to become the best fit.
    get_count : bool
        Default is 'False'. If set to 'True', only the number of occurring negative residual features will be returned.
    get_idx : bool
        Default is 'False'. If set to 'True', the index of the Gaussian fit component causing the negative residual feature is returned. In case of multiple negative residual features, only the index of one of them is returned.
    noise_spike_mask : numpy.ndarray
        Mask of channels flagged as noise spikes; forwarded to get_best_fit.
    Returns
    -------
    best_fit_list : list
        List containing parameters of the chosen best fit for the spectrum. It is of the form [{0} params_fit, {1} params_errs, {2} ncomps_fit, {3} best_fit, {4} residual, {5} rchi2, {6} aicc, {7} new_fit, {8} params_min, {9} params_max, {10} pvalue]
    """
    params_fit = best_fit_list[0]
    ncomps_fit = best_fit_list[2]
    # in case a single rms value is given instead of an array
    if not isinstance(errors, np.ndarray):
        errors = np.ones(len(data)) * errors
    # Nothing fitted yet: there can be no fit-introduced residual features.
    if ncomps_fit == 0:
        if get_count:
            return 0
        return best_fit_list
    residual = best_fit_list[4]
    amps_fit, fwhms_fit, offsets_fit = split_params(params_fit, ncomps_fit)
    # Search the *negated* residual for peaks, i.e. negative dips.
    amp_guesses, fwhm_guesses, offset_guesses = get_initial_guesses(
        residual, errors[0], dct['snr_negative'], dct['significance'],
        peak='negative')
    # check if negative residual feature was already present in the data
    remove_indices = []
    for i, offset in enumerate(offset_guesses):
        if residual[offset] > (data[offset] - dct['snr']*errors[0]):
            remove_indices.append(i)
    if len(remove_indices) > 0:
        amp_guesses, fwhm_guesses, offset_guesses = remove_components_from_sublists(
            [amp_guesses, fwhm_guesses, offset_guesses], remove_indices)
    if get_count:
        return (len(amp_guesses))
    if len(amp_guesses) == 0:
        return best_fit_list
    # in case of multiple negative residual features, sort them in order of increasing amplitude values
    sort = np.argsort(amp_guesses)
    amp_guesses = np.array(amp_guesses)[sort]
    fwhm_guesses = np.array(fwhm_guesses)[sort]
    offset_guesses = np.array(offset_guesses)[sort]
    for amp, fwhm, offset in zip(amp_guesses, fwhm_guesses, offset_guesses):
        # Channel window around the feature (one FWHM each side, clipped at 0).
        idx_low = max(0, int(offset - fwhm))
        idx_upp = int(offset + fwhm) + 2
        exclude_idx = check_which_gaussian_contains_feature(
            idx_low, idx_upp, fwhms_fit, offsets_fit)
        if get_idx:
            return exclude_idx
        if exclude_idx is None:
            continue
        # Replace the offending component with two narrower ones and refit;
        # get_best_fit only accepts the change if the AICc improves.
        params_fit = replace_gaussian_with_two_new_ones(
            data, vel, errors[0], dct['snr'], dct['significance'],
            params_fit, exclude_idx, offset)
        best_fit_list = get_best_fit(
            vel, data, errors, params_fit, dct, first=False,
            best_fit_list=best_fit_list, signal_ranges=signal_ranges,
            signal_mask=signal_mask, force_accept=force_accept,
            noise_spike_mask=noise_spike_mask)
        # Refresh local views of the (possibly) updated best fit.
        params_fit = best_fit_list[0]
        ncomps_fit = best_fit_list[2]
        amps_fit, fwhms_fit, offsets_fit = split_params(params_fit, ncomps_fit)
    return best_fit_list
def try_fit_with_new_components(vel, data, errors, best_fit_list, dct,
exclude_idx, signal_ranges=None,
signal_mask=None, force_accept=False,
baseline_shift_snr=0, noise_spike_mask=None):
"""Exclude Gaussian fit component and try fit with new initial guesses.
First we try a new refit by just removing the component (i) and adding no new components. If this does not work we determine guesses for additional fit components from the residual that is produced if the component (i) is discarded and try a new fit. We only accept the new fit solution if it yields a better fit as | |
issue
"""
# Recover InteractiveSession
isess = deserialize_isession_and_prepare_db_session()
if isess and isinstance(isess, Response):
return isess
# A reproducible session must be open, signal about it if not
if isess.reproducible_session_opened():
if isess.state:
code = request.args.get("code", None)
try:
string_to_ast(simple_ident, code)
except:
code = None
if code is None:
r = build_json_response({"error": "Query parameter 'code' is mandatory"}, 401)
else:
cs_path = nexinfosys.get_global_configuration_variable("CASE_STUDIES_DIR")
fname = cs_path+os.sep+code+".state_serialized"
if os.path.exists(fname):
os.remove(fname)
r = build_json_response({}, 204)
else:
r = build_json_response({"error": f"A state with code {code} did not exist"}, 401)
else:
r = build_json_response({}, 204)
else:
r = build_json_response({"error": "Cannot delete state, no open reproducible session"}, 401)
return r
@app.route(nis_api_base + "/isession/rsession/state/", methods=["GET"])
def reproducible_session_list_states():  # List available states
    """
    List codes of all previously saved states
    :return: A JSON with a single entry "codes", with a list of the codes to address the saved states. Error if there is an issue
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # A reproducible session must be open, signal about it if not
    if isess.reproducible_session_opened():
        cs_path = nexinfosys.get_global_configuration_variable("CASE_STUDIES_DIR")
        # NOTE(review): this lists every file in the directory, not only
        # "*.state_serialized" files — confirm whether filtering is wanted.
        lst = [f for f in os.listdir(cs_path) if os.path.isfile(f"{cs_path}{os.sep}{f}")]
        # BUG FIX: this previously answered HTTP 204 ("No Content"), which
        # forbids a response body, so the {"codes": [...]} payload was dropped
        # by compliant clients. 200 delivers the list.
        r = build_json_response({"codes": lst}, 200)
    else:
        r = build_json_response({"error": "Cannot return the list of states, no open reproducible session"}, 401)
    return r
@app.route(nis_api_base + "/isession/rsession/state", methods=["GET"])
def reproducible_session_load_state():
    """
    Loads a previously saved state in the reproducible session. After this call, output datasets can be retrieved or
    new parameters for the dynamic scenario submitted.
    A "code" Query parameter must be passed with a code for the saved state.
    :return: Empty if everything is ok (the state is on the backend side). Error if there is an issue
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # A reproducible session must be open, signal about it if not
    if isess.reproducible_session_opened():
        cs_path = nexinfosys.get_global_configuration_variable("CASE_STUDIES_DIR")
        # Validate that "code" is a simple identifier (also guards against
        # path traversal in the file name built below).
        code = request.args.get("code", None)
        try:
            string_to_ast(simple_ident, code)
        except:
            code = None
        if code is None:
            r = build_json_response({"error": "Query parameter 'code' is mandatory"}, 401)
        else:
            fname = cs_path + os.sep + code + ".state_serialized"
            # BUG FIX: opening a non-existent file raised an unhandled
            # FileNotFoundError (HTTP 500); report it like the delete route does.
            if not os.path.exists(fname):
                r = build_json_response({"error": f"A state with code {code} did not exist"}, 401)
            else:
                with open(fname, "rt") as f:
                    s = f.read()
                isess.state = deserialize_state(s)
                r = build_json_response({}, 204)
    else:
        r = build_json_response({"error": "Cannot load state, no open reproducible session"}, 401)
    return r
# ----------------------------------------------------------------------------------------------------------------------
@app.route(nis_api_base + "/isession/rsession/state.pickled", methods=["GET"])
def reproducible_session_get_state():  # Return current status of ReproducibleSession
    """Return the current state of the open reproducible session, encoded with jsonpickle."""
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # A reproducible session must be open, signal about it if not
    if not isess.reproducible_session_opened():
        return build_json_response({"error": "Cannot return state, no open reproducible session"}, 401)
    # No state yet: empty 204 answer
    if not isess.state:
        return build_json_response({}, 204)
    return build_json_response(jsonpickle.encode(isess.state), 200)
@app.route(nis_api_base + "/isession/rsession/state_query", methods=["GET"])
def reproducible_session_query_state():  # Query aspects of State
    """Query aspects of the current state (not implemented yet)."""
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # A reproducible session must be open, signal about it if not
    if isess.reproducible_session_opened():
        if isess.state:
            # TODO Parse query, execute it, return results
            # TODO By concept: Datasets, processors, factors, factor types, hierarchies, mappings, ISSUES (extra MuSIASEM, errors in some level: syntax, semantics, solving)
            # TODO Information: name, quantitites (attached to factors), relations, hierarchy (for hierarchies)
            # TODO By observer
            # BUG FIX: this branch previously only contained `pass`, leaving
            # `r` unassigned and making `return r` raise UnboundLocalError.
            # Answer 501 Not Implemented until the query engine exists.
            r = build_json_response({"error": "State query not implemented yet"}, 501)
        else:
            r = build_json_response({}, 204)
    else:
        r = build_json_response({"error": "Cannot return state, no open reproducible session"}, 401)
    return r
@app.route(nis_api_base + "/isession/rsession/state_query/issues", methods=["GET"])
def reproducible_session_query_state_list_issues():  # Query list of issues IN the current state
    """Return the issues recorded in the current state, as {"issues": [...]}."""
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # A reproducible session must be open, signal about it if not
    if not isess.reproducible_session_opened():
        return build_json_response({"error": "Cannot return state, no open reproducible session"}, 401)
    if not isess.state:
        return build_json_response([], 204)
    # Fall back to an empty list when no issues were recorded.
    issues = isess.state.get("_issues") or []
    return build_json_response({"issues": issues}, 200)
@app.route(nis_api_base + "/isession/rsession/state_query/everything_executed", methods=["GET"])
def reproducible_session_query_state_everything_executed():  # Query if all commands have been executed
    """Answer {"everything_executed": bool}: True iff every workspace command has started executing."""
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # A reproducible session must be open, signal about it if not
    if not isess.reproducible_session_opened():
        return build_json_response({"error": "Cannot return state, no open reproducible session"}, 401)
    commands = isess.reproducible_session.ws_commands
    # Index of the first command never started; len(commands) if all did.
    pending = next(
        (i for i, c in enumerate(commands) if not c.execution_start),
        len(commands))
    return build_json_response({"everything_executed": pending == len(commands)}, 200)
@app.route(nis_api_base + "/isession/rsession/state_query/outputs", methods=["GET"])
@app.route(nis_api_base + "/isession/rsession/state_query/datasets", methods=["GET"])
def reproducible_session_query_state_list_results():  # Query list of outputs (not only datasets) IN the current state
    """List the outputs (datasets and other results) available in the current state."""
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # A reproducible session must be open, signal about it if not
    if isess.reproducible_session_opened():
        if isess.state:
            r = build_json_response(get_results_in_session(isess), 200)
        else:
            r = build_json_response([], 204)
    else:
        r = build_json_response({"error": "Cannot return list of results, no reproducible session open"}, 401)
    # NOTE(review): this debug call runs even when no reproducible session is
    # open — confirm printNProcessors tolerates an empty/None state.
    printNProcessors("LIST OF OUTPUTS", isess.state)
    return r
@app.route(nis_api_base + "/isession/rsession/state_query/webdav", methods=["PUT"])
def copy_resource_to_webdav():
    """
    Read a resource and put the result into WebDAV server

    PROBABLY REQUIRES MULTIPLE WORKERS because datasets are obtained via a recursive "RESTful call"

    :return:
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # Request body carries the resource to read and the WebDAV destination URL.
    tmp = request.get_json()
    source_url = tmp["sourceURL"]
    target_url = tmp["targetURL"]
    from urllib.parse import urlparse
    pr = urlparse(target_url)
    # Check host: only the configured WebDAV server may be written to.
    wv_host_name = nexinfosys.get_global_configuration_variable("FS_SERVER") \
        if nexinfosys.get_global_configuration_variable("FS_SERVER") else "nextcloud.data.magic-nexus.eu"
    if wv_host_name.lower() != pr.netloc:
        return build_json_response({"error": f"Cannot save the file in the requested server location, {pr.netloc}, which is different from the configured one, {wv_host_name}"}, 401)
    # Modify URL: keep the target directory, then append the source file name.
    target_url = f"{pr.scheme}://{pr.netloc}{os.path.split(pr.path)[0]}"
    pr = urlparse(source_url)
    target_url += f"/{os.path.split(pr.path)[1]}"
    # READ (reentrant): fetch the resource through this same web service.
    self_schema = nexinfosys.get_global_configuration_variable("SELF_SCHEMA") \
        if nexinfosys.get_global_configuration_variable("SELF_SCHEMA") else request.host_url
    import requests
    requested_resource = f"{self_schema}{source_url[1:]}"
    logging.debug(f"REENTRANT REQUEST: {requested_resource}")
    # NOTE(review): verify=False disables TLS certificate checking on the
    # reentrant request -- acceptable only if the self-call target is trusted; confirm.
    r = requests.get(requested_resource, cookies=request.cookies, verify=False)
    # WRITE the fetched bytes to the WebDAV target.
    wv_upload_file(io.BytesIO(r.content), target_url)
    logging.debug(f"REQUESTED RESOURCE UPLOADED TO NEXTCLOUD at {target_url}")
    return build_json_response([], 204)
# -- DYNAMIC PARAMETERS --
@app.route(nis_api_base + "/isession/rsession/state_query/parameters", methods=["GET"])
def get_parameter_definitions():
    """Return a JSON enumeration of every parameter defined for the case study."""
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    return build_json_response(get_parameters_in_state(isess.state), 200)
@app.route(nis_api_base + "/isession/rsession/state_query/parameters", methods=["PUT"])
def set_parameters_and_solve():
    """Solve a single "interactive" scenario built from the posted parameters.

    The request body is a JSON dictionary of parameter values. Solver issues
    are stored in state under "_issues" and echoed back in the response.
    """
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    parameter_values = request.get_json()
    solver_issues = prepare_and_solve_model(isess.state, parameter_values)
    # Keep the issues in state so later queries can retrieve them.
    isess.state.set("_issues", solver_issues)
    response = build_json_response({"issues": convert_issues(solver_issues), "outputs": None}, 200)
    # Must serialize in order to later recover the datasets
    serialize_isession_and_close_db_session(isess)
    return response
@app.route(nis_api_base + "/isession/rsession/state_query/scenarios", methods=["GET"])
def get_scenarios():
    """Return the scenarios defined in state, with each one's parameter values."""
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    return build_json_response(get_scenarios_in_state(isess.state), 200)
@app.route(nis_api_base + "/isession/rsession/state_query/geolayer.<format>", methods=["GET"])
def get_geolayer_service(format):
    """Serve the geographic layer derived from state, rendered as <format>."""
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    content, content_type, ok = get_geolayer(isess.state, format)
    status = 200 if ok else 401
    return Response(content, mimetype=content_type, status=status)
@app.route(nis_api_base + "/isession/rsession/state_query/ontology.<format>", methods=["GET"])
def get_ontology_service(format):
    """Serve the ontology derived from state, rendered as <format>."""
    # TODO OWLREADY2 installation on the Docker image issues a problem
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    content, content_type, ok = get_ontology(isess.state, format)
    status = 200 if ok else 401
    return Response(content, mimetype=content_type, status=status)
@app.route(nis_api_base + "/isession/rsession/state_query/python_script.<format>", methods=["GET"])
def get_python_script(format):
"""
script capaz de reproducir lo ejecutado
* login
* open
* load_workbook
* load_workbook desde Nextcloud, sin credenciales
* mostrar cómo obtener cada uno de los datasets, comentado (llamar a "query_state_list_results(isess)")
* mostrar cómo utilizar cada uno de los datasets, comentado también
* Jupyter sólo: script capaz de relanzar, selección de parámetros, reejecución, recogida de datasets (igual)
:param format:
:return:
"""
# Recover InteractiveSession
isess = deserialize_isession_and_prepare_db_session()
if isess and isinstance(isess, Response):
return isess
output = None
# Generate graph from State
if isess.state:
if format == "python":
# TODO Prepare Python file
output = io.StringIO()
mimetype = "application/x-python-code" # or text/x-python
elif format == "jupyternotebook":
output = generate_jupyter_notebook_python(isess.state)
mimetype = "application/x-ipynb+json" # TODO
if output:
return Response(output, mimetype=mimetype, status=200)
else:
return build_json_response({"error": F"Cannot return Python | |
<gh_stars>1-10
from os import path
import numpy as np
from cycgkit.cgtypes import vec3
from glaze.GL import *
from glaze.utils import sizeofArray
from .RenderTarget_OGL3 import RenderTarget
from .shader_management.ShadersManagerClass import ShadersManager
from ..RenderTargetBase import attachmentTypeEnum, renderTextureTypeEnum
from ..base_backend import BaseBackend, _setBoneTransformationsForMesh, _setObjectUniforms, _setSceneUniforms, \
setMaterialValues
from ...fse_management.FSEManagerClass import FSEManager, FullScreenEffect
class OGL3Backend(BaseBackend):
    def __init__(self, engine):
        """OpenGL 3 rendering backend tied to one engine instance.

        Sets up shader/texture management, the full-screen-effect pipeline,
        lazily-filled vertex/index buffer caches and static cube/quad
        geometry, then applies the initial GL context state.
        """
        super(OGL3Backend, self).__init__()
        self._engine = engine
        self._currentRenderTarget = None
        self.fullScreenEffects = FSEManager(engine,
                                            self)  # todo: move FSEManager to engine?, so all fseffects are shared
        # among windows
        self._defaultClearColor = vec3(0.50, 0.50, 0.50)
        # Cached last clear color so glClearColor is only issued on change.
        self._lastClearColor = None
        self._setClearColor(self._defaultClearColor)
        self._shaders = ShadersManager()
        self._shaders.initialize(engine)
        self._textures = self._engine.textures
        # Triangle count accumulated per frame (reset in drawAll).
        self._poliCount = 0
        self._drawingData = None
        # When set, this shader is forced for the scene pass (used by FSEs).
        self._shaderOverride = None
        self._currentFSE = None
        self._lastShader = None
        # Per-mesh GPU buffer caches, filled lazily in renderMeshes.
        self._vertexBuffers = {}
        self._indexBuffers = {}
        # Unit cube as 36 vertices (6 faces x 2 triangles); the 4th component
        # is a per-face index.
        self._cube_array = np.array([[-1, 1, 1, 0], [-1, 1, -1, 1], [1, 1, -1, 2],
                                     [-1, 1, 1, 3], [1, 1, -1, 4], [1, 1, 1, 5],
                                     [-1, -1, 1, 0], [-1, 1, 1, 1], [1, 1, 1, 2],
                                     [-1, -1, 1, 3], [1, 1, 1, 4], [1, -1, 1, 5],
                                     [1, -1, 1, 0], [1, 1, 1, 1], [1, 1, -1, 2],
                                     [1, -1, 1, 3], [1, 1, -1, 4], [1, -1, -1, 5],
                                     [1, -1, -1, 0], [1, 1, -1, 1], [-1, 1, -1, 2],
                                     [1, -1, -1, 3], [-1, 1, -1, 4], [-1, -1, -1, 5],
                                     [-1, -1, -1, 0], [-1, 1, -1, 1], [-1, 1, 1, 2],
                                     [-1, -1, -1, 3], [-1, 1, 1, 4], [-1, -1, 1, 5],
                                     [-1, -1, 1, 0], [1, -1, 1, 1], [1, -1, -1, 2],
                                     [-1, -1, 1, 3], [1, -1, -1, 4], [-1, -1, -1, 5]], dtype=np.float32).flatten()
        self._cubeVBO = None
        # Full-screen quad (two triangles) used by post-processing passes.
        self._screenQuad_array = np.array([[-1, -1, -0.1], [1, -1, -0.1], [-1, 1, -0.1],
                                           [1, -1, -0.1], [1, 1, -0.1], [-1, 1, -0.1]], dtype=np.float32).flatten()
        self._screenQuadVBO = None
        self._screenQuadShader = None
        # debug>>>>>>>>>>>>>>>
        self.__isDebugEnabled = False
        self._wireShader = None
        # <<<<<<<<<<<<<<<<<<<<
        self._culling = True
        self.setContextState()
    def setContextState(self):
        """Apply the backend's baseline OpenGL state (depth, cull, blend)."""
        glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS)
        glEnable(GL_MULTISAMPLE)
        glEnable(GL_DEPTH_TEST)
        glDepthMask(GL_TRUE)
        # glDepthMask(GL_FALSE)
        glDepthFunc(GL_LEQUAL)
        # glDepthFunc(GL_LESS)
        glDepthRange(0, 1.0)
        # Counter-clockwise front faces; back faces culled when culling is on.
        glCullFace(GL_BACK)
        glFrontFace(GL_CCW)
        glEnable(GL_BLEND)
        glEnable(GL_SAMPLE_ALPHA_TO_COVERAGE)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glClearDepth(1.0)
@property
def culling(self):
return self._culling
@culling.setter
def culling(self, value):
if value:
glEnable(GL_CULL_FACE) # <<<<<<<<<<<< Culling
else:
glDisable(GL_CULL_FACE)
self._culling = value
    def getShadersManager(self):
        """Return the backend's ShadersManager instance."""
        return self._shaders

    @staticmethod
    def getRenderTarget():
        """Return the RenderTarget class used by this backend."""
        return RenderTarget
def _setClearColor(self, color):
"""
@type color: vec3
"""
if not self._lastClearColor or self._lastClearColor != color:
r, g, b = color
glClearColor(r, g, b, 1.0)
self._lastClearColor = color
    def drawAll(self, drawingData):
        """Render one frame: scene pass first, then any full-screen effects.

        ``drawingData`` carries meshes, per-mesh instance data, scene params
        and the clear color for the frame.
        """
        self._drawingData = drawingData
        self._poliCount = 0
        # postUsed = False
        sceneNeeded = False
        allEffects = self.fullScreenEffects._orderedEffects
        effectsRange = range(allEffects.__len__())
        if effectsRange.__len__() > 0:
            # First/last pass bookkeeping so _renderFSEffect knows when to
            # restore the default render target.
            firstEffect = allEffects[0]
            firstPass = firstEffect.getActiveTechnique().passes[0]
            lastEffect = allEffects[effectsRange.__len__() - 1]
            lastTech = lastEffect.getActiveTechnique()
            lastPass = lastTech.passes[lastTech.passes.__len__() - 1]
        else:
            firstEffect = None
            firstPass = None
            # lastEffect = None
            # lastTech = None
            lastPass = None
        # The scene must be rendered to a texture when any effect samples it
        # and there is more than one pass/effect in play.
        for effect in allEffects:
            if effect.getActiveTechnique().needsScene and (
                    effectsRange.__len__() > 1 or len(effect.getActiveTechnique().passes) > 1):
                sceneNeeded = True
                break
        native = self.performScenePass(firstEffect, firstPass, sceneNeeded, lastPass)
        # todo: add skybox target parameter to fse to deactivate colorattachments accordingly before this.
        # if drawingData.sky is not None:
        #     if self._cubeVBO is None:
        #         self.__createCubeVBO()
        #     self._cubeVBO.bind()
        #     self.drawSky(drawingData.sky)
        #     self._cubeVBO.unbind()
        if firstPass is not lastPass:
            # Run every effect; when the scene pass already consumed the first
            # effect ("native" False), its pass 0 is skipped inside.
            for effect in allEffects:
                self._renderFSEffect(effect, (not native), lastPass)
                native = True
    def performScenePass(self, firstEffect, firstPass, sceneNeeded, lastPass):
        """Render the scene, either directly or into the scene render target.

        Returns True when the scene was drawn natively, False when the first
        effect's pass already rendered it.
        """
        if sceneNeeded:
            # Lazily build the scene RT, then render into color attachment 0.
            if not self.fullScreenEffects._sceneRT:
                self.fullScreenEffects._initializeSceneRT()
            self.fullScreenEffects._sceneRT._activate([attachmentTypeEnum.depth], colorIndexes=[0])
        if not self._screenQuadVBO:
            self.__createScreenQuadStuff()
        if firstEffect and '_raw' in firstPass.members['in']:
            # The first effect samples the raw scene: let it drive the pass.
            self._renderFSEffect(firstEffect, lastPass=lastPass, isScenePass=True)
            return False
        else:
            self.drawScene(0)
            return True
    def _renderFSEffect(self, effect, fromOverride=False, lastPass=None, isScenePass=False):
        """Run one full-screen effect: bind its shader(s), textures and
        render-target attachments per pass, then draw the scene or the quad.

        @param fromOverride: skip pass 0 (it already ran as the scene pass)
        @param isScenePass: run only pass 0, with its shader as scene override
        """
        tech = effect.getActiveTechnique()
        if tech:
            self._currentFSE = effect
            assert isinstance(effect, FullScreenEffect)
            if isScenePass:
                passRange = [0]
            else:
                passRange = range(len(tech.passes))
            for i in passRange:
                if fromOverride and i == 0:
                    continue
                passOb = tech.passes[i]
                isLastPass = passOb is lastPass
                # Shaders are cached per (effect, vertex shader, fragment shader).
                sid = effect.ID + passOb.members['vertex'] + passOb.members['fragment']
                shader = self.fullScreenEffects._e3dShaders[sid]
                if i == 0 and isScenePass:
                    self._shaderOverride = shader
                else:
                    self._shaderOverride = None
                shader.set()
                # Bind the effect's own static 2D and cube textures.
                for txID in effect.textures2d:
                    tex = self._textures.getTexture(effect.ID + '_' + txID)
                    if tex:
                        shader.setTexture(txID, tex)
                for txID in effect.texturesCube:
                    tex = self._textures.getTextureCube(effect.ID + '_' + txID)
                    if tex:
                        shader.setTexture(txID, tex)
                indexes = []
                aTypes = [attachmentTypeEnum.depth]
                # aTypes = []
                targetsIDs = []
                rt = self.fullScreenEffects._builtRenderTargets.get(effect.ID)
                # Resolve this pass's declared outputs to color attachment indexes.
                for outID in passOb.members['out']:
                    if outID not in ['_raw', '_depth', '_stencil', '_scene']:
                        ind = rt._attachments.get(effect.ID + '_' + outID)
                        if ind is not None:
                            indexes.append(ind)
                            targetsIDs.append(outID)
                if len(indexes) > 0:
                    rt._activate(aTypes, indexes)
                # Bind input textures that are not simultaneously render targets.
                for txID in passOb.members['in']:
                    if txID not in ['_raw'] and txID not in targetsIDs:
                        realID = effect.ID + '_' + txID if txID not in ['_scene', '_depth'] else txID
                        if effect.textureType == renderTextureTypeEnum.t2D:
                            value = self._textures.getTexture(realID)
                            shader.setTexture(txID, value)
                        else:
                            pass
                            # shader.setTextureCube(txID, realID)
                for txID in passOb.members['out']:
                    if txID not in ['_scene', '_raw', '_depth', '_stencil'] and txID not in targetsIDs:
                        realID = effect.ID + '_' + txID if txID != '_scene' else txID
                        if effect.textureType == renderTextureTypeEnum.t2D:
                            value = self._textures.getTexture(realID)
                            shader.setTexture(txID, value)
                        else:
                            pass
                            # shader.setTextureCube(txID, realID)
                if isLastPass and self.fullScreenEffects._sceneRT:
                    # Final pass renders to the default framebuffer.
                    self.fullScreenEffects._sceneRT._deActivate()
                if '_raw' in passOb.members['in']:
                    # Pass consumes the raw scene: draw the scene itself.
                    self._drawingData.clearColor = passOb.members.get('clear', self._drawingData.clearColor)
                    self.drawScene(passOb.name)
                    if rt:
                        rt._deActivate()
                elif '_scene' in passOb.members['out']:
                    if not isLastPass:
                        self.fullScreenEffects._sceneRT._activate([], colorIndexes=[0])
                    else:
                        self.setRenderTarget()
                    # NOTE(review): source indentation was lost; the quad draw is
                    # placed inside this branch per the most literal reading --
                    # confirm against the original file.
                    self._drawScreenQuad(shader)
    def drawScene(self, passN):
        """Clear the current target and render all meshes for this frame."""
        self._setClearColor(self._drawingData.clearColor)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        # todo: reimplement passN callback
        self.renderMeshes(self._drawingData)
    def drawSkybox(self, sky):
        """Draw the sky cube using the sky's own shader and material.

        NOTE(review): this method references a bare name ``VBO`` (as an
        argument, not a constructor) and ``self.shader``, neither of which is
        defined in this class/module as visible here -- it looks unused or
        broken as-is; confirm before calling.
        """
        # self.shader.reset()
        if not sky.shader._isSet:
            sky.shader.set()
        sky._material.setMaterialValues(self.shader)
        stride = 16
        hand = sky.shader._attributesHandlesCache['position']
        glEnableVertexAttribArray(hand)
        glVertexAttribPointer(hand, 4, GL_FLOAT, False, stride, VBO)
        glDrawArrays(GL_TRIANGLES, 0, 36)
        glDisableVertexAttribArray(hand)
    def resize(self, size):
        """Match the GL viewport to the new (width, height) in pixels."""
        glViewport(0, 0, size[0], size[1])
@staticmethod
def _enableAttribute(shader, attribName, stride, vBuffer):
"""
@param attribName: "color", "normal", "tangent", "bitangent", "texcoord" 0 -2,
"boneweights", "boneindexes"
@type attribName: str
@param stride: vertex size
@type stride: int
@param vBuffer: vbo + offset
@type vBuffer: vbo
"""
res = shader._attributesHandlesCache.get(attribName)
rl = []
if res is not None:
rl.append(res)
glEnableVertexAttribArray(res)
if attribName.__contains__('texcoord'):
glVertexAttribPointer(res, 2, GL_FLOAT, False, stride, vBuffer)
elif attribName.__contains__('boneweights'):
glVertexAttribPointer(res, 4, GL_FLOAT, False, stride, vBuffer)
elif attribName.__contains__('boneindexes'):
# if glVertexAttribIPointer:
# glVertexAttribIPointer(res, 4, GL_INT, stride, vBuffer)
# else: # todo: add proper check
glVertexAttribPointer(res, 4, GL_FLOAT, False, stride, vBuffer)
else:
glVertexAttribPointer(res, 3, GL_FLOAT, False, stride, vBuffer)
return rl
@staticmethod
def _disableAttribute(attribName, currentShader):
res = currentShader._attributesHandlesCache.get(attribName, -1)
glDisableVertexAttribArray(res)
    def __createCubeVBO(self):
        # Upload the static cube geometry once; intended for sky rendering.
        # NOTE(review): ``VBO`` is not imported in the visible module header --
        # confirm where it is expected to come from before relying on this.
        self._cubeVBO = VBO(data=self._cube_array, target=GL_ARRAY_BUFFER, usage=GL_STATIC_DRAW)
    def __createScreenQuadStuff(self):
        """Create the screen-quad VBO and load its default shader pair."""
        self._screenQuadVBO = VBO(data=self._screenQuad_array, target=GL_ARRAY_BUFFER, usage=GL_STATIC_DRAW)
        shadersPath = self._engine.path.defaults.shaders
        vs = path.join(shadersPath, 'default_sq_VS.vs')
        fs = path.join(shadersPath, 'default_sq_FS.fs')
        self._screenQuadShader = self._shaders.loadShader(vs, fs, 'default_sqShader')
    def renderMeshes(self, drawingData):
        """Draw every mesh instance in ``drawingData``.

        Vertex/index buffers are created lazily per mesh and cached; shader,
        uniforms and attribute pointers are (re)bound per instance as needed.
        """
        for mesh in drawingData.meshes:
            resetRequired = True
            attribs = []
            meshid = mesh.ID
            renderDataPerInstance = drawingData.instances.get(meshid)
            if renderDataPerInstance is None or len(renderDataPerInstance) < 1:
                continue
            # Lazily create and cache the GPU buffers for this mesh.
            vertexBuffer = self._vertexBuffers.get(meshid)
            if vertexBuffer is None:
                vertexBuffer = VBO(data=mesh._vertexBufferArray, target=GL_ARRAY_BUFFER, usage=GL_STATIC_DRAW)
                self._vertexBuffers[meshid] = vertexBuffer
            indexBuffer = self._indexBuffers.get(meshid)
            if indexBuffer is None:
                indexBuffer = VBO(data=mesh._indexBufferArray, target=GL_ELEMENT_ARRAY_BUFFER, usage=GL_STATIC_DRAW)
                self._indexBuffers[meshid] = indexBuffer
            vertexBuffer.bind()
            indexBuffer.bind()
            if self._shaderOverride:
                # A full-screen effect forced this shader for the scene pass.
                currentShader = self._shaderOverride
                _setSceneUniforms(currentShader, drawingData.defaultSceneParams)
            else:
                currentShader = None
            for currentMat, defaultObjectParams, transformations, modelID in renderDataPerInstance:
                if not self._shaderOverride:
                    currentShader = self._engine.shaders._shadersCache[currentMat.shaderID]
                    if currentShader is None:
                        raise RuntimeError('Shader {} not found'.format(currentMat.shaderID))
                    if not currentShader._isSet:
                        currentShader.set()
                        _setSceneUniforms(currentShader, drawingData.defaultSceneParams)
                        # attribs = OGL3Backend.enableAttributes(mesh, currentShader)
                if resetRequired:
                    # First use of this shader for the mesh: rebind attributes.
                    OGL3Backend.disableAttributes(attribs)
                    attribs = OGL3Backend.enableAttributes(mesh, currentShader)
                    currentShader.reset()
                    resetRequired = False
                _setObjectUniforms(currentShader, defaultObjectParams)
                if transformations:
                    _setBoneTransformationsForMesh(currentShader, transformations, drawingData.modelBoneDirs[modelID])
                setMaterialValues(self._textures, currentShader, currentMat)
                self.renderMesh(mesh)
            OGL3Backend.disableAttributes(attribs)
            indexBuffer.unbind()
            vertexBuffer.unbind()
        self._engine.shaders._setShaderState(self._engine.shaders._currentShader, 0)
@staticmethod
def enableAttributes(mesh, currentShader):
stride = int(mesh._stride)
used_attribs = []
for dd in mesh._declaration:
u = OGL3Backend._enableAttribute(currentShader, dd._name, stride, dd._offset)
used_attribs.extend(u)
return used_attribs
@staticmethod
def disableAttributes(used_attribs):
while len(used_attribs) > 0:
glDisableVertexAttribArray(used_attribs.pop())
    def renderMesh(self, mesh):
        # Accumulate the frame triangle count, then draw via the bound index buffer.
        self._poliCount += mesh.primitiveCount
        glDrawElements(GL_TRIANGLES, mesh.indexCount, GL_UNSIGNED_INT, 0)  # Using indexing
    def _bindScreenQuad(self):
        """Bind the screen-quad VBO, creating it (and its shader) on first use."""
        if not self._screenQuadVBO:
            self.__createScreenQuadStuff()
        self._screenQuadVBO.bind()

    def _unbindScreenQuad(self):
        """Release the screen-quad VBO binding."""
        self._screenQuadVBO.unbind()
    def _drawScreenQuad(self, shader):
        """Clear the current target and draw the full-screen quad with ``shader``."""
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        self._bindScreenQuad()
        stride = 12  # 3 floats of 4 bytes per vertex
        hand = shader._attributesHandlesCache.get('position', -1)
        glEnableVertexAttribArray(hand)
        glVertexAttribPointer(hand, 3, GL_FLOAT, False, stride, 0)
        if not shader._isSet:
            shader.set()
        glDrawArrays(GL_TRIANGLES, 0, 6)
        glDisableVertexAttribArray(hand)
@staticmethod
def create2DTexture(ID, mipmapsNumber, pix, w, h, repeat=True):
glGetError()
tex = np.array([0], np.uint32)
glGenTextures(1, tex)
glerr = glGetError()
tex = tex[0]
if tex < 1:
raise RuntimeError('GL error {} when creating texture.'.format(glerr))
glBindTexture(GL_TEXTURE_2D, tex)
glPixelStorei(GL_UNPACK_ALIGNMENT, 4)
if mipmapsNumber < 0:
mipmapsNumber = 0
# MANDATORY >>>>>>>>>>>
if repeat:
edgeMode = GL_REPEAT
else:
edgeMode = GL_CLAMP_TO_EDGE
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, edgeMode)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, edgeMode)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
if mipmapsNumber > 0 and repeat:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR)
else:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, | |
<filename>scripts/jprep_ranges.py<gh_stars>0
"""
Scan output from consol_to_json.py and create C3S error report.
Test
----
Initial scan assessing completeness and consistency of json files.
JprepRanges
-----------
Sa
"""
import collections

# Variable (table.var) -> mask variable applied when computing its ranges.
MASKS = {'LImon.snd': 'fx.sftlf', 'LImon.snw': 'fx.sftlf', 'Lmon.mrro': 'fx.sftlf', 'Lmon.mrsos': 'fx.sftlf', 'Ofx.deptho': 'Ofx.sftof', 'Omon.sos': 'Ofx.sftof', 'Omon.tos': 'Ofx.sftof', 'Omon.zos': 'Ofx.sftof', 'SImon.siconc': 'Ofx.sftof', 'SImon.simass': 'Ofx.sftof', 'SImon.sitemptop': 'SImon.siconc', 'SImon.sithick': 'SImon.siconc', 'fx.mrsofc': 'fx.sftlf'}
# Template illustrating the expected shape of a QC report JSON document.
# NOTE: the two "<handle>" keys under "files" are placeholders; as a Python
# literal the second silently overwrites the first -- this dict serves as
# documentation, not as data.
schema = {
    "header": {
        "qc_check:": "CF-checker",
        "author": "<NAME>",
        "institution": "CEDA",
        "date": "2020-07-22T09:02:51.352928",
        "version": "1.0"
    },
    "datasets": {
        "<handle>": {
            "dset_id": "<id>",
            "qc_status": "pass|fail",
            "dataset_qc": {
                "error_severity": "na|minor|major|unknown",
                "error_message": "<output from check>|na"
            },
            "files": {
                "<handle>": {
                    "filename": "<filename>",
                    "qc_status": "pass|fail",
                    "error_severity": "na|minor|major|unknown",
                    "error_message": "<output from check>|na"
                },
                "<handle>": {
                    "filename": "<filename>",
                    "qc_status": "pass|fail",
                    "error_severity": "na|minor|major|unknown",
                    "error_message": "<output from check>|na"
                }
            }
        }
    }
}
class Dcheck(object):
    """Validate the structure of a QC report dictionary (see ``schema``).

    After ``check``, ``self.errors`` maps a section name to a description of
    the attributes missing in it; it is empty when the document is well-formed.
    """

    # Required attributes per section.
    # NOTE(review): 'cq_check' here vs 'qc_check:' in ``schema`` above -- one
    # of the two spellings is likely a typo; confirm with the report producer.
    REQUIRED = dict(datasets=dict(dset_id='identifier', qc_status='IN:pass|fail', dataset_qc='dictionary'),
                    header=dict(cq_check='name', author='person', institution='name', date='datetime', version='string'),
                    files=dict(filename='string', qc_status='IN:pass|fail', error_severity='IN:na|minor|major|unknown',
                               error_message='message or na'))

    def __init__(self):
        self.errors = dict()

    def check(self, dd):
        """Check the top-level structure, then the header and datasets sections."""
        assert all([x in dd.keys() for x in ['header', 'datasets']]), 'Dictionary must contain header and datasets entries'
        self.check_header(dd['header'])
        self.check_datasets(dd['datasets'])
        if len(self.errors.keys()) == 0:
            print('NO ERRORS FOUND')
        else:
            print(self.errors)

    def check_header(self, ee):
        """Record any required header attributes that are absent."""
        ma = [k for k in self.REQUIRED['header'].keys() if k not in ee]
        if len(ma) != 0:
            self.errors['header'] = 'Missing attribute(s): %s' % (sorted(ma))

    def check_datasets(self, ee):
        """Record required dataset attributes that are absent, with occurrence counts."""
        cc = collections.defaultdict(int)
        for h, ds in ee.items():
            ma = [k for k in self.REQUIRED['datasets'].keys() if k not in ds]
            for a in ma:
                cc[a] += 1
        # BUG FIX: the original tested ``cc.keys() != 0`` which is always True,
        # so an error-free scan still produced a bogus empty 'datasets' entry.
        if len(cc) != 0:
            ma = sorted(list(cc.keys()))
            self.errors['datasets'] = 'Missing attribute(s): %s' % ', '.join(['%s (%s)' % (k, cc[k]) for k in ma])
import csv, json, time, glob, os
from local_utilities import get_new_ranges
# Canned "pass" entry for datasets with no handle-registry errors.
ds_pass_msg = dict(error_severity='na', error_message='No handle registry errors detected')
# List of dataset PIDs to scan.
input_dataset_list = 'c3s34g_pids_qcTest_Oct2020.txt'
# Error codes classified by severity.
major_error_codes = {'ERROR.ds.0900'}
minor_error_codes = {'ERROR.ds.0040'}
##workflow_errors_detected = ['Lmon.mrro', 'Amon.psl']
##print('NEED TO REVISIT DATA FOR %s' % workflow_errors_detected )
# Variables excluded because of known workflow errors (currently none).
workflow_errors_detected = []
class Base(object):
    # Root directory holding the per-variable JSON result folders.
    # NOTE: the second assignment overrides the first; the json_03 path is
    # effectively dead (kept, apparently, for reference).
    DataRoot = '../../cmip6_range_check/scripts/json_03/'
    DataRoot = './json_08/'
def get_result_directories(root_dir=None):
    """Return (passed, excluded) result directories under ``root_dir``.

    Directories whose basename appears in ``workflow_errors_detected`` are
    excluded. Defaults to ``Base.DataRoot`` when ``root_dir`` is None.
    """
    if root_dir is None:  # idiom fix: identity comparison with None, not ==
        root_dir = Base.DataRoot
    dirs = sorted([x for x in glob.glob('%s/*' % root_dir) if os.path.isdir(x)])
    dirs_excluded = [x for x in dirs if x.rpartition('/')[-1] in workflow_errors_detected]
    dirs_passed = [x for x in dirs if x not in dirs_excluded]
    return dirs_passed, dirs_excluded
class FileReport(object):
    """Parse one per-variable JSON report and group its record tags by file."""

    def __init__(self, json_file):
        """Load ``json_file`` and build ``self.records``: filename -> sorted tags."""
        assert os.path.isfile(json_file)
        self.ee = json.load(open(json_file, 'r'))
        dat = self.ee['data']
        files = sorted(list(dat['headers'].keys()))
        grouped = collections.defaultdict(set)
        for key in dat['records'].keys():
            # Record keys look like "<filename>:<tag>".
            fn, _sep, tag = key.rpartition(':')
            assert fn[:-3] in files
            ##print( 'xxxx',r,fn,files)
            grouped[fn].add(tag)
        self.records = {fn: sorted(list(tags)) for fn, tags in grouped.items()}
def range_dump(ofile='ranges_clean.csv', c3s_filter=True):
    """Dump variable ranges from ``get_new_ranges()`` to a tab-separated file.

    When ``c3s_filter`` is True, only variables requested by C3S (read from
    data/c3s34g_variables.json) are written.
    """
    nr = get_new_ranges()
    ks = sorted(list(nr.keys()))
    print(ks)
    if c3s_filter:
        ee = json.load(open('data/c3s34g_variables.json'))
        requested = set()
        for k, l in ee['requested'].items():
            for i in l:
                requested.add('%s.%s' % (k, i))
        ks = [k for k in ks if k in requested]
    oo = open(ofile, 'w')
    for k in ks:
        this = nr[k]._asdict()
        #rec = [k,] + [str(this[x].value) for x in ['max', 'min', 'ma_max', 'ma_min', 'max_l0', 'min_l0'] ]
        # Columns: variable key, then its range statistics.
        rec = [k, ] + [str(this[x].value) for x in ['max', 'min', 'ma_max', 'ma_min']]
        print(rec)
        oo.write('\t'.join(rec) + '\n')
    oo.close()
class Test(Base):
    """Sanity-scan the JSON result directories.

    For each variable directory, checks that a range entry exists and that
    all JSON files share the same set of summary keys; writes a TSV report.
    """

    def __init__(self, idir=None):
        if idir == None:
            idir = self.DataRoot
        nr = get_new_ranges()
        print(nr.keys())
        dirs = sorted([x for x in glob.glob('%s/*' % idir) if os.path.isdir(x)])
        oo = open('jprep_ranges_test.csv', 'w')
        for d in dirs:
            dn = d.rpartition('/')[-1]
            # Distinct summary-key tuples seen across this directory's files.
            ss = set()
            for f in sorted(glob.glob('%s/*.json' % d)):
                ee = json.load(open(f))
                if 'mask' not in ee['data']['summary'].keys():
                    print('No mask item::', f)
                ss.add(tuple(sorted(ee['data']['summary'].keys())))
            print(d, dn in nr, ss)
            # Columns: directory, has-range-entry, key sets, keys-consistent.
            rec = [str(x) for x in [d, dn in nr, ss, len(ss) == 1]]
            oo.write('\t'.join(rec) + '\n')
        oo.close()
def apply_conditional(func, ll, tt, kk):
    """Apply ``func`` to the ``kk``-th element of each row in ``ll`` whose
    element has exact type ``tt``; return None when no row qualifies."""
    selected = [row[kk] for row in ll if type(row[kk]) == tt]
    return func(selected) if selected else None
def record_checks(fn, tags, rcbook, with_mask=False):
    """Look through the NetCDF file level json output and generate QC report.

    current version gets as far as basic information .. need to add ranges , and mask info

    Returns (brep, mrep, qrep, lrep):
      brep -- (min col0, max col1, min col2, max col2, empty-count) over tags
      mrep -- True when every tag reports 'masks_match'; None if not requested
      qrep -- extreme quantiles across tags, or None when unavailable
      lrep -- per-level minima for level-split tags ("name-level"), else None
    """
    basic = [rcbook['%s:%s' % (fn, t)]['basic'] for t in tags]
    try:
        quantiles = [rcbook['%s:%s' % (fn, t)]['quantiles'] for t in tags]
    except:
        print('quantiles missing')
        quantiles = None
    # Total count of "empty" flags (4th basic column) across tags.
    nn = sum([x[3] for x in basic])
    try:
        brep = (min([x[0] for x in basic]),
                max([x[1] for x in basic]),
                min([x[2] for x in basic]),
                max([x[2] for x in basic]),
                nn)
    except:
        print(fn, 'FAILED TO EXTRACT EXTREMES, nn=', nn)
        print(basic[0][0], type(basic[0][0]))
        try:
            # Retry considering only float entries (skips placeholders).
            basicx = [x for x in basic if type(x[0]) == type(1.)]
            brep = (apply_conditional(min, basic, type(1.0), 0),
                    apply_conditional(max, basic, type(1.0), 1),
                    apply_conditional(min, basic, type(1.0), 2),
                    apply_conditional(max, basic, type(1.0), 2),
                    nn)
        except:
            print(fn, 'FAILED AGAIN TO EXTRACT EXTREMES', basic)
            raise
    if with_mask:
        try:
            mask = [rcbook['%s:%s' % (fn, t)]['mask_ok'] for t in tags]
            mrep = all([x[0] == 'masks_match' for x in mask])
        except:
            mrep = False
    else:
        mrep = None
    if quantiles == None:
        qrep = None
    else:
        try:
            # Largest of the 5 low quantiles and smallest of the 5 high ones.
            qrep = (*[max([x[k] for x in quantiles]) for k in range(5)],
                    *[min([x[-5 + k] for x in quantiles]) for k in range(5)])
        except:
            qrep = None
    if tags[0].find('-') == -1:
        lrep = None
    else:
        # Tags of the form "<name>-<level>": collect per-level minima.
        cc = collections.defaultdict(list)
        for t in tags:
            lev = int(t.split('-')[-1])
            basic = rcbook['%s:%s' % (fn, t)]['basic']
            try:
                cc[lev].append(float(basic[0]))
            except:
                pass
                ##print( 'SKIPPPING LREP BASIC:',basic[0] )
                ##raise
        lrep = []
        for l in range(19):
            if len(cc[l]) > 0:
                lrep.append(min(cc[l]))
            else:
                lrep.append(None)
    return brep, mrep, qrep, lrep
class Hlook(object):
    """Bidirectional lookup between dataset handles and ESGF dataset ids,
    loaded from the QC template JSON."""

    def __init__(self):
        # NOTE(review): reads a hard-coded template path at construction time.
        ee = json.load(open('../_work/QC_template_v5_2021-03-25.json'))
        self.ee = dict()  # handle -> dset_id
        self.ff = dict()  # dset_id -> handle
        for h, i in ee['datasets'].items():
            self.ee[h] = i['dset_id']
            self.ff[i['dset_id']] = h
class TestFile(object):
ATTRIBUTES = ('basic', 'drs', 'empty_count', 'extremes', 'mask', 'quantiles')
def __init__(self,hlk):
self.hlk = hlk
def check_file(self,jfile, vmax=None, vmin=None, vmamax=None, vmamin=None, with_mask=False, jrep_file=None, fcsv=None, ffcsv=None, mkd=None):
print (jfile )
fr = FileReport( jfile )
reps = {}
## json_03/Amon.ts/ts_Amon_INM-CM5-0_ssp245_r1i1p1f1_gr1.json
if jrep_file == None:
jrep_file = jfile.replace( 'json_', 'json_rep_' )
tree = jrep_file.split( '/' )
if tree[0] == '.':
tree=tree[1:]
assert os.path.isdir( tree[0] ), '%s not found' % tree[0]
if not os.path.isdir( '/'.join( tree[:2] ) ):
os.mkdir( '/'.join( tree[:2] ) )
ests = set()
esvs = set()
dsfail = False
for fn,tags in fr.records.items():
fns = fn[:-3]
tid = fr.ee['data']['headers'][fns]['tech']['file_info']['tid']
contact = fr.ee['data']['headers'][fns]['tech']['file_info']['contact']
if contact == None:
contact = '-'
##
## table, var, inst, model, mip, expt, variant, grid, version
drs = fr.ee['data']['headers'][fns]['tech']['file_info']['drs']
table, var, inst, model, mip, expt, variant, grid, version = drs
path = '/badc/cmip6/data/CMIP6/' + '/'.join( [mip,inst,model,expt,variant,table,var,grid,version] )
#
# should be unique in a set of file records
##
esgf_id = '.'.join( ['CMIP6', mip, inst, model, expt, variant, table, var, grid, version ] )
if esgf_id not in self.hlk.ff:
print( "SEVERE: esgf ID not found: %s" % esgf_id )
hdl = 'hdl_not_found'
else:
hdl = self.hlk.ff[ esgf_id ]
rcs,mcs,qrep,lrep = record_checks(fn,tags,fr.ee['data']['records'],with_mask=with_mask)
if rcs[0] == None:
tests = [False,False]
else:
tests = [rcs[0] >= vmin, rcs[1] <= vmax]
if vmamin != None:
tests.append( rcs[2] >= vmamin )
else:
tests.append( None )
if vmamax != None:
tests.append( rcs[3] <= vmamax )
else:
tests.append( None )
tests.append( rcs[4] == 0 )
if with_mask:
tests.append ( mcs )
else:
tests.append( None )
emsg = []
if rcs[0] == None:
emsg.append( 'Minimum value not found' )
elif rcs[0] < vmin:
emsg.append( 'Minimum %s < %s' | |
<reponame>orionlee/pht_eb_stats
"""
Convenience helpers for `lightkurve` package.
"""
import os
import logging
import math
import json
import warnings
from collections import OrderedDict
import astropy.units as u
import numpy as np
from scipy.interpolate import UnivariateSpline
from IPython.display import display, HTML
import lightkurve as lk
from lightkurve.search import SearchResult
import asyncio_compat
log = logging.getLogger(__name__)
def of_sector(lcf_coll, sectorNum):
    """Return the first light curve in ``lcf_coll`` whose SECTOR metadata
    equals ``sectorNum``, or None when absent."""
    matches = (lcf for lcf in lcf_coll if lcf.meta["SECTOR"] == sectorNum)
    return next(matches, None)
def of_sectors(*args):
    """Subset a LightCurveCollection by sector number(s).

    ``of_sectors(coll)`` returns ``coll`` itself (no filtering);
    ``of_sectors(coll, s1, s2, ...)`` restricts it to those sectors.
    """
    lcf_coll = args[0]
    if len(args) == 1:
        # when no sectors are specified, return entire collection
        # For convenience: when a notebooks is modified such that
        # a user sometimes use a subset of sectors , and sometimes everything
        # the user can can still use of_sectors() wrapper regardless
        return lcf_coll
    sector_nums = args[1:]
    # np.isin replaces the deprecated np.in1d (identical result for 1-D input).
    return lcf_coll[np.isin(lcf_coll.sector, sector_nums)]
def of_sector_n_around(lk_coll_or_sr, sector_num, num_additions=8):
    """Return ``sector_num`` plus up to ``num_additions`` neighboring sectors.

    Accepts either a LightCurveCollection or a SearchResult; returns an empty
    collection/result when ``sector_num`` is not present.
    """
    def do_for_lk_coll():
        subset_slice = _get_slice_for_of_sector_n_around(
            lk_coll_or_sr,
            lambda coll: coll.sector,
            sector_num,
            num_additions=num_additions,
        )
        if subset_slice is not None:
            # workaround bug that lcf_coll[start:end] returns a list only
            return lk.LightCurveCollection(lk_coll_or_sr[subset_slice])
        else:
            return lk.LightCurveCollection([])

    def do_for_sr():
        subset_slice = _get_slice_for_of_sector_n_around(
            lk_coll_or_sr,
            lambda sr: sr.table["sequence_number"],
            sector_num,
            num_additions=num_additions,
        )
        if subset_slice is not None:
            return lk_coll_or_sr[subset_slice]
        else:
            return SearchResult()

    # Dispatch on duck type: collections expose .sector, search results .table.
    if hasattr(lk_coll_or_sr, "sector"):
        return do_for_lk_coll()
    elif hasattr(lk_coll_or_sr, "table") and lk_coll_or_sr.table["sequence_number"] is not None:
        return do_for_sr()
    else:
        raise TypeError(f"Unsupported type of collection: {type(lk_coll_or_sr)}")
def _get_slice_for_of_sector_n_around(coll, sector_accessor_func, sector_num, num_additions):
if sector_num not in sector_accessor_func(coll):
return None
idx = np.where(sector_accessor_func(coll) == sector_num)[0][0]
# if num_additions is odd number, we add one to older sector
start = max(idx - math.ceil(num_additions / 2), 0)
end = min(idx + math.floor(num_additions / 2) + 1, len(coll))
# case the start:end slice does not fill up the requested num_additions,
# try to fill it up
cur_slice_size = end - start - 1
if cur_slice_size < num_additions:
num_more_needed = num_additions - cur_slice_size
if start > 0:
start = max(start - num_more_needed, 0)
else:
end = min(end + num_more_needed, len(coll))
return slice(start, end)
def of_2min_cadences(lcf_coll):
    """Return LightCurveFiles of short, typically 2-minute cadence, only.

    Primary use case is to filter out 20-second files.
    """
    short_only = [lcf for lcf in lcf_coll if estimate_cadence_type(lcf) == "short"]
    return lk.LightCurveCollection(short_only)
def estimate_cadence(lc):
    """Estimate the cadence of a lightcurve by returning the median of a sample"""
    sample_times = lc.time[:100].value
    return np.nanmedian(np.diff(sample_times))
def map_cadence_type(cadence_in_days):
    """Classify a cadence (in days) as "long", "short" or "fast".

    Thresholds: >= ~10 minutes -> long, >= ~1 minute -> short, else fast.
    None input maps to None.
    """
    if cadence_in_days is None:
        return None
    long_minimum = 9.9 / 60 / 24  # 10 minutes in days, with some margin of error
    short_minimum = 0.9 / 60 / 24  # 1 minute in days, with some margin of error
    if cadence_in_days >= long_minimum:
        return "long"
    return "short" if cadence_in_days >= short_minimum else "fast"
def estimate_cadence_type(lc):
    """Estimate the type of cadence to be one of long, short, or fast.

    The definition is the same as ``exptime`` in `lightkurve.search_lightcurve()`.
    """
    cadence = estimate_cadence(lc)
    return map_cadence_type(cadence)
def of_tic(lcf_coll, tic):
    """Return LightCurveFiles of the given TIC.

    Useful in case the default MAST result returned nearby targets.
    """
    matching = [lcf for lcf in lcf_coll if lcf.meta.get("TICID", None) == tic]
    return lk.LightCurveCollection(matching)
def estimate_object_radius_in_r_jupiter(lc, depth):
    """Return a back of envelope estimate of a companion object's radius,
    in Jupiter radii, from the transit ``depth`` and the stellar radius in
    the lightcurve metadata. Returns ``None`` when it cannot be estimated.
    """
    R_JUPITER_IN_R_SUN = 71492 / 695700
    r_star = lc.meta.get("RADIUS")  # assumed to be in R_sun
    if r_star is None or depth <= 0:
        # no stellar radius, or non-physical depth: cannot estimate
        return None
    r_obj_in_r_sun = math.sqrt(r_star * r_star * depth)
    return r_obj_in_r_sun / R_JUPITER_IN_R_SUN
def download_lightcurves_of_tic_with_priority(tic, download_filter_func=None, download_dir=None):
    """For a given TIC, download lightcurves across all sectors.
    For each sector, download one based on pre-set priority.

    Parameters
    ----------
    tic :
        TIC id, searched as ``f"TIC{tic}"`` against the TESS mission.
    download_filter_func : callable, optional
        Given the prioritized `SearchResult`, returns the subset to download.
    download_dir : str, optional
        Passed through to `SearchResult.download_all()`.

    Returns
    -------
    (lcf_coll, sr, sr_unfiltered) :
        the downloaded collection, the prioritized search result, and the raw
        search result; ``(None, None, None)`` if MAST returned nothing.
    """
    sr_unfiltered = lk.search_lightcurve(f"TIC{tic}", mission="TESS")
    if len(sr_unfiltered) < 1:
        print(f"WARNING: no result found for TIC {tic}")
        return None, None, None
    sr_unfiltered = sr_unfiltered[sr_unfiltered.target_name == str(tic)]  # in case we get some other nearby TICs
    # filter out HLSPs not supported by lightkurve yet
    sr = sr_unfiltered[sr_unfiltered.author != "DIAMANTE"]
    if len(sr) < len(sr_unfiltered):
        print("Note: there are products not supported by Lightkurve, which are excluded from download.")
    # for each sector, filter based on the given priority.
    # - note: prefer QLP over TESS-SPOC because QLP is detrended, with multiple apertures within 1 file
    sr = filter_by_priority(
        sr,
        author_priority=["SPOC", "QLP", "TESS-SPOC"],
        exptime_priority=["short", "long", "fast"],
    )
    # report how many rows the priority filter removed, and how many fast
    # (20-second) products exist in the raw result
    num_filtered = len(sr_unfiltered) - len(sr)
    num_fast = len(sr_unfiltered[sr_unfiltered.exptime < 60 * u.second])
    if num_filtered > 0:
        msg = f"{num_filtered} rows filtered"
        if num_fast > 0:
            msg = msg + f" ; {num_fast} fast (20secs) products."
        print(msg)
    display(sr)
    # let caller to optionally further restrict a subset to be downloaded
    sr_to_download = sr
    if download_filter_func is not None:
        sr_to_download = download_filter_func(sr)
        if len(sr_to_download) < len(sr):
            display(
                HTML(
                    """<font style="background-color: yellow;">Note</font>:
SearchResult is further filtered - only a subset will be downloaded."""
                )
            )
    lcf_coll = sr_to_download.download_all(download_dir=download_dir)
    # summarize what was downloaded (sector range of first/last file)
    if lcf_coll is not None and len(lcf_coll) > 0:
        print(f"TIC {tic} \t#sectors: {len(lcf_coll)} ; {lcf_coll[0].meta['SECTOR']} - {lcf_coll[-1].meta['SECTOR']}")
        print(
            (
                f" sector {lcf_coll[-1].meta['SECTOR']}: \t"
                f"camera = {lcf_coll[-1].meta['CAMERA']} ; ccd = {lcf_coll[-1].meta['CCD']}"
            )
        )
    else:
        print(f"TIC {tic}: no data")
    return lcf_coll, sr, sr_unfiltered
def download_lightcurve(
    target,
    mission=("Kepler", "K2", "TESS"),
    exptime="short",
    author="SPOC",
    download_dir=None,
    use_cache="yes",
    display_search_result=True,
):
    """
    Wraps `lightkurve.search_lightcurve()` and the
    subsequent `lightkurve.search.SearchResult.download_all()` calls,
    with the option of caching, so that for a given search,
    if the result has been downloaded, the cache will be used.

    The parameters all propagate to the underlying `search_lightcurvefile()`
    and `download_all()` calls. The lone exception is `use_cache`.

    Parameters
    ----------
    use_cache : str, must be one of 'yes', or 'no'\n
        OPEN: an option of 'fallback': cache will be used when offline.\n
        OPEN: for now, actual download lightcurve cache will still be used if
        available irrespective of the settings.

    Returns
    -------
    collection : `~lightkurve.collections.Collection` object
        Returns a `~lightkurve.collections.LightCurveCollection`
        containing all lightcurve files that match the criteria
    """
    if use_cache == "no":
        return _search_and_cache(target, mission, exptime, author, download_dir, display_search_result)
    if use_cache == "yes":
        # cache hit: the cached entries are relative paths under mastDownload/
        result_file_ids = _load_from_cache_if_any(target, mission, download_dir)
        if result_file_ids is not None:
            result_files = list(map(lambda e: f"{download_dir}/mastDownload/{e}", result_file_ids))
            return lk.collections.LightCurveCollection(list(map(lambda f: lk.read(f), result_files)))
        # else: cache miss - fall back to a fresh search (which populates the cache)
        return _search_and_cache(target, mission, exptime, author, download_dir, display_search_result)
    # else
    raise ValueError("invalid value for argument use_cache")
# Private helpers for `download_lightcurvefiles`
def _search_and_cache(target, mission, exptime, author, download_dir, display_search_result):
    """Run the lightkurve search, cache the product identifiers, then download all."""
    result = lk.search_lightcurve(target=target, mission=mission, exptime=exptime, author=author)
    if len(result) == 0:
        return None
    if display_search_result:
        _display_search_result(result)
    _cache_search_result_product_identifiers(result, download_dir, target, mission)
    return result.download_all(quality_bitmask="default", download_dir=download_dir)
def _display_search_result(search_res):
    """Show the search-result table, with the most useful columns moved up front."""
    from IPython.core.display import display

    tab = search_res.table
    preferred_cols = ["proposal_id", "target_name", "sequence_number", "t_exptime"]
    remaining_cols = [c for c in tab.colnames if c not in preferred_cols]
    display(tab[preferred_cols + remaining_cols])
def _load_from_cache_if_any(target, mission, download_dir):
    """Return the cached product identifiers for this (target, mission), or None."""
    return _load_search_result_product_identifiers(download_dir, _get_cache_key(target, mission))
def _cache_search_result_product_identifiers(search_res, download_dir, target, mission):
    """Persist the identifiers of a search result; returns the cache key used."""
    key = _get_cache_key(target, mission)
    _save_search_result_product_identifiers(_to_product_identifiers(search_res), download_dir, key)
    return key
def _get_search_result_cache_dir(download_dir):
# TODO: handle download_dir is None (defaults)
cache_dir = f"{download_dir}/mastQueries"
if os.path.isdir(cache_dir):
return cache_dir
# else it doesn't exist, make a new cache directory
try:
os.mkdir(cache_dir)
# downloads locally if OS error occurs
except OSError:
log.warning(
"Warning: unable to create {}. "
"Cache MAST query results to the current "
"working directory instead.".format(cache_dir)
)
cache_dir = "."
return cache_dir
def _get_cache_key(target, mission):
# TODO: handle cases the generated key is not a valid filename
return f"{target}_{mission}_ids"
def _to_product_identifiers(search_res):
"""
Returns
-------
A list of str, constructed from `(obs_collection, obs_id, productFilename)` tuples, that can
identify cached lightcurve file,s if any.
"""
return list(
map(
lambda e: e["obs_collection"] + "/" + e["obs_id"] + "/" + e["productFilename"],
search_res.table,
)
)
def _save_search_result_product_identifiers(identifiers, download_dir, key):
    """Write ``identifiers`` to the JSON cache file for ``key``.

    Returns the path of the file written.
    """
    resolved_cache_dir = _get_search_result_cache_dir(download_dir)
    filepath = f"{resolved_cache_dir}/{key}.json"
    # bug fix: the original leaked the file handle (open without close);
    # a context manager closes it even if json.dump raises
    with open(filepath, "w+") as fp:
        json.dump(identifiers, fp)
    return filepath
def _load_search_result_product_identifiers(download_dir, key):
    """Load previously cached product identifiers for ``key``.

    Returns the identifier list, or ``None`` on cache miss / read error.
    """
    resolved_cache_dir = _get_search_result_cache_dir(download_dir)
    filepath = f"{resolved_cache_dir}/{key}.json"
    try:
        # bug fix: the original leaked the file handle; close it deterministically
        with open(filepath, "r") as fp:
            return json.load(fp)
    except OSError as err:
        # errno == 2 (ENOENT): file not found, typical case of cache miss
        # errno != 2: unexpected error, log a warning
        if err.errno != 2:
            log.warning("Unexpected OSError in retrieving cached search result: {}".format(err))
        return None
def filter_by_priority(
sr,
author_priority=["SPOC", "TESS-SPOC", "QLP"],
exptime_priority=["short", "long", "fast"],
):
author_sort_keys = {}
for idx, author in enumerate(author_priority):
author_sort_keys[author] = idx + 1
exptime_sort_keys = {}
for idx, exptime in enumerate(exptime_priority):
exptime_sort_keys[exptime] = idx + 1
def calc_filter_priority(row):
# Overall priority key is in the form of <author_key><exptime_key>, e.g., 101
# - "01" is the exptime_key
# - the leading "1" is the author_key, given it is the primary one
author_default = max(dict(author_sort_keys).values()) + 1
author_key = author_sort_keys.get(row["author"], author_default) * 100
# secondary | |
# Copyright 2017 - RoboDK Software S.L. - http://www.robodk.com/
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------
# This file is a POST PROCESSOR for Robot Offline Programming to generate programs
# for an ABB robot with RoboDK
#
# To edit/test this POST PROCESSOR script file:
# Select "Program"->"Add/Edit Post Processor", then select your post or create a new one.
# You can edit this file using any text editor or Python editor. Using a Python editor allows to quickly evaluate a sample program at the end of this file.
# Python should be automatically installed with RoboDK
#
# You can also edit the POST PROCESSOR manually:
# 1- Open the *.py file with Python IDLE (right click -> Edit with IDLE)
# 2- Make the necessary changes
# 3- Run the file to open Python Shell: Run -> Run module (F5 by default)
# 4- The "test_post()" function is called automatically
# Alternatively, you can edit this file using a text editor and run it with Python
#
# To use a POST PROCESSOR file you must place the *.py file in "C:/RoboDK/Posts/"
# To select one POST PROCESSOR for your robot in RoboDK you must follow these steps:
# 1- Open the robot panel (double click a robot)
# 2- Select "Parameters"
# 3- Select "Unlock advanced options"
# 4- Select your post as the file name in the "Robot brand" box
#
# To delete an existing POST PROCESSOR script, simply delete this file (.py file)
#
# ----------------------------------------------------
# More information about RoboDK Post Processors and Offline Programming here:
# http://www.robodk.com/help#PostProcessor
# http://www.robodk.com/doc/en/PythonAPI/postprocessor.html
# ----------------------------------------------------
# ----------------------------------------------------
# Import RoboDK tools
from robodk import *
ONETAB = ' ' # one tab equals 4 spaces
# Define a custom header (variable declaration)
CUSTOM_HEADER = '''
! -------------------------------
! Define your variables here
! ...
'''
# Define your custom programs (predefined functions, not altered by RoboDK):
CUSTOM_FUNCTIONS = '''
! -------------------------------
! Define your functions here
! ...
'''
# ----------------------------------------------------
def pose_2_str(pose):
    """Return the RAPID string for a pose target: [x,y,z],[q1,q2,q3,q4]."""
    xyz_quat = Pose_2_ABB(pose)
    return '[%.3f, %.3f, %.3f],[%.8f, %.8f, %.8f, %.8f]' % tuple(xyz_quat)
def angles_2_str(angles):
    """Return the RAPID string for a joint target, e.g. ``[10,20,30,40,50,60]``
    with up to 6 decimals.

    Bug fix: the original called ``angles.extend(...)`` on the caller's list
    when the robot has fewer than 6 degrees of freedom, mutating the argument
    in place. Work on a copy instead.
    """
    joints = list(angles)
    # pad the joint target if the robot has less than 6 degrees of freedom
    if len(joints) < 6:
        joints.extend([0] * (6 - len(joints)))
    return '[%s]' % (','.join(format(ji, ".6f") for ji in joints[0:6]))
def extaxes_2_str(angles):
    """Return the RAPID external-axes string for a joint list.

    If angles is [j1..j6, 10, 20], it produces '[10.000000,20.000000,9E9,9E9,9E9,9E9]';
    with no external axes all six slots are the 'unset' marker 9E+09.
    """
    njoints = len(angles)
    if njoints <= 6:
        # should print 9E9 for unset external axes
        return '[9E+09,9E+09,9E+09,9E+09,9E+09,9E+09]'
    parts = [format(ji, ".6f") for ji in angles[6:njoints]]
    if njoints < 12:
        # pad the remaining slots with the 'unset' marker
        parts.extend(['9E9'] * (12 - njoints))
    return '[%s]' % ','.join(parts)
# ----------------------------------------------------
# Object class that handles the robot instructions/syntax
class RobotPost(object):
    """Robot post object: accumulates an ABB RAPID program as text, one line
    at a time, then saves/uploads it."""

    PROG_EXT = 'prg'  # set the program extension
    # other variables
    ROBOT_POST = 'unset'    # post processor name
    ROBOT_NAME = 'generic'  # robot name
    PROG_FILES = []         # path(s) of the generated program file(s), set on save
    nPROGS = 0              # number of programs started so far
    PROG = ''               # accumulated program text
    TAB = ''                # current indentation prefix for added lines
    LOG = ''                # accumulated warnings, shown to the user on save
    ZONEDATA = 'fine'       # current RAPID zonedata (blending radius)
    SPEEDDATA = '[500,500,5000,1000]'  # current RAPID speeddata
    FRAME_NAME = 'rdkWObj'  # work object (reference frame) variable name
    TOOL_NAME = 'rdkTool'   # tool (TCP) variable name
def __init__(self, robotpost=None, robotname=None, robot_axes = 6, **kwargs):
self.ROBOT_POST = robotpost
self.ROBOT_NAME = robotname
self.PROG = ''
self.LOG = ''
    def ProgStart(self, progname):
        """Start a new program: on the first call, emit the RAPID module
        header, tool/work-object declarations and the custom header/functions,
        then open a PROC with the given name."""
        self.nPROGS = self.nPROGS + 1
        if self.nPROGS == 1:
            # module preamble is written only once, for the first program
            self.addline('%%%')
            self.addline(' VERSION:1')
            self.addline(' LANGUAGE:ENGLISH')
            self.addline('%%%')
            self.addline('')
            self.addline('MODULE MOD_%s' % progname)
            self.TAB = ONETAB
            self.addline('')
            self.addline('PERS tooldata rdkTool := [TRUE,[[0,0,0],[1,0,0,0]],[2,[0,0,15],[1,0,0,0],0,0,0.005]];')
            self.addline('PERS wobjdata rdkWObj := [FALSE, TRUE, "", [[0,0,0],[1,0,0,0]],[[0,0,0],[1,0,0,0]]];')
            self.addcode(CUSTOM_HEADER)
            self.addcode(CUSTOM_FUNCTIONS)
            self.addline('')
        self.TAB = ONETAB
        self.addline('PROC %s()' % progname)
        self.TAB = ONETAB + ONETAB  # instructions need two tabs
    def ProgFinish(self, progname):
        """Close the current PROC (progname is unused; RAPID just needs ENDPROC)."""
        self.TAB = ONETAB
        self.addline('ENDPROC')
def ProgSave(self, folder, progname, ask_user = False, show_result = False):
self.addline('')
self.TAB = ''
self.addline('ENDMODULE')
progname = progname + '.' + self.PROG_EXT
if ask_user or not DirExists(folder):
filesave = getSaveFile(folder, progname, 'Save program as...')
if filesave is not None:
filesave = filesave.name
else:
return
else:
filesave = folder + '/' + progname
fid = open(filesave, "w")
fid.write(self.PROG)
fid.close()
print('SAVED: %s\n' % filesave) # tell RoboDK the path of the saved file
self.PROG_FILES = filesave
# open file with default application
if show_result:
if type(show_result) is str:
# Open file with provided application
import subprocess
p = subprocess.Popen([show_result, filesave])
elif type(show_result) is list:
import subprocess
p = subprocess.Popen(show_result + [filesave])
else:
# open file with default application
import os
os.startfile(filesave)
if len(self.LOG) > 0:
mbox('Program generation LOG:\n\n' + self.LOG)
    def ProgSendRobot(self, robot_ip, remote_path, ftp_user, ftp_pass):
        """Send a program to the robot using the provided parameters. This method is executed right after ProgSave if we selected the option "Send Program to Robot".
        The connection parameters must be provided in the robot connection menu of RoboDK"""
        UploadFTP(self.PROG_FILES, robot_ip, remote_path, ftp_user, ftp_pass)
    def MoveJ(self, pose, joints, conf_RLF=None):
        """Add a joint movement (RAPID MoveAbsJ from the joint values; the
        pose and conf_RLF arguments are not used for absolute joint moves)."""
        self.addline('MoveAbsJ [%s,%s],%s,%s,%s,\WObj:=%s;' % (angles_2_str(joints), extaxes_2_str(joints), self.SPEEDDATA, self.ZONEDATA, self.TOOL_NAME, self.FRAME_NAME))
    def MoveL(self, pose, joints, conf_RLF=None):
        """Add a linear movement (RAPID MoveL), deriving the confdata from the
        joint values and the REAR/LOWERARM/FLIP configuration flags."""
        #self.addline('MoveL [%s,[0,0,0,0],%s],%s,%s,%s,\WObj:=%s;' % (pose_2_str(pose), extaxes_2_str(joints), self.SPEEDDATA, self.ZONEDATA, self.TOOL_NAME, self.FRAME_NAME))
        if conf_RLF is None:
            conf_RLF = [0,0,0]
        #self.addline('MoveL [%s,[0,0,0,0],%s],%s,%s,rdkTool,\WObj:=rdkWObj;' % (pose_2_str(pose), extaxes_2_str(joints), self.SPEEDDATA, self.ZONEDATA))
        [REAR, LOWERARM, FLIP] = conf_RLF
        # cf1/cf4/cf6: quarter-revolution counts of axes 1, 4 and 6
        cf1 = math.floor(joints[0]/90.0)
        cf4 = math.floor(joints[3]/90.0)
        cf6 = math.floor(joints[5]/90.0)
        # cfx packs the three configuration flags into one integer
        cfx = 4*REAR + 2*LOWERARM + FLIP
        #self.addline('MoveL [%s,[%i,%i,%i,%i],%s],%s,%s,%s,\WObj:=%s;' % (pose_2_str(pose), extaxes_2_str(joints), self.SPEEDDATA, self.ZONEDATA, self.TOOL_NAME, self.FRAME_NAME))
        self.addline('MoveL [%s,[%i,%i,%i,%i],%s],%s,%s,%s,\WObj:=%s;' % (pose_2_str(pose), cf1, cf4, cf6,cfx, extaxes_2_str(joints), self.SPEEDDATA, self.ZONEDATA, self.TOOL_NAME, self.FRAME_NAME))
    def MoveC(self, pose1, joints1, pose2, joints2, conf_RLF_1=None, conf_RLF_2=None):
        """Add a circular movement (RAPID MoveC) through pose1 to pose2,
        building one robtarget string per point the same way MoveL does."""
        target1 = ''
        target2 = ''
        if conf_RLF_1 is None:
            conf_RLF_1 = [0,0,0]
        # first (via) point confdata
        cf1_1 = math.floor(joints1[0]/90.0)
        cf4_1 = math.floor(joints1[3]/90.0)
        cf6_1 = math.floor(joints1[5]/90.0)
        [REAR, LOWERARM, FLIP] = conf_RLF_1
        cfx_1 = 4*REAR + 2*LOWERARM + FLIP
        target1 = '[%s,[%i,%i,%i,%i],%s]' % (pose_2_str(pose1), cf1_1, cf4_1, cf6_1,cfx_1, extaxes_2_str(joints1))
        if conf_RLF_2 is None:
            conf_RLF_2 = [0,0,0]
        # second (end) point confdata
        cf1_2 = math.floor(joints2[0]/90.0)
        cf4_2 = math.floor(joints2[3]/90.0)
        cf6_2 = math.floor(joints2[5]/90.0)
        [REAR, LOWERARM, FLIP] = conf_RLF_2
        cfx_2 = 4*REAR + 2*LOWERARM + FLIP
        target2 = '[%s,[%i,%i,%i,%i],%s]' % (pose_2_str(pose2), cf1_2, cf4_2, cf6_2,cfx_2, extaxes_2_str(joints2))
        self.addline('MoveC %s,%s,%s,%s,%s,\WObj:=%s;' % (target1, target2, self.SPEEDDATA, self.ZONEDATA, self.TOOL_NAME, self.FRAME_NAME))
    def setFrame(self, pose, frame_id=None, frame_name=None):
        """Change the robot reference frame (assigns the work object variable
        in the generated program; frame_id/frame_name are currently unused)."""
        #self.addline('rdkWObj := [FALSE, TRUE, "", [%s],[[0,0,0],[1,0,0,0]]];' % pose_2_str(pose))
        #if frame_name == None:
        #    frame_name = self.FRAME_NAME
        #frame_name = frame_name.replace(' ','_')
        #self.FRAME_NAME = frame_name
        self.addline('%s := [FALSE, TRUE, "", [%s],[[0,0,0],[1,0,0,0]]];' % (self.FRAME_NAME, pose_2_str(pose)))
    def setTool(self, pose, tool_id=None, tool_name=None):
        """Change the robot TCP (assigns the tool variable in the generated
        program; tool_id/tool_name are currently unused)."""
        #if tool_name == None:
        #    tool_name = self.TOOL_NAME
        #tool_name = tool_name.replace(' ','_')
        #self.TOOL_NAME = tool_name
        self.addline('%s := [TRUE,[%s],[1,[0,0,50],[1,0,0,0],0,0,0.005]];' % (self.TOOL_NAME, pose_2_str(pose)))
def Pause(self, time_ms):
"""Pause the robot program"""
if time_ms <= 0:
self.addline('STOP;')
else:
self.addline('WaitTime %.3f' % (time_ms*0.001))
    def setSpeed(self, speed_mms):
        """Changes the robot speed (in mm/s): updates the TCP-speed field of
        the speeddata used by subsequent moves."""
        #self.SPEEDDATA = 'v%i' % speed_mms
        self.SPEEDDATA = '[%.2f,500,5000,1000]' % speed_mms
    def setAcceleration(self, accel_mmss):
        """Changes the robot acceleration (in mm/s2) - not implemented; only logs."""
        self.addlog('setAcceleration is not defined')
    def setSpeedJoints(self, speed_degs):
        """Changes the robot joint speed (in deg/s) - not implemented; only logs."""
        self.addlog('setSpeedJoints not defined')
    def setAccelerationJoints(self, accel_degss):
        """Changes the robot joint acceleration (in deg/s2) - not implemented; only logs."""
        self.addlog('setAccelerationJoints not defined')
def setZoneData(self, zone_mm):
"""Changes the zone data approach (makes the movement more smooth)"""
if zone_mm < 0:
self.ZONEDATA = 'fine'
else:
self.ZONEDATA = 'z%i' % zone_mm
def setDO(self, io_var, io_value):
"""Sets a variable (output) to a given value"""
if type(io_var) != str: # set default variable name if io_var is a number
io_var = 'D_OUT_%s' % str(io_var)
if type(io_value) != str: # set default variable value if io_value is a number
if io_value | |
# coding=UTF-8
import numpy as np
import videoseam as vs
class weights_delegate(object):
    """Delegate class to manage the weighting for the graph construction"""

    def __init__(self, parent, fill_with=np.inf, ndim=3):
        # parent: owner object carrying the energy multipliers read by the
        #   weights_* methods (alpha, beta, gamma, delta)
        # fill_with: weight used for links that must never be cut (default +inf)
        # ndim: dimensionality of the structure keys (tuples) handed out
        super(weights_delegate, self).__init__()
        self.parent = parent
        self.fill_with = fill_with
        self.ndim = ndim
# Given a list of vectors and a list of links, creates an appropriate dictionary
# @vectors a list of tuples, that represents a structure type
# @links a list (or numpy array) of numpy arrays, that contains weights to be assigned to a specific structure
#
# returns: a dictionary
#
# Example:
# @vectors [(2, 1, 1), (1, 0, 1)]
# @links [[[1, 2], [1, 0]], [[0, 1], [1, 1]]]
# returns: {(2, 1, 1): [[1, 2], [1, 0]], (1, 0, 1): [[0, 1], [1, 1]]}
def to_hash(self, vectors, links):
return {k: v for k, v in zip(vectors, links)}
# Given an n-dimensional tuple, resizes its dimensions to fit the class settings (self.ndim)
# @tupleval a tuple
# returns: another tuple with the correct dimension
#
# Example:
# @listval (2, 1, 1)
# returns (assuming self.ndim = 2): (1, 1)
def adjust_dim(self, tupleval):
if len(tupleval) == self.ndim:
return tupleval
resize = len(tupleval) - self.ndim
return tupleval[resize:]
# Given a list of n-dimensional tuple, resizes the dimension of each tuple to fit the class settings (self.ndim)
# @listval a list a tuple
# returns: a list of tuple with correct dimensions
#
# Example:
# @listval [(2, 1, 1), (1, 0, 1)]
# returns (assuming self.ndim = 2): [(1, 1), (0, 1)]
def adjust_list(self, listval):
return [self.adjust_dim(t) for t in listval]
    # Given I, it creates look-forward energies for that I, associated to the correct structure (1, 1, 2)
    # @I: An image skeleton (or a list of them)
    # returns: a dictionary that associates a structure key to an array of weights
    #
    # Example:
    # @I [[2, 1, 0, 3], [1, 0, 2, 4], [5, 2, 1, 3], [6, 2, 4, 3]]
    # returns {(1, 1, 2): [[inf, ?, ?, inf], [inf, ?, ?, inf], [inf, ?, ?, inf], [inf, ?, ?, inf]]}
    def weights_structure(self, I):
        vectors = self.adjust_list([(1, 1, 2)])  # left to right
        links = np.zeros((1,) + I.shape)
        # Formula: ((past_left - future_left)^2 + (past_right - future_right)^2) / 2
        pastleft = I[..., 1:] - I[..., 0:-1]
        futureleft = ((I[..., 1:-1] + I[..., 2:]) * 0.5) - I[..., 0:-2]
        pastright = -pastleft  # I[:, 0:-1] - I[:, 1:] = ME - sx
        futureright = ((I[..., 0:-2] + I[..., 1:-1]) * 0.5) - I[..., 2:]
        left = (pastleft[..., 0:-1] - futureleft) ** 2
        right = (pastright[..., 0:-1] - futureright) ** 2
        links[0, ..., 1:-2] = (left[..., 0:-1] + right[..., 1:]) * 0.5
        # scale by the structure-energy multiplier
        links = links * self.parent.alpha
        # border columns get the fill value (inf by default) so they are never cut
        links[0, ..., -2] = self.fill_with
        links[0, ..., 0] = self.fill_with
        return self.to_hash(vectors, links)
# Given I, it creates look-forward energies for that I, associated to the correct structure (2, 1, 1)
# @I: An image skeleton (or a list of them)
# returns: a dictionary that associates a structure key to an array of weights
#
# Example:
# @I [[2, 1, 0, 3], [1, 0, 2, 4], [5, 2, 1, 3], [6, 2, 4, 3]]
# returns {(1, 1, 2): [[inf, ?, ?, inf], [inf, ?, ?, inf], [inf, ?, ?, inf], [inf, ?, ?, inf]]}
def weights_structure_time(self, I):
vectors = [(2, 1, 1)]
links = np.zeros((1,) + I.shape)
pastleft = I[1:, :, :] - I[0:-1, :, :]
futureleft = ((I[1:-1, :, :] + I[2:, :, :]) * 0.5) - I[0:-2, :, :]
pastright = -pastleft # I[:, 0:-1] - I[:, 1:] = ME - sx
futureright = ((I[0:-2, :, :] + I[1:-1, :, :]) * 0.5) - I[2:, :, :]
left = (pastleft[0:-1, :, :] - futureleft) ** 2
right = (pastright[0:-1, :, :] - futureright) ** 2
links[0, 1:-2, :, :] = (left[0:-1, :, :] + right[1:, :, :]) * 0.5
links = links
links[0, -2, :, :] = self.fill_with
links[0, 0, :, :] = self.fill_with
return self.to_hash(vectors, links)
# A generic method to apply an energy function to a certain structure key
# @I: The referring image
# @A: The energy function
# returns: a dictionary that associates a structure key to an array of weights
#
# Example:
# @I [[2, 1, 3], [1, 0, 5], [5, 2, 3]]
# @A [[2, 1], [1, 0], [5, 2]]
# returns {(1, 1, 2): [[2, 1, 0], [1, 0, 0], [5, 2, 0]]}
def weights_standard(self, I, A):
vectors = self.adjust_list([(1, 1, 2)]) # left to right
links = np.zeros((1,) + I.shape)
links[0, ..., 0:-1] = A
return self.to_hash(vectors, links)
# Applies the importance map to a structure key. It applies also it's appropriate multiplier
# @I: The referring image
# @imp: importance map
# returns: a dictionary that associates a structure key to an array of weights
def weights_importance(self, I, imp):
return self.weights_standard(I, imp * self.parent.gamma)
# Applies the iterations count to a structure key. It applies also it's appropriate multiplier
# @I: The referring image
# @imp: The iteration counter energy function
# returns: a dictionary that associates a structure key to an array of weights
def weights_iterations(self, I, ite):
return self.weights_standard(I, ite * self.parent.beta)
# Applies the vector map to a structure key. It applies also it's appropriate multiplier
# @I: The referring image
# @vector: The vector tracking enegy function
# returns: a dictionary that associates a structure key to an array of weights
def weights_vector(self, I, V):
return self.weights_standard(I, V * self.parent.delta)
def weights_frame_iterations(self, I, ite):
vectors = [(2, 1, 1)] # left to right
links = np.zeros((1,) + I.shape)
links[0, 0:-1, :, :] = ite * self.parent.beta
return self.to_hash(vectors, links)
    # Depth (time + horizontal) energies for the two diagonal-in-time structures.
    # @I: The referring image (3D: time, rows, columns)
    # returns: a dictionary mapping (2, 1, 1) and (0, 1, 1) to weight arrays
    def weights_deepness(self, I):
        vectors = [(2, 1, 1), (0, 1, 1)]
        links = np.zeros((2,) + I.shape)
        # depth toward the left (forward in time)
        links[0, 0:-1, :, 1:-1] = np.abs(((I[0:-1, :, 1:-1] + I[0:-1, :, 2:]) * 0.5) - ((I[1:, :, 0:-2] + I[1:, :, 1:-1]) * 0.5))
        # depth toward the right (backward in time)
        links[1, 1:, :, 1:-1] = np.abs(((I[0:-1, :, 0:-2] + I[0:-1, :, 1:-1]) * 0.5) - ((I[1:, :, 1:-1] + I[1:, :, 2:]) * 0.5))
        return self.to_hash(vectors, links)
    # Diagonal energies for structures (0, 1, 2) and (2, 1, 2).
    # @I: The referring image (3D: time, rows, columns)
    # returns: a dictionary mapping the two structure keys to weight arrays
    def weights_diagonal(self, I):
        vectors = [(0, 1, 2), (2, 1, 2)]
        energy = (I[:, :, 0:-1] - (I[:, :, 0:-1] + I[:, :, 1:]) * 0.5) ** 2
        links = np.zeros((2,) + I.shape)
        links[0, 1:, :, 0:-1] = energy[0:-1]
        links[1, :, :, 0:-1] = energy
        # NOTE(review): this DIVIDES by alpha while weights_structure MULTIPLIES
        # by it - confirm the inversion is intentional
        links = links / self.parent.alpha
        return self.to_hash(vectors, links)
# Given a bitmask list of methods and all the useful energy functions, generates a tuple of dictionaries,
# that create an associations between a structure key and it's own energy function
# @I: The referring image (skeleton)
# @Imp: The importance map
# @ite: The iteration counter energy function
# @V: The vector tracking enegy function
# @methods: A bit mask to identify which method should be actived
# returns: a dictionary that associates a structure key to an array of weights
#
# Example:
# @I [[2, 1, 0], [0, 1, 3], [2, 2, 2]]
# @Imp [[2, 2], [1, 3], [0, 0]]
# @ite [[2, 1], [1, 1], [2, 4]]
# @V [[0, 0], [0, 0], [0, 0]]
# @methods vs.IMP | vs.ITE
# returns ({(1, 1, 2): [[2, 2, 0], [1, 3, 0], [0, 0, 0]]}, {(1, 1, 2): [[2, 1, 0], [1, 1, 0], [2, 4, 0]]})
def select_methods(self, I, Imp, ite, V, methods):
all_weights = ()
if (vs.STR & methods) != 0:
all_weights += (self.weights_structure(I),)
if (vs.IMP & methods) != 0:
all_weights += (self.weights_importance(I, Imp),)
if (vs.ITE & methods) != 0:
all_weights += (self.weights_iterations(I, ite),)
if (vs.FIT & methods) != 0:
all_weights += (self.weights_frame_iterations(I, ite),)
if (vs.DEE & methods) != 0:
all_weights += (self.weights_deepness(I),)
if (vs.DIA & methods) != 0:
all_weights += | |
# -*- coding: utf-8 -*-
import ast
import builtins
import re
import token
import tokenize
import os.path
from thonny.assistance import ErrorHelper, Suggestion, name_similarity, add_error_helper
from thonny import assistance
from thonny.misc_utils import running_on_linux, running_on_windows
class SyntaxErrorHelper(ErrorHelper):
    """Explains SyntaxErrors: classifies the message, tokenizes the offending
    file, and produces suggestions (missing colon, unbalanced brackets, ...)."""

    def __init__(self, error_info):
        super().__init__(error_info)
        self.tokens = []
        self.token_error = None
        if self.error_info["message"] == "EOL while scanning string literal":
            self.intro_text = (
                "You haven't properly closed the string on line %s." % self.error_info["lineno"]
                + "\n(If you want a multi-line string, then surround it with"
                + " `'''` or `\"\"\"` at both ends.)"
            )
        elif self.error_info["message"] == "EOF while scanning triple-quoted string literal":
            # lineno is not useful, as it is at the end of the file and user probably
            # didn't want the string to end there
            self.intro_text = "You haven't properly closed a triple-quoted string"
        else:
            if self.error_info["filename"] and os.path.isfile(self.error_info["filename"]):
                # tokenize the file so the _sug_* helpers can walk the tokens;
                # a TokenError (e.g. unclosed bracket) is remembered, not raised
                with open(self.error_info["filename"], mode="rb") as fp:
                    try:
                        for t in tokenize.tokenize(fp.readline):
                            self.tokens.append(t)
                    except tokenize.TokenError as e:
                        self.token_error = e
                # guarantee the token list ends with a terminator token so the
                # scanning loops below cannot run off the end
                if not self.tokens or self.tokens[-1].type not in [
                    token.ERRORTOKEN,
                    token.ENDMARKER,
                ]:
                    self.tokens.append(tokenize.TokenInfo(token.ERRORTOKEN, "", None, None, ""))
            else:
                self.tokens = []
            unbalanced = self._sug_unbalanced_parens()
            if unbalanced:
                self.intro_text = (
                    "Unbalanced parentheses, brackets or braces:\n\n" + unbalanced.body
                )
                self.intro_confidence = 5
            else:
                self.intro_text = "Python doesn't know how to read your program."
                if "^" in str(self.error_info):
                    self.intro_text += (
                        "\n\nSmall `^` in the original error message shows where it gave up,"
                        + " but the actual mistake can be before this."
                    )
        self.suggestions = [self._sug_missing_or_misplaced_colon()]

    def _sug_missing_or_misplaced_colon(self):
        """Scan the tokens for a compound-statement keyword whose header does
        not end with a colon (or has extra content before it)."""
        i = 0
        title = "Did you forget the colon?"
        relevance = 0
        body = ""
        while i < len(self.tokens) and self.tokens[i].type != token.ENDMARKER:
            t = self.tokens[i]
            if t.string in [
                "if",
                "elif",
                "else",
                "while",
                "for",
                "with",
                "try",
                "except",
                "finally",
                "class",
                "def",
            ]:
                keyword_pos = i
                # advance to the end of this header: a newline, the colon, or EOF
                while (
                    self.tokens[i].type
                    not in [
                        token.NEWLINE,
                        token.ENDMARKER,
                        token.COLON,  # colon may be OP
                        token.RBRACE,
                    ]
                    and self.tokens[i].string != ":"
                ):
                    old_i = i
                    if self.tokens[i].string in "([{":
                        # skip bracketed sub-expressions atomically
                        i = self._skip_braced_part(i)
                        assert i > old_i
                        if i == len(self.tokens):
                            return None
                    else:
                        i += 1
                if self.tokens[i].string != ":":
                    relevance = 9
                    body = "`%s` header must end with a colon." % t.string
                    break
                # Colon was present, but maybe it should have been right
                # after the keyword.
                if (
                    t.string in ["else", "try", "finally"]
                    and self.tokens[keyword_pos + 1].string != ":"
                ):
                    title = "Incorrect use of `%s`" % t.string
                    body = "Nothing is allowed between `%s` and colon." % t.string
                    relevance = 9
                    if (
                        self.tokens[keyword_pos + 1].type not in (token.NEWLINE, tokenize.COMMENT)
                        and t.string == "else"
                    ):
                        body = "If you want to specify a conditon, then use `elif` or nested `if`."
                    break
            i += 1
        return Suggestion("missing-or-misplaced-colon", title, body, relevance)

    def _sug_unbalanced_parens(self):
        """Wrap the first bracket-balance problem (if any) in a Suggestion."""
        problem = self._find_first_braces_problem()
        if not problem:
            return None
        # NOTE(review): the suggestion id duplicates the colon suggestion's id
        # ("missing-or-misplaced-colon") - looks like a copy-paste; kept as-is
        # since changing it would change behavior for consumers of the id
        return Suggestion("missing-or-misplaced-colon", "Unbalanced brackets", problem[1], 8)

    def _sug_wrong_increment_op(self):
        # placeholder: not implemented yet
        pass

    def _sug_wrong_decrement_op(self):
        # placeholder: not implemented yet
        pass

    def _sug_wrong_comparison_op(self):
        # placeholder: not implemented yet
        pass

    def _sug_switched_assignment_sides(self):
        # placeholder: not implemented yet
        pass

    def _skip_braced_part(self, token_index):
        """Given the index of an opening bracket token, return the index just
        past its matching closer (or len(self.tokens) if never closed)."""
        assert self.tokens[token_index].string in ["(", "[", "{"]
        level = 1
        token_index += 1
        while token_index < len(self.tokens):
            if self.tokens[token_index].string in ["(", "[", "{"]:
                level += 1
            elif self.tokens[token_index].string in [")", "]", "}"]:
                level -= 1
            token_index += 1
            if level <= 0:
                return token_index
        assert token_index == len(self.tokens)
        return token_index

    def _find_first_braces_problem(self):
        """Return (token, message) for the first unbalanced/mismatched bracket,
        or None when all brackets pair up."""
        # closers = {'(':')', '{':'}', '[':']'}
        openers = {")": "(", "}": "{", "]": "["}
        brace_stack = []
        for t in self.tokens:
            if t.string in ["(", "[", "{"]:
                brace_stack.append(t)
            elif t.string in [")", "]", "}"]:
                if not brace_stack:
                    # closer with no opener at all
                    return (
                        t,
                        "Found '`%s`' at `line %d <%s>`_ without preceding matching '`%s`'"
                        % (
                            t.string,
                            t.start[0],
                            assistance.format_file_url(
                                self.error_info["filename"], t.start[0], t.start[1]
                            ),
                            openers[t.string],
                        ),
                    )
                elif brace_stack[-1].string != openers[t.string]:
                    # closer does not match the most recent opener
                    return (
                        t,
                        "Found '`%s`' at `line %d <%s>`__ when last unmatched opener was '`%s`' at `line %d <%s>`__"
                        % (
                            t.string,
                            t.start[0],
                            assistance.format_file_url(
                                self.error_info["filename"], t.start[0], t.start[1]
                            ),
                            brace_stack[-1].string,
                            brace_stack[-1].start[0],
                            assistance.format_file_url(
                                self.error_info["filename"],
                                brace_stack[-1].start[0],
                                brace_stack[-1].start[1],
                            ),
                        ),
                    )
                else:
                    brace_stack.pop()
        if brace_stack:
            # opener left unclosed at end of program
            return (
                brace_stack[-1],
                "'`%s`' at `line %d <%s>`_ is not closed by the end of the program"
                % (
                    brace_stack[-1].string,
                    brace_stack[-1].start[0],
                    assistance.format_file_url(
                        self.error_info["filename"],
                        brace_stack[-1].start[0],
                        brace_stack[-1].start[1],
                    ),
                ),
            )
        return None
class NameErrorHelper(ErrorHelper):
    """Explains NameErrors: extracts the unknown name from the message and
    offers spelling/quoting/import suggestions."""

    def __init__(self, error_info):
        super().__init__(error_info)
        # the unknown name is quoted inside the message,
        # e.g. "name 'foo' is not defined"
        names = re.findall(r"\'.*\'", error_info["message"])
        assert len(names) == 1
        self.name = names[0].strip("'")
        self.intro_text = "Python doesn't know what `%s` stands for." % self.name
        self.suggestions = [
            self._sug_bad_spelling(),
            self._sug_missing_quotes(),
            self._sug_missing_import(),
            self._sug_local_from_global(),
            self._sug_not_defined_yet(),
        ]
def _sug_missing_quotes(self):
if self._is_attribute_value() or self._is_call_function() or self._is_subscript_value():
relevance = 0
else:
relevance = 5
return Suggestion(
"missing-quotes",
"Did you actually mean string (text)?",
'If you didn\'t mean a variable but literal text "%s", then surround it with quotes.'
% self.name,
relevance,
)
def _sug_bad_spelling(self):
    """Suggest similarly spelled names from builtins, keywords and the frame."""
    # Yes, it would be more proper to consult builtins from the backend,
    # but it's easier this way...
    known_names = {n for n in dir(builtins) if not n.startswith("_")}
    known_names |= {"pass", "break", "continue", "return", "yield"}
    for scope in (self.last_frame.globals, self.last_frame.locals):
        if scope is not None:
            known_names |= set(scope.keys())
    similar_names = {self.name}
    if not known_names:
        relevance = 3
    else:
        # Relevance grows with the best similarity score found.
        relevance = 0
        for candidate in known_names:
            score = name_similarity(candidate, self.name)
            if score > 4:
                similar_names.add(candidate)
                relevance = max(score, relevance)
    if len(similar_names) > 1:
        body = "I found similar names. Are all of them spelled correctly?\n\n"
        # TODO: add location info
        body += "".join(
            "* `%s`\n\n" % candidate
            for candidate in sorted(similar_names, key=lambda s: s.lower())
        )
    else:
        body = (
            "Compare the name with corresponding definition / assignment / documentation."
            + " Don't forget that case of the letters matters!"
        )
    return Suggestion("bad-spelling-name", "Did you misspell it (somewhere)?", body, relevance)
def _sug_missing_import(self):
    """Suggest an import when the unknown name matches a well-known module member.

    Relevance is boosted when the name appears in the table below or
    matches the special cases ``pi`` / ``argv``.
    """
    # Module name -> names commonly imported from it in beginner code.
    likely_importable_functions = {
        "math": {"ceil", "floor", "sqrt", "sin", "cos", "degrees"},
        "random": {"randint"},
        "turtle": {
            "left",
            "right",
            "forward",
            "fd",
            "goto",
            "setpos",
            "Turtle",
            "penup",
            "up",
            "pendown",
            "down",
            "color",
            "pencolor",
            "fillcolor",
            "begin_fill",
            "end_fill",
            "pensize",
            "width",
        },
        "re": {"search", "match", "findall"},
        "datetime": {"date", "time", "datetime", "today"},
        "statistics": {
            "mean",
            "median",
            "median_low",
            "median_high",
            "mode",
            "pstdev",
            "pvariance",
            "stdev",
            "variance",
        },
        "os": {"listdir"},
        "time": {"time", "sleep"},
    }
    body = None
    if self._is_call_function():
        relevance = 5
        # A called name matching a known module member is a strong hint.
        for mod in likely_importable_functions:
            if self.name in likely_importable_functions[mod]:
                relevance += 3
                body = (
                    "If you meant `%s` from module `%s`, then add\n\n`from %s import %s`\n\nto the beginning of your script."
                    % (self.name, mod, mod, self.name)
                )
                break
    elif self._is_attribute_value():
        # Attribute access suggests the name itself is a module.
        relevance = 5
        body = (
            "If you meant module `%s`, then add `import %s` to the beginning of your script"
            % (self.name, self.name)
        )
        if self.name in likely_importable_functions:
            relevance += 3
    elif self._is_subscript_value() and self.name != "argv":
        relevance = 0
    elif self.name == "pi":
        body = "If you meant the constant π, then add `from math import pi` to the beginning of your script."
        relevance = 8
    elif self.name == "argv":
        body = "If you meant the list with program arguments, then add `from sys import argv` to the beginning of your script."
        relevance = 8
    else:
        relevance = 3
    if body is None:
        body = "Some functions/variables need to be imported before they can be used."
    return Suggestion("missing-import", "Did you forget to import it?", body, relevance)
def _sug_local_from_global(self):
    """Suggest the name is local to a function when the error is at module level.

    Walks the module AST looking for functions that bind the name (as a
    parameter or assignment target) without declaring it ``global``.

    Fix: corrected the user-facing typo "acces" -> "access" in the
    suggestion title.
    """
    relevance = 0
    body = None
    if self.last_frame.code_name == "<module>" and self.last_frame_module_ast is not None:
        function_names = set()
        for node in ast.walk(self.last_frame_module_ast):
            if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
                # The name is local if it is one of the function's parameters...
                if self.name in map(lambda x: x.arg, node.args.args):
                    function_names.add(node.name)
                # TODO: varargs, kw, ...
                declared_global = False
                for localnode in ast.walk(node):
                    # ...or assigned to inside the function body...
                    if (
                        isinstance(localnode, ast.Name)
                        and localnode.id == self.name
                        and isinstance(localnode.ctx, ast.Store)
                    ):
                        function_names.add(node.name)
                    # ...unless the function declared it global.
                    elif isinstance(localnode, ast.Global) and self.name in localnode.names:
                        declared_global = True
                if node.name in function_names and declared_global:
                    function_names.remove(node.name)
        if function_names:
            relevance = 9
            body = (
                (
                    "Name `%s` defined in `%s` is not accessible in the global/module level."
                    % (self.name, " and ".join(function_names))
                )
                + "\n\nIf you need that data at the global level, then consider changing the function so that it `return`-s the value."
            )
    return Suggestion(
        "local-from-global",
        "Are you trying to access a local variable outside of the function?",
        body,
        relevance,
    )
def _sug_not_defined_yet(self):
return Suggestion(
"not-defined-yet",
"Has Python executed the definition?",
(
"Don't forget that name becomes defined when | |
#!/usr/bin/env python
# encoding: utf-8
"""
ScriptFilter API for building Alfred ScriptFilter entries.
.. module:: scriptfilter
:platform: MacOSX
:synopsis: Create and build XML for Alfred's ScriptFilter output.
.. moduleauthor:: Ritashugisha <<EMAIL>>
`Documentation <?>`_.
`License MIT <http://opensource.org/licenses/MIT>`_.
`Copyright 2014 The Alfred Bundler Team`_.
-> Usage
===============================================================================
To include this api in your Python scripts, copy this ``scriptfilter.py`` to
a viable place for you to import.
Import the ScriptFilter client:
from scriptfilter import ScriptFilter
my_filter = ScriptFilter(debug=False)
Now that you have access to the client, you can add entries to the filter.
my_filter.add(
title='My Entry',
subtitle='A subtitle for my entry',
arg='My entry\'s passed argument',
adv_subtitle={
'shift': 'Subtitle when shift is pressed',
'cmd': 'Subtitle when cmd is pressed'
},
uid='my custom uid',
icon='path to your icon.png'
)
This will ensure that this entry will be rendered to XML (along with any
other entries that you add) that can be accepted by Alfred's ScriptFilter obj.
In order to obtain the XML, either print the filter name or grab the returned
value from the .get() method.
# Option 1
print my_filter
# Option 2
filter_output = my_filter.get()
-> Revisions
===============================================================================
1.1, 10-9-14: Initial build for just script filter output
"""
import os
import logging
import random
import xml.etree.ElementTree as etree
# Configure root logging at import time; every record is tagged with this
# file's basename and the originating line number.
logging.basicConfig(
    level=logging.DEBUG,
    format=('[%(asctime)s] '
            '[{}:%(lineno)d] '
            '[%(levelname)s] '
            '%(message)s').format(os.path.basename(__file__)),
    datefmt='%H:%M:%S'
)
# Module metadata.
AUTHOR = 'The <NAME>'
DATE = '10-9-14'
VERSION = '1.0'
class ScriptFilter:
""" Script Filter class used for building XML for a script filter object.
Public class used to initialize the main items element and the entries list
Script filter element is built by:
_filter = bundler.wrapper('scriptfilter')
:returns: Built items XML
:rtype: ``str`` or ``unicode``
"""
def __init__(self, debug=False):
    """ Initialize the ScriptFilter object.
    :param debug: Allow debugging for the script filter object.
    :type debug: bool
    """
    self.log = logging.getLogger(self.__class__.__name__)
    self.debug = debug
    # Root <items> element every entry is attached to, plus the XML
    # prolog prepended when the document is rendered.
    self.items = etree.Element('items')
    self.entries = []
    self.header = '<?xml version="1.0" encoding="UTF-8"?>'
def __repr__(self):
    """ Get the object's current XML output.
    Simply printing or returning the object variable will call this method
    return _filter
    :returns: Built XML from the items element
    :rtype: ``str`` or ``unicode``
    """
    # Delegate to get() so printing the object renders the full document.
    return self.get()
def _build_xml(self):
    """ Build the XML for the current entries.
    :returns: Built XML from the items element
    :rtype: ``str`` or ``unicode``
    """
    # Fix: render the entries with an explicit loop instead of a list
    # comprehension built only for its side effects.
    # NOTE(review): each _build() call appends a fresh <item> to self.items,
    # so calling get() twice seems to duplicate entries - confirm intended.
    for entry in self.entries:
        entry._build()
    return '{}{}'.format(self.header, etree.tostring(self.items))
def get(self):
    """ Grab the built XML if you need the values before printing them.
    :returns: Built XML from the items element
    :rtype: ``str`` or ``unicode``
    """
    # Thin public alias over the private builder.
    return self._build_xml()
def add(self, **passed):
    """ Add an entry to the script filter object.
    Parameters must be passed as variable references
    add(title='Title', subtitle='Subtitle', arg='Argument')
    :param title: Title of the entry
    :type title: ``str`` or ``unicode``
    :param subtitle: Subtitle of the entry
    :type subtitle: ``str`` or ``unicode``
    :param arg: Argument of entry
    :type arg: ``str`` or ``unicode``
    :param icon: Valid path to icon for entry
    :type icon: ``str`` or ``unicode``
    :param uid: UID of the entry
    :type uid: ``str`` or ``unicode``
    :param valid: True if the entry is valid
    :type valid: ``bool``
    :param autocomplete: String to be autocompleted
    :type autocomplete: ``str`` or ``unicode``
    :param type: Entry type ('file' or 'file:skipcheck')
    :type type: ``str`` or ``unicode``
    :param icon_type: Entry icon type ('fileicon' or 'filetype')
    :type icon_type: ``str`` or ``unicode``
    :param adv_subtitle: Advanced subtitle dictionary
    :type adv_subtitle: ``dict``
    :param adv_text: Advanced text dictionary
    :type adv_text: ``dict``
    """
    _new_entry = ScriptFilter.Entry(
        self.items, self.log, self.debug
    )._add(**passed)
    # Fix: identity comparison with None instead of `!= None`.
    if _new_entry is not None:
        self.entries.append(_new_entry)
class Entry:
""" Nested ScriptFilter Entry class used to build the XML for an entry.
Initializes the reference to both the ``items`` root as well as the
single item entry under self.item
:param root: The items element
:type root: xml.etree.ElementTree
"""
def __init__(self, root, log, debug):
    """ Initializes the Entry object.
    :param root: Root of the items node
    :type root: xml.etree.ElementTree.Element
    :param log: Log for the ScriptFilter object
    :type log: logging.logger
    :param debug: Allow debugging for entry manipulation
    :type debug: bool
    """
    self.log = log
    self.debug = debug
    self.root = root
    # Accepted keyword arguments and the type(s) each may have.
    # NOTE: `unicode` makes this module Python 2 only.
    self.entry_options = {
        'title': (str, unicode,),
        'subtitle': (str, unicode,),
        'arg': (str, unicode,),
        'icon': (str, unicode,),
        'uid': (str, unicode,),
        'valid': (bool,),
        'autocomplete': (str, unicode,),
        'type': (str, unicode,),
        'icon_type': (str, unicode,),
        'adv_subtitle': (dict,),
        'adv_text': (dict,)
    }
    # Options that must be supplied for the entry to validate.
    self.required_options = ['title']
    # Recognised modifier keys for per-modifier subtitles.
    self._template_adv_subtitle = {
        'shift': None, 'fn': None, 'ctrl': None,
        'alt': None, 'cmd': None
    }
    self._template_adv_text = {'copy': None, 'largetype': None}
    # Allowed values for the 'type' and 'icon_type' options.
    self._available_type = ['file', 'file:skipcheck']
    self._available_icon_type = ['fileicon', 'filetype']
    # Working state of the entry, filled in by _assign_passed().
    self.item = {
        'uid': None,
        'valid': True,
        'autocomplete': None,
        'type': None,
        'title': None,
        'subtitle': None,
        'arg': None,
        'icon': None,
        'icon_type': None,
        'adv_subtitle': self._template_adv_subtitle,
        'adv_text': self._template_adv_text
    }
def _assign_passed(self, passed):
    """ Assigned the passed variables to self.item dictionary.
    :param passed: Passed argument dictionary
    :type passed: **kwargs
    """
    # Normalize keys to lowercase before matching against entry_options.
    _new_passed = {}
    for k, v in passed.iteritems():
        _new_passed[k.lower()] = v
    # Add passed variables to self.item if it is of valid type
    for k, v in _new_passed.iteritems():
        _found = False
        for _k, _v in self.entry_options.iteritems():
            if (k == _k):
                _found = True
                # Drop values of the wrong type (warn when debugging).
                if type(v) not in _v:
                    if self.debug:
                        self.log.warning(', '.join([
                            'removing ({}) invalid type'.format(k),
                            'expected ({})'.format(' or '.join([
                                i.__name__ for i in _v
                            ]))
                        ]))
                else:
                    self.item[k] = v
        if not _found:
            # Unknown parameter name: ignored (warn when debugging).
            if self.debug:
                self.log.warning(', '.join([
                    'removing ({}) unknown parameter'.format(k),
                    'available are ({})'.format(', '.join([
                        i for i in self.entry_options.keys()
                    ]))
                ]))
    # Alfred expects the literal strings 'yes'/'no' for validity.
    self.item['valid'] = (
        'no' if ('valid' in _new_passed.keys()) and \
        not _new_passed['valid'] else 'yes'
    )
    self.item['autocomplete'] = (
        _new_passed['autocomplete'] if 'autocomplete' in \
        _new_passed.keys() else ''
    )
    self.item['uid'] = (
        _new_passed['uid'] if 'uid' in _new_passed.keys() else None
    )
    self.item['type'] = (
        _new_passed['type'] if 'type' in _new_passed.keys() else None
    )
def _validate_item(self):
    """ Validate (and fix) the self.item dictionary's values.
    :returns: True when all required options were supplied, else False
    :rtype: ``bool``
    """
    _valid = True
    # Validate that the required options are present
    for i in self.required_options:
        if self.item[i] == None:
            if self.debug:
                self.log.critical(', '.join([
                    'failed from required option ({})'.format(i),
                    'must be of type ({})'.format(' or '.join([
                        j.__name__ for j in self.entry_options[i]
                    ]))
                ]))
            _valid = False
    # Fix up the advanced dictionary based parameters
    for k, v in {
        'adv_subtitle': self._template_adv_subtitle,
        'adv_text': self._template_adv_text
    }.iteritems():
        # Collect unknown keys first to avoid mutating while iterating.
        _to_pop = []
        for i in self.item[k]:
            if (i not in v.keys()):
                if self.debug:
                    self.log.warning(
                        'removing ({}:{}) invalid option'.format(k, i)
                    )
                _to_pop.append(i)
        [self.item[k].pop(i) for i in _to_pop]
        # Fall back to the all-None template when nothing is left.
        if len(self.item[k].keys()) <= 0:
            self.item[k] = v
    # Fix up the explicit single selection based parameters
    for k, v in {
        'type': self._available_type,
        'icon_type': self._available_icon_type
    }.iteritems():
        if self.item[k] != None and self.item[k] not in v:
            if self.debug:
                self.log.warning(', '.join([
                    'removing ({}) invalid name'.format(k),
                    'expected ({})'.format(' or '.join(v))
                ]))
            self.item[k] = None
    # Validate that the passed icon exists, defaults to icon.png
    if self.item['icon'] != None:
        if not os.path.exists(self.item['icon']):
            if self.debug:
                self.log.warning(', '.join([
                    'defaulting to (icon.png)',
                    '({}) does not exist'.format(self.item['icon'])
                ]))
            self.item['icon'] = 'icon.png'
    # If the UID is empty but the argument isn't,
    # let the UID equal the argument
    if (self.item['arg'] != None) and (self.item['uid'] == None):
        if self.debug:
            self.log.info(
                '"uid" is None, setting "uid" value to "arg" value'
            )
        self.item['uid'] = self.item['arg']
    # Backup method, assigns the UID to a random 5 digit number
    if self.item['uid'] == None:
        if self.debug:
            self.log.info((
                '"uid" is None and "arg" is None, setting "uid" to '
                'random 5 digit integer'
            ))
        self.item['uid'] = str(random.randint(10**(5-1), (10**5)-1))
    return _valid
def _build(self):
""" Build the self.item dictionary into a ElementTree object.
Assumes that the self.item dictionary is validated
"""
_entry = etree.SubElement(self.root, 'item')
# Distinguish between entry attributes and sub-elements
_attribs = ['uid', 'valid', 'autocomplete', 'type']
_statics = ['title', 'subtitle', 'arg']
# Add attributes to entry
for i in _attribs:
if self.item[i] != None:
_entry.attrib[i] = self.item[i]
# Add sub-elements to entry
for i in _statics:
if self.item[i] != None:
_i_entry = etree.SubElement(_entry, i)
_i_entry.text = self.item[i]
# Format and add dictionary based parameters
for i in [
{
'tag': 'subtitle',
'attrib': 'mod',
'data': self.item['adv_subtitle']
},
{
'tag': 'text',
'attrib': 'type',
'data': self.item['adv_text']
}
]:
if not all(v == None for v in i['data'].itervalues()):
for _k, _v in i['data'].iteritems():
if _v != None:
_i_entry = etree.SubElement(_entry, i['tag'])
_i_entry.attrib[i['attrib']] = _k
_i_entry.text = _v
# Add the icon entry
_icon_entry = | |
# Repository: uktrade/exceptional-review-procedure
import json
from directory_components import forms
from directory_constants import choices
from directory_forms_api_client.forms import GovNotifyEmailActionMixin
from django.forms import Textarea, HiddenInput
from core import constants, fields
# Sentinel value shared by every choice list that offers an "Other" option.
OTHER = 'OTHER'
# Consistency fix: use the OTHER constant instead of repeating the literal.
INDUSTRY_CHOICES = [('', 'Please select')] + choices.SECTORS + [(OTHER, 'Other')]
INCOME_BRACKET_CHOICES = (
    ('', 'Please select'),
    ("0-11.85k", "Up to £11,850"),
    ("11.85k-46.35k", "£11,851 to £46,350"),
    ("46.35k-150k", "£46,351 to £150,000"),
    ("150k+", "Over £150,000")
)
TURNOVER_CHOICES = (
    ('', 'Please select'),
    ('0-25k', 'under £25,000'),
    ('25k-100k', '£25,000 - £100,000'),
    ('100k-1m', '£100,000 - £1,000,000'),
    ('1m-5m', '£1,000,000 - £5,000,000'),
    ('5m-25m', '£5,000,000 - £25,000,000'),
    ('25m-50m', '£25,000,000 - £50,000,000'),
    ('50m+', '£50,000,000+')
)
SALES_VOLUME_UNIT_CHOICES = [
    ('KILOGRAM', 'kilograms (kg)'),
    ('LITRE', 'litres'),
    ('METERS', 'meters'),
    ('UNITS', 'units (number of items)'),
    (OTHER, 'Other')
]
COMPANY_TYPE_CHOICES = (
    ('LIMITED', 'UK private or public limited company'),
    (OTHER, 'Other type of UK organisation'),
)
# Actual-vs-expected variants, worded per question type.
CHOICES_CHANGE_TYPE_VOLUME = (
    (constants.ACTUAL, 'Actual change in volume'),
    (constants.EXPECTED, 'Expected change in volume')
)
CHOICES_CHANGE_TYPE_PRICE = (
    (constants.ACTUAL, 'Actual change in price'),
    (constants.EXPECTED, 'Expected change in price')
)
CHOICES_CHANGE_TYPE = (
    (constants.ACTUAL, 'Actual change'),
    (constants.EXPECTED, 'Expected change')
)
CHOICES_CHANGE_TYPE_CHOICE = (
    (constants.ACTUAL, 'Actual change in choice'),
    (constants.EXPECTED, 'Expected change in choice')
)
HELP_TEXT_SELECT_CHANGE_TYPE = 'Select actual, expected, or both'
class ConsumerChoiceChangeForm(forms.Form):
    """Nested form: actual/expected change in consumer choice plus free text."""
    choice_change_type = forms.MultipleChoiceField(
        label='',
        help_text=HELP_TEXT_SELECT_CHANGE_TYPE,
        choices=CHOICES_CHANGE_TYPE_CHOICE,
        widget=forms.CheckboxSelectInlineLabelMultiple,
    )
    choice_change_comment = forms.CharField(
        label="Tell us more",
        widget=Textarea(attrs={'rows': 6}),
    )
class VolumeChangeForm(forms.Form):
    """Nested form: actual/expected change in volume plus free text."""
    volume_changed_type = forms.MultipleChoiceField(
        label='',
        help_text=HELP_TEXT_SELECT_CHANGE_TYPE,
        choices=CHOICES_CHANGE_TYPE_VOLUME,
        widget=forms.CheckboxSelectInlineLabelMultiple,
    )
    volumes_change_comment = forms.CharField(
        label="Tell us more",
        widget=Textarea(attrs={'rows': 6}),
    )
class PriceChangeForm(forms.Form):
    """Nested form: actual/expected change in price plus free text."""
    price_changed_type = forms.MultipleChoiceField(
        label='',
        help_text=HELP_TEXT_SELECT_CHANGE_TYPE,
        choices=CHOICES_CHANGE_TYPE_PRICE,
        widget=forms.CheckboxSelectInlineLabelMultiple,
    )
    price_change_comment = forms.CharField(
        label="Tell us more",
        widget=Textarea(attrs={'rows': 6}),
    )
class MarketSizeChangeForm(forms.Form):
    """Nested form: actual/expected change in market size plus free text."""
    market_size_changed_type = forms.MultipleChoiceField(
        label='',
        help_text=HELP_TEXT_SELECT_CHANGE_TYPE,
        choices=CHOICES_CHANGE_TYPE_VOLUME,
        widget=forms.CheckboxSelectInlineLabelMultiple,
    )
    market_size_change_comment = forms.CharField(
        label="Tell us more",
        widget=Textarea(attrs={'rows': 6}),
    )
class MarketPriceChangeForm(forms.Form):
    """Nested form: actual/expected change in market price plus free text."""
    market_price_changed_type = forms.MultipleChoiceField(
        label='',
        help_text=HELP_TEXT_SELECT_CHANGE_TYPE,
        choices=CHOICES_CHANGE_TYPE_PRICE,
        widget=forms.CheckboxSelectInlineLabelMultiple,
    )
    market_price_change_comment = forms.CharField(
        label="Tell us more",
        widget=Textarea(attrs={'rows': 6}),
    )
class OtherChangesForm(forms.Form):
    """Nested form: actual/expected other business changes plus free text."""
    has_other_changes_type = forms.MultipleChoiceField(
        label='',
        help_text=HELP_TEXT_SELECT_CHANGE_TYPE,
        choices=CHOICES_CHANGE_TYPE,
        widget=forms.CheckboxSelectInlineLabelMultiple,
    )
    other_changes_comment = forms.CharField(
        label="Tell us more",
        widget=Textarea(attrs={'rows': 6}),
    )
class MarketSizeDetailsForm(forms.Form):
    """Nested form: financial year and value of the market, shown when known."""
    market_size_year = forms.ChoiceField(
        label='Financial year',
        help_text='Give the most recent data you can.',
        choices=(
            ('', 'Please select'),
            ('2019', '2019'),
            ('2018', '2018'),
            ('2017', '2017'),
        ),
    )
    market_size = forms.IntegerField(
        label='Market value',
        container_css_classes='form-group prefix-pound',
    )
class RoutingUserTypeForm(forms.Form):
    """Routing step: which kind of respondent is filling in the form."""
    LABEL_UK_BUSINESS = "As a UK business or trade organisation"
    LABEL_UK_CONSUMER = "As a UK consumer or consumer representative"
    LABEL_DEVELOPING_COUNTRY_COMPANY = (
        "As an exporter, or representative, from a developing country eligible"
        " for the Generalised Scheme of Preferences (GSP) or with an Economic Partnership"
        " Agreement (EPA) with the UK."
    )
    CHOICES = (
        (constants.UK_BUSINESS, LABEL_UK_BUSINESS),
        (constants.UK_CONSUMER, LABEL_UK_CONSUMER),
        (constants.DEVELOPING_COUNTRY_COMPANY, LABEL_DEVELOPING_COUNTRY_COMPANY),
    )
    choice = forms.ChoiceField(
        label='',
        widget=forms.RadioSelect(),
        choices=CHOICES,
    )
class RoutingImportFromOverseasForm(forms.Form):
    """Routing step: importer vs UK producer (coerced to a boolean)."""
    choice = fields.TypedChoiceField(
        label='',
        # Submitted values arrive as strings; map 'True' -> True.
        coerce=lambda x: x == 'True',
        choices=[
            (True, 'I import the affected goods from overseas'),
            (False, 'I produce the affected goods in the UK')
        ],
        widget=forms.RadioSelect,
    )
class ProductSearchForm(forms.Form):
    """Search form for affected goods; a commodity code must be selected."""
    MESSAGE_MISSING_PRODUCT = 'Please specify an affected product'
    term = forms.CharField(
        label='',
        help_text="To feedback on other types of goods, you'll need to submit another form afterwards.",
        required=False,
        container_css_classes='form-group text-input-with-submit-button-container',
        widget=fields.TextInputWithSubmitButton(attrs={'form': 'search-form'}),
    )
    commodity = forms.CharField(
        label='Commodity codes',
        help_text='Find the commodity codes via the commodity code browser.',
        widget=HiddenInput,
    )
    def clean(self):
        # The visible search box is optional, but the hidden commodity
        # field must have been populated; surface the error on 'term'.
        super().clean()
        if not self.cleaned_data.get('commodity'):
            self.add_error('term', self.MESSAGE_MISSING_PRODUCT)
    def clean_commodity(self):
        # The hidden field carries a JSON-encoded commodity payload.
        return json.loads(self.cleaned_data['commodity'])
class OtherMetricNameForm(forms.Form):
    """Nested form: name of the custom metric when 'Other' unit is chosen."""
    other_metric_name = forms.CharField(label='Metric name')
# Human-readable labels for the financial quarters referenced below.
Q3_2019_LABEL = 'July to September 2019'
Q2_2019_LABEL = 'April to June 2019'
Q1_2019_LABEL = 'January to March 2019'
# NOTE(review): lowercase 'label' is inconsistent with the names above,
# but it is referenced elsewhere, so it is kept as-is.
Q4_2018_label = 'October to December 2018'
class SalesVolumeBeforeBrexitForm(fields.BindNestedFormMixin, forms.Form):
    """Quarterly sales volumes before Brexit, in a user-selected unit."""
    sales_volume_unit = fields.RadioNested(
        label='Select a metric',
        choices=SALES_VOLUME_UNIT_CHOICES,
        nested_form_class=OtherMetricNameForm,
        nested_form_choice=OTHER,
    )
    quarter_three_2019_sales_volume = forms.IntegerField(
        label=Q3_2019_LABEL
    )
    quarter_two_2019_sales_volume = forms.IntegerField(
        label=Q2_2019_LABEL
    )
    quarter_one_2019_sales_volume = forms.IntegerField(
        label=Q1_2019_LABEL
    )
    quarter_four_2018_sales_volume = forms.IntegerField(
        label=Q4_2018_label
    )
class SalesRevenueBeforeBrexitForm(forms.Form):
    """Quarterly sales revenue (GBP) before Brexit."""
    quarter_three_2019_sales_revenue = forms.IntegerField(
        label=Q3_2019_LABEL,
        container_css_classes='form-group prefix-pound',
    )
    quarter_two_2019_sales_revenue = forms.IntegerField(
        label=Q2_2019_LABEL,
        container_css_classes='form-group prefix-pound',
    )
    quarter_one_2019_sales_revenue = forms.IntegerField(
        label=Q1_2019_LABEL,
        container_css_classes='form-group prefix-pound',
    )
    quarter_four_2018_sales_revenue = forms.IntegerField(
        label=Q4_2018_label,
        container_css_classes='form-group prefix-pound',
    )
class SalesAfterBrexitForm(fields.BindNestedFormMixin, forms.Form):
    """Changes to import volumes and sales prices since Brexit."""
    has_volume_changed = fields.RadioNested(
        label='Import volumes',
        nested_form_class=VolumeChangeForm,
        coerce=lambda x: x == 'True',
        choices=[
            (True, "I'm aware of changes to my import volumes for these goods"),
            (False, "I'm not aware of changes to my import volumes for these goods")
        ],
    )
    has_price_changed = fields.RadioNested(
        label='Sales prices',
        nested_form_class=PriceChangeForm,
        coerce=lambda x: x == 'True',
        choices=[
            (True, "I'm aware of changes to my prices for products related to these goods"),
            (False, "I'm not aware of changes to my prices for products related to these goods")
        ],
    )
class ExportsAfterBrexitForm(fields.BindNestedFormMixin, forms.Form):
    """Changes to UK export volumes and prices since Brexit."""
    has_volume_changed = fields.RadioNested(
        label='Export volumes',
        nested_form_class=VolumeChangeForm,
        coerce=lambda x: x == 'True',
        choices=[
            (True, "I'm aware of changes to my UK export volumes for these goods"),
            # Fix: this option previously said "import volumes", contradicting
            # the field label and the affirmative option above.
            (False, "I'm not aware of changes to my UK export volumes for these goods")
        ],
    )
    has_price_changed = fields.RadioNested(
        # Fix: label was the ungrammatical 'Prices changes'.
        label='Price changes',
        nested_form_class=PriceChangeForm,
        coerce=lambda x: x == 'True',
        choices=[
            (True, "I'm aware of changes to my UK export prices for these goods"),
            (False, "I'm not aware of changes to my UK export prices for these goods")
        ],
    )
class MarketSizeAfterBrexitForm(fields.BindNestedFormMixin, forms.Form):
    """Changes to own sales volumes and prices for imported goods since Brexit."""
    has_market_size_changed = fields.RadioNested(
        label='Sales volume',
        nested_form_class=MarketSizeChangeForm,
        coerce=lambda x: x == 'True',
        choices=[
            (True, "I'm aware of changes to my sales volumes for these goods"),
            (False, "I'm not aware of changes to my sales volumes for these goods")
        ],
    )
    has_market_price_changed = fields.RadioNested(
        label='Sales price',
        nested_form_class=MarketPriceChangeForm,
        coerce=lambda x: x == 'True',
        choices=[
            (True, "I'm aware of changes to my prices for these imported goods"),
            (False, "I'm not aware of changes to my prices for these imported goods")
        ],
    )
class ExportMarketSizeAfterBrexitForm(fields.BindNestedFormMixin, forms.Form):
    """Changes observed among other exporters selling these goods to the UK."""
    has_market_size_changed = fields.RadioNested(
        label='Sales volume',
        nested_form_class=MarketSizeChangeForm,
        coerce=lambda x: x == 'True',
        choices=[
            (True, "I'm aware of changes in volume for others exporting these goods to the UK"),
            (False, "I'm not aware of changes in volume for others exporting these goods to the UK")
        ],
    )
    has_market_price_changed = fields.RadioNested(
        label='Sales price',
        nested_form_class=MarketPriceChangeForm,
        coerce=lambda x: x == 'True',
        choices=[
            (
                True,
                "I'm aware of changes to the prices others are selling these goods for when exporting to the UK"
            ),
            (
                False,
                "I'm not aware of changes to the prices others are selling these goods for when exporting to the UK"
            )
        ],
    )
class OtherChangesAfterBrexitForm(fields.BindNestedFormMixin, forms.Form):
    """Any other business changes since Brexit, plus free-text information."""
    has_other_changes = fields.RadioNested(
        label='',
        nested_form_class=OtherChangesForm,
        coerce=lambda x: x == 'True',
        choices=[
            (True, "I'm aware of other changes to my business"),
            (False, "I'm not aware of other changes to my business"),
        ],
    )
    other_information = forms.CharField(
        label='Other information',
        help_text=(
            'Use this opportunity to give us any other supporting information.'
            # Fix: user-facing typo "sensetive" -> "sensitive".
            ' Do not include any sensitive information.'
        ),
        widget=Textarea(attrs={'rows': 6}),
        required=False,
    )
class MarketSizeForm(fields.BindNestedFormMixin, forms.Form):
    """Whether the market size is known; details are collected when it is."""
    market_size_known = fields.RadioNested(
        label='',
        nested_form_class=MarketSizeDetailsForm,
        coerce=lambda x: x == 'True',
        choices=[(True, 'Yes'), (False, 'No')],
    )
class OtherInformationForm(forms.Form):
    """Free-text catch-all for any other supporting information."""
    other_information = forms.CharField(
        label='',
        widget=Textarea(attrs={'rows': 6}),
        required=False,
    )
class ConsumerTypeForm(forms.Form):
    """Whether the respondent is a consumer group or an individual consumer."""
    consumer_type = forms.ChoiceField(
        label='',
        choices=(
            (constants.CONSUMER_GROUP, 'Consumer group'),
            (constants.INDIVIDUAL_CONSUMER, 'Individual consumer'),
        ),
        widget=forms.RadioSelect(),
    )
class OutcomeForm(fields.BindNestedFormMixin, forms.Form):
    """Desired outcome: direction of tariff rate and tariff quota changes."""
    tariff_rate = forms.ChoiceField(
        label='Tariff rate change',
        choices=[
            (constants.INCREASE, 'I want the tariff rate increased'),
            (constants.DECREASE, 'I want the tariff rate decreased'),
            ('N/A', 'I want neither'),
        ],
        widget=forms.RadioSelect(),
    )
    tariff_quota = forms.ChoiceField(
        label='Tariff quota change',
        choices=[
            # Fix: removed a stray trailing space from the displayed label.
            (constants.INCREASE, 'I want the tariff quota increased'),
            (constants.DECREASE, 'I want the tariff quota decreased'),
            ('N/A', 'I want neither'),
        ],
        widget=forms.RadioSelect(),
    )
class BusinessDetailsForm(fields.BindNestedFormMixin, forms.Form):
    """Company identity, sector, size and main employment regions."""
    company_type = forms.ChoiceField(
        label='Company type',
        label_suffix='',
        widget=forms.RadioSelect(),
        choices=COMPANY_TYPE_CHOICES,
    )
    company_name = forms.CharField(label='Company name')
    # Only applicable to registered companies; hidden/disabled via CSS+JS.
    company_number = forms.CharField(
        required=False,
        container_css_classes='form-group js-disabled-only'
    )
    sector = forms.ChoiceField(
        label='Which industry are you in?',
        choices=INDUSTRY_CHOICES,
    )
    employees = forms.ChoiceField(
        label='Number of employees',
        choices=(('', 'Please select'),) + choices.EMPLOYEES,
        required=False,
    )
    turnover = forms.ChoiceField(
        label='Annual turnover for 2018-19',
        choices=TURNOVER_CHOICES,
        required=False,
    )
    employment_regions = fields.MultipleChoiceAutocomplateField(
        label='Where do you employ the most people?',
        choices=choices.EXPERTISE_REGION_CHOICES,
    )
class PersonalDetailsForm(forms.Form):
    """Contact details for a business respondent."""
    given_name = forms.CharField(label='Given name',)
    family_name = forms.CharField(label='Family name')
    email = forms.EmailField(label='Email address')
class ConsumerPersonalDetailsForm(forms.Form):
    """Contact details for a consumer, with optional income and region."""
    given_name = forms.CharField(label='Given name',)
    family_name = forms.CharField(label='Family name')
    email = forms.EmailField(label='Email address')
    income_bracket = forms.ChoiceField(
        label='Personal income before tax (optional)',
        required=False,
        choices=INCOME_BRACKET_CHOICES
    )
    consumer_region = forms.ChoiceField(
        label='Where do you live (optional)?',
        choices=[('', 'Please select')] + choices.EXPERTISE_REGION_CHOICES,
        required=False,
    )
class SummaryForm(forms.Form):
    """Final confirmation step; only carries the reCAPTCHA challenge."""
    captcha = fields.ReCaptchaField(
        label='',
        label_suffix='',
        container_css_classes='govuk-!-margin-top-6 govuk-!-margin-bottom-6',
    )
class SaveForLaterForm(GovNotifyEmailActionMixin, forms.Form):
    """Emails the user a resume link; url/expiry are fixed hidden values."""
    email = forms.EmailField(label='Email address')
    url = forms.CharField(widget=HiddenInput(), disabled=True)
    expiry_timestamp = forms.CharField(widget=HiddenInput(), disabled=True)
class ConsumerChangeForm(fields.BindNestedFormMixin, forms.Form):
    """Consumer-observed changes in price and choice for the goods."""
    has_consumer_price_changed = fields.RadioNested(
        label='Sales changes',
        nested_form_class=PriceChangeForm,
        coerce=lambda x: x == 'True',
        choices=[
            (True, "I'm aware of price changes for these goods"),
            (False, "I'm not aware of price changes for these goods"),
        ],
    )
    has_consumer_choice_changed = fields.RadioNested(
        label='Choice changes',
        nested_form_class=ConsumerChoiceChangeForm,
        coerce=lambda x: x == 'True',
        choices=[
            (True, "I'm aware of changes to consumer choice for these goods"),
            (False, "I'm not aware of changes to consumer choice for these goods"),
        ],
    )
class ConsumerGroupForm(forms.Form):
given_name = forms.CharField(label='Given name',)
| |
brif
network_iface = dev
break
return QEMUCMDTEMPLATE % {'IID': iid,
'NETWORK_TYPE' : network_type,
'NET_BRIDGE' : network_bridge,
'NET_INTERFACE' : network_iface,
'START_NET' : startNetwork(network),
'STOP_NET' : stopNetwork(network),
'ARCHEND' : arch + endianness,
'QEMU_DISK' : qemuDisk,
'QEMU_INIT' : qemuInitValue,
'QEMU_NETWORK' : qemuNetworkConfig(arch, network, isUserNetwork, ports),
'QEMU_ENV_VARS' : qemuEnvVars}
def getNetworkList(data, ifacesWithIps, macChanges):
    """Build candidate network configurations from the emulation serial log.

    :param data: raw serial-log bytes from the inference run
    :param ifacesWithIps: tuples whose first element is an interface name
    :param macChanges: observed MAC address changes
    :returns: list of config tuples; deduplicated by IP (tuple element 0)
        unless FIRMAE_NETWORK is enabled
    """
    global debug
    networkList = []
    deviceHasBridge = False
    for iwi in ifacesWithIps:
        if iwi[0] == 'lo': # skip local network
            continue
        #find all interfaces that are bridged with that interface
        brifs = findIfacesForBridge(data, iwi[0])
        if debug:
            print("brifs for %s %r" % (iwi[0], brifs))
        for dev in brifs:
            #find vlan_ids for all interfaces in the bridge
            vlans = findVlanInfoForDev(data, dev)
            #create a config for each tuple
            config = buildConfig(iwi, dev, vlans, macChanges)
            if config not in networkList:
                networkList.append(config)
            deviceHasBridge = True
        #if there is no bridge just add the interface
        # NOTE(review): deviceHasBridge is sticky across iterations, so once
        # any interface is bridged, later bridgeless interfaces are skipped
        # entirely - confirm this is intended.
        if not brifs and not deviceHasBridge:
            vlans = findVlanInfoForDev(data, iwi[0])
            config = buildConfig(iwi, iwi[0], vlans, macChanges)
            if config not in networkList:
                networkList.append(config)
    if checkVariable("FIRMAE_NETWORK"):
        return networkList
    else:
        # Keep only the first config seen for each IP address.
        ips = set()
        pruned_network = []
        for n in networkList:
            if n[0] not in ips:
                ips.add(n[0])
                pruned_network.append(n)
            else:
                if debug:
                    print("duplicate ip address for interface: ", n)
        return pruned_network
def readWithException(filePath):
    """Read a text file line by line, skipping lines that cannot be decoded.

    Fix: the previous bare ``except:`` swallowed every exception
    (including KeyboardInterrupt); only decode failures are best-effort
    skipped now.

    :param filePath: path of the file to read
    :returns: the successfully decoded lines concatenated into one string
    """
    fileData = ''
    with open(filePath, 'rb') as f:
        while True:
            raw = f.readline()
            if not raw:
                break
            try:
                fileData += raw.decode()
            except UnicodeDecodeError:
                # Best effort: drop lines containing undecodable bytes.
                pass
    return fileData
def inferNetwork(iid, arch, endianness, init):
    """Run the firmware once under QEMU and infer its network configuration.

    Instruments the mounted image (appends bootstrap hooks to the chosen
    init program), boots it for TIMEOUT seconds, then parses the serial
    log for listening ports, interfaces with IPs and MAC changes.

    :param iid: image id, also the scratch sub-directory name
    :param arch: target architecture string
    :param endianness: endianness suffix appended to ``arch``
    :param init: path of the init program inside the image
    :returns: (qemuInitValue, networkList, targetFile, targetData, ports)
    """
    global SCRIPTDIR
    global SCRATCHDIR
    TIMEOUT = int(os.environ['TIMEOUT'])
    targetDir = SCRATCHDIR + '/' + str(iid)
    loopFile = mountImage(targetDir)
    fileType = subprocess.check_output(["file", "-b", "%s/image/%s" % (targetDir, init)]).decode().strip()
    print("[*] Infer test: %s (%s)" % (init, fileType))
    with open(targetDir + '/image/firmadyne/network_type', 'w') as out:
        out.write("None")
    qemuInitValue = 'rdinit=/firmadyne/preInit.sh'
    if os.path.exists(targetDir + '/service'):
        webService = open(targetDir + '/service').read().strip()
    else:
        webService = None
    print("[*] web service: %s" % webService)
    targetFile = ''
    targetData = ''
    out = None
    # Decide where to append our bootstrap hooks, depending on what kind
    # of init program the image uses.
    if not init.endswith('preInit.sh'): # rcS, preinit
        if fileType.find('ELF') == -1 and fileType.find("symbolic link") == -1: # maybe script
            targetFile = targetDir + '/image/' + init
            targetData = readWithException(targetFile)
            out = open(targetFile, 'a')
        # netgear R6200
        elif fileType.find('ELF') != -1 or fileType.find("symbolic link") != -1:
            qemuInitValue = qemuInitValue[2:] # remove 'rd'
            targetFile = targetDir + '/image/firmadyne/preInit.sh'
            targetData = readWithException(targetFile)
            out = open(targetFile, 'a')
            out.write(init + ' &\n')
    else: # preInit.sh
        out = open(targetDir + '/image/firmadyne/preInit.sh', 'a')
    if out:
        out.write('\n/firmadyne/network.sh &\n')
        out.write('/firmadyne/run_service.sh &\n')
        # trendnet TEW-828DRU_1.0.7.2, etc...
        out.write('sleep 36000\n')
        out.close()
    umountImage(targetDir, loopFile)
    print("Running firmware %d: terminating after %d secs..." % (iid, TIMEOUT))
    cmd = "timeout --preserve-status --signal SIGINT {0} ".format(TIMEOUT)
    cmd += "{0}/run.{1}.sh \"{2}\" \"{3}\" ".format(SCRIPTDIR,
                                                    arch + endianness,
                                                    iid,
                                                    qemuInitValue)
    cmd += " 2>&1 > /dev/null"
    os.system(cmd)
    loopFile = mountImage(targetDir)
    if not os.path.exists(targetDir + '/image/firmadyne/nvram_files'):
        print("Infer NVRAM default file!\n")
        os.system("{}/inferDefault.py {}".format(SCRIPTDIR, iid))
    umountImage(targetDir, loopFile)
    # Parse the captured serial log for the runtime network facts.
    data = open("%s/qemu.initial.serial.log" % targetDir, 'rb').read()
    ports = findPorts(data, endianness)
    #find interfaces with non loopback ip addresses
    ifacesWithIps = findNonLoInterfaces(data, endianness)
    #find changes of mac addresses for devices
    macChanges = findMacChanges(data, endianness)
    print('[*] Interfaces: %r' % ifacesWithIps)
    networkList = getNetworkList(data, ifacesWithIps, macChanges)
    return qemuInitValue, networkList, targetFile, targetData, ports
def checkNetwork(networkList):
    """Classify inferred network interfaces and select the ones to emulate.

    @param networkList: list of (ip, dev, vlan, mac, brif) tuples inferred from
        the firmware's boot log
    @return: (filtered network list, network type string) where the type is one
        of "None", "normal", "reload", "bridge", "bridgereload", "default"
    """
    selected = []
    # spare host-side device names handed out to bridge-only interfaces
    spareDevs = ["eth0", "eth1", "eth2", "eth3"]
    netType = "None"
    if not checkVariable("FIRMAE_NETWORK"):
        # network arbitration disabled: pass everything through untouched
        return networkList, netType
    uniqueDevs = set(dev for (ip, dev, vlan, mac, brif) in networkList)
    uniqueIps = set(ip for (ip, dev, vlan, mac, brif) in networkList)
    # Routers typically expose both "ethX" and bridge interfaces (the bridge can
    # be named e.g. guest-lan1 or br0); when both kinds are present, the ethX
    # entries carry DHCP-assigned addresses and should be dropped.
    if (len(uniqueDevs) > 1
            and any(dev.startswith('eth') for dev in uniqueDevs)
            and any(not dev.startswith('eth') for dev in uniqueDevs)):
        print("[*] Check router")
        networkList = [net for net in networkList if not net[1].startswith("eth")]
    # Some firmware reports several IPs on the same device; addresses in the
    # QEMU user-net DHCP range (10.0.2.x) are not the firmware's own config.
    elif (len(uniqueIps) > 1
            and any(ip.startswith("10.0.2.") for ip in uniqueIps)
            and any(not ip.startswith("10.0.2.") for ip in uniqueIps)):
        print("[*] Check router")
        networkList = [net for net in networkList if not net[0].startswith("10.0.2.")]
    if not networkList:
        print("[*] no network interface: bring up default network")
        selected.append(('192.168.0.1', 'eth0', None, None, "br0"))
        return selected, "default"

    def _invalid(net):
        # "X.0.0.0" addresses are placeholders the firmware never configured
        return net[0].endswith(".0.0.0")

    def _isEth(net):
        return net[1].startswith("eth")

    vlanNets = [net for net in networkList if not _invalid(net) and _isEth(net) and net[2] is not None]
    ethNets = [net for net in networkList if not _invalid(net) and _isEth(net)]
    badEthNets = [net for net in networkList if _invalid(net) and _isEth(net)]
    brNets = [net for net in networkList if not _invalid(net) and not _isEth(net)]
    badBrNets = [net for net in networkList if _invalid(net) and not _isEth(net)]
    if vlanNets:
        print("has vlan ethernet")
        selected = vlanNets
        netType = "normal"
    elif ethNets:
        print("has ethernet")
        selected = ethNets
        netType = "normal"
    elif badEthNets:
        print("has ethernet and invalid IP")
        # substitute a sane default address for the unconfigured ones
        for (ip, dev, vlan, mac, brif) in badEthNets:
            selected.append(('192.168.0.1', dev, vlan, mac, brif))
        netType = "reload"
    elif brNets:
        print("only has bridge interface")
        # map each bridge onto a spare ethX device name
        for (ip, dev, vlan, mac, brif) in brNets:
            if spareDevs:
                dev = spareDevs.pop(0)
            selected.append((ip, dev, vlan, mac, brif))
        netType = "bridge"
    elif badBrNets:
        print("only has bridge interface and invalid IP")
        for (ip, dev, vlan, mac, brif) in badBrNets:
            if spareDevs:
                dev = spareDevs.pop(0)
            selected.append(('192.168.0.1', dev, vlan, mac, brif))
        netType = "bridgereload"
    return selected, netType
def process(iid, arch, endianness, makeQemuCmd=False, outfile=None):
    """Try each candidate init program of firmware `iid` until one brings up
    a reachable web service; writes per-firmware state files under SCRATCHDIR
    and a run script to `outfile`.

    @param iid: firmware image id (directory name under SCRATCHDIR)
    @param arch: target architecture ("mips"/"arm")
    @param endianness: "el" or "eb"
    @param makeQemuCmd: unused here -- NOTE(review): kept for caller compatibility
    @param outfile: path for the generated QEMU command script
        (assumes a filterable network was found; otherwise never written)
    @return: True if the web service came up for some init candidate
    """
    success = False
    global SCRIPTDIR
    global SCRATCHDIR
    # iterate over candidate init programs, one per line (trailing empty line dropped)
    for init in open(SCRATCHDIR + "/" + str(iid) + "/init").read().split('\n')[:-1]:
        # record which candidate is currently being tested
        with open(SCRATCHDIR + "/" + str(iid) + "/current_init", 'w') as out:
            out.write(init)
        # boot once to observe the firmware's network setup
        qemuInitValue, networkList, targetFile, targetData, ports = inferNetwork(iid, arch, endianness, init)
        print("[*] ports: %r" % ports)
        # check network interfaces and add script in the file system
        # return the fixed network interface
        print("[*] networkInfo: %r" % networkList)
        filterNetworkList, network_type = checkNetwork(networkList)
        print("[*] filter network info: %r" % filterNetworkList)
        # filter ip
        # some firmware uses multiple network interfaces for one bridge
        # netgear WNDR3400v2-V1.0.0.54_1.0.82.zip - check only one IP
        # asus FW_RT_AC87U_300438250702
        # [('192.168.1.1', 'eth0', None, None), ('169.254.39.3', 'eth1', None, None), ('169.254.39.1', 'eth2', None, None), ('169.254.39.166', 'eth3', None, None)]
        if filterNetworkList:
            # deduplicate IPs and persist them for the run/analysis scripts
            ips = [ip for (ip, dev, vlan, mac, brif) in filterNetworkList]
            ips = set(ips)
            with open(SCRATCHDIR + "/" + str(iid) + "/ip_num", 'w') as out:
                out.write(str(len(ips)))
            for idx, ip in enumerate(ips):
                with open(SCRATCHDIR + "/" + str(iid) + "/ip." + str(idx), 'w') as out:
                    out.write(str(ip))
            # any DHCP-range address means QEMU user-mode networking is needed
            isUserNetwork = any(isDhcpIp(ip) for ip in ips)
            with open(SCRATCHDIR + "/" + str(iid) + "/isDhcp", "w") as out:
                if isUserNetwork:
                    out.write("true")
                else:
                    out.write("false")
            # build and persist the QEMU invocation, then test-boot it
            qemuCommandLine = qemuCmd(iid,
                                      filterNetworkList,
                                      ports,
                                      network_type,
                                      arch,
                                      endianness,
                                      qemuInitValue,
                                      isUserNetwork)
            with open(outfile, "w") as out:
                out.write(qemuCommandLine)
            # rwx for owner, rx for group/others: script must be executable
            os.chmod(outfile, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
            os.system('./scripts/test_emulation.sh {} {}'.format(iid, arch + endianness))
            # the test script drops "true" into scratch/<iid>/web on success
            if (os.path.exists(SCRATCHDIR + '/' + str(iid) + '/web') and
                    open(SCRATCHDIR + '/' + str(iid) + '/web').read().strip() == 'true'):
                success = True
                break
        # restore infer network data
        # targetData is '' when init is preInit.sh
        if targetData != '':
            targetDir = SCRATCHDIR + '/' + str(iid)
            loopFile = mountImage(targetDir)
            with open(targetFile, 'w') as out:
                out.write(targetData)
            umountImage(targetDir, loopFile)
    return success
def archEnd(value):
    """Split an architecture string such as "mipseb" or "armel" into its parts.

    @param value: architecture identifier (case-insensitive)
    @return: tuple (arch, endianness); each element is None when unrecognized
    """
    lowered = value.lower()
    if lowered.startswith("mips"):
        detectedArch = "mips"
    elif lowered.startswith("arm"):
        detectedArch = "arm"
    else:
        detectedArch = None
    if lowered.endswith("el"):
        detectedEnd = "el"
    elif lowered.endswith("eb"):
        detectedEnd = "eb"
    else:
        detectedEnd = None
    return (detectedArch, detectedEnd)
def getWorkDir():
    """Locate the FirmAE working directory by searching for firmae.config.

    @return: the directory containing firmae.config (current directory or its
        parent), or None when the marker file cannot be found
    """
    if os.path.isfile("./firmae.config"):
        return os.getcwd()
    if os.path.isfile("../firmae.config"):
        # strip the last path component to get the parent directory
        here = os.getcwd()
        return here[:here.rfind('/')]
    return None
def main():
makeQemuCmd = False
iid = None
outfile = None
arch = None
endianness = None
workDir = getWorkDir()
if not workDir:
raise Exception("Can't find firmae.config file")
global SCRATCHDIR
global SCRIPTDIR
SCRATCHDIR = workDir + '/scratch'
SCRIPTDIR = workDir + '/scripts'
(opts, argv) = getopt.getopt(sys.argv[1:], 'i:a:oqd')
for (k, v) in opts:
if k == '-d':
global debug
debug += 1
if k == '-q':
makeQemuCmd = True
if k == '-i':
iid = int(v)
if k == '-o':
outfile = True
if k == '-a':
(arch, endianness) = archEnd(v)
if not | |
# tgen/seq2seq.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import zip
from builtins import str
from builtins import range
from past.utils import old_div
from builtins import object
import re
import numpy as np
import tensorflow as tf
import pickle as pickle
from itertools import zip_longest, groupby
import sys
import math
import tempfile
import shutil
import os
from functools import partial
from tgen.logf import log_info, log_debug, log_warn
from tgen.futil import read_das, chunk_list, file_stream, read_trees_or_tokens
from tgen.embeddings import DAEmbeddingSeq2SeqExtract, TokenEmbeddingSeq2SeqExtract, \
TreeEmbeddingSeq2SeqExtract, ContextDAEmbeddingSeq2SeqExtract, \
TaggedLemmasEmbeddingSeq2SeqExtract
from tgen.rnd import rnd
from tgen.planner import SentencePlanner
from tgen.tree import TreeData, TreeNode
from tgen.eval import Evaluator, SlotErrAnalyzer
from tgen.bleu import BLEUMeasure
from tgen.tfclassif import Reranker
from tgen.tf_ml import TFModel, embedding_attention_seq2seq_context
from tgen.ml import softmax
from tgen.lexicalize import Lexicalizer
import tgen.externals.seq2seq as tf06s2s
def grouper(iterable, n, fillvalue=None):
    """Collect data into fixed-length chunks or blocks, from Python Itertools recipes.

    grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    """
    # n references to the SAME iterator: zip_longest then pulls n items per chunk
    slots = [iter(iterable)] * n
    return zip_longest(*slots, fillvalue=fillvalue)
def cut_batch_into_steps(batch):
    """Take a batch (list of examples, which are lists of steps/words themselves), and slice
    it along the other dimension – return a list of steps/words, each containing a numpy array of
    items for the given step for all examples from the batch.
    """
    # stack the examples (skipping padding Nones) into a (batch, steps, ...) array
    stacked = np.array([example for example in batch if example is not None])
    num_steps = len(batch[0])
    # one slice per step, each of shape (batch, 1, ...)
    per_step = np.array(np.split(stacked, num_steps, axis=1))
    # drop the singleton axis left over from the split -> (steps, batch, ...)
    return np.squeeze(per_step, axis=2)
class Seq2SeqBase(SentencePlanner):
"""A common ancestor for the Plain and Ensemble Seq2Seq generators (decoding methods only)."""
def __init__(self, cfg):
    """Initialize decoding options and optional reranker/lexicalizer components.

    @param cfg: configuration dict; recognized keys include beam_size, \
        sample_top_k, length_norm_weight, context_bleu_weight, \
        context_bleu_metric, validation_delex_slots, classif_filter, \
        misfit_penalty, lexicalizer (see code for defaults)
    """
    super(Seq2SeqBase, self).__init__(cfg)
    # save the whole configuration for later use (save/load, construction of embedding
    # extractors)
    self.cfg = cfg
    # decoding options
    self.beam_size = cfg.get('beam_size', 1)
    self.sample_top_k = cfg.get('sample_top_k', 1)
    self.length_norm_weight = cfg.get('length_norm_weight', 0.0)
    self.context_bleu_weight = cfg.get('context_bleu_weight', 0.0)
    self.context_bleu_metric = cfg.get('context_bleu_metric', 'bleu')
    # comma-separated string in the config -> set of slot names
    self.validation_delex_slots = cfg.get('validation_delex_slots', set())
    if self.validation_delex_slots:
        self.validation_delex_slots = set(self.validation_delex_slots.split(','))
    self.slot_err_stats = None
    self.classif_filter = None
    if cfg.get('classif_filter'):
        # use the specialized settings for the reranking classifier
        rerank_cfg = cfg['classif_filter']
        # plus, copy some settings from the main Seq2Seq module (so we're consistent)
        for setting in ['mode', 'use_tokens', 'embeddings_lowercase',
                        'embeddings_split_plurals', 'tb_summary_dir']:
            if setting in cfg:
                rerank_cfg[setting] = cfg[setting]
        self.classif_filter = Reranker.get_model_type(rerank_cfg)(rerank_cfg)
        # penalty applied per unit of classifier-measured misfit during reranking
        self.misfit_penalty = cfg.get('misfit_penalty', 100)
    self.lexicalizer = None
    if cfg.get('lexicalizer'):
        # build a lexicalizer with the given settings
        lexer_cfg = cfg['lexicalizer']
        for setting in ['mode', 'language']:
            if setting in cfg:
                lexer_cfg[setting] = cfg[setting]
        self.lexicalizer = Lexicalizer(lexer_cfg)
    self.init_slot_err_stats()
def process_das(self, das, gold_trees=None):
    """
    Process a list of input DAs, return the corresponding trees (using the generator
    network with current parameters).
    @param das: input DAs
    @param gold_trees: (optional) gold trees against which cost is computed
    @return: generated trees as `TreeData` instances, cost if `gold_trees` are given
    """
    # encoder inputs
    enc_inputs = cut_batch_into_steps([self.da_embs.get_embeddings(da)
                                       for da in das])
    # beam search is only implemented for a "batch" of a single DA;
    # everything else falls back to greedy decoding
    if self.beam_size > 1 and len(das) == 1:
        dec_output_ids = self._beam_search(enc_inputs, das[0])
        dec_cost = None
    else:
        dec_output_ids, dec_cost = self._greedy_decoding(enc_inputs, gold_trees)
    # transpose (steps, batch) -> per-example id sequences, then decode to trees
    dec_trees = [self.tree_embs.ids_to_tree(ids) for ids in dec_output_ids.transpose()]
    # return result (trees and optionally cost)
    if dec_cost is None:
        return dec_trees
    return dec_trees, dec_cost
def _greedy_decoding(self, enc_inputs, gold_trees):
    """Run greedy decoding with the given encoder inputs; optionally use given gold trees
    as decoder inputs for cost computation."""
    have_gold = gold_trees is not None
    if have_gold:
        # true decoder inputs, used just for cost computation
        embeddings = [self.tree_embs.get_embeddings(tree) for tree in gold_trees]
    else:
        # fake (empty-tree) decoder inputs, one per example in the batch
        placeholder = self.tree_embs.get_embeddings(TreeData())
        embeddings = [placeholder for _ in enc_inputs[0]]
    dec_inputs = cut_batch_into_steps(embeddings)
    # run the decoding per se; returns (output ids, cost or None)
    return self._get_greedy_decoder_output(enc_inputs, dec_inputs,
                                           compute_cost=have_gold)
def _get_greedy_decoder_output(self, enc_inputs, dec_inputs, compute_cost=False):
    """Abstract hook: run one greedy decoding pass over the batch.

    Bug fix: the first parameter was named ``initial_state`` while the method
    is always called as ``self._get_greedy_decoder_output(enc_inputs,
    dec_inputs, ...)``, so that slot actually received the instance; it is now
    named ``self`` to match reality (call sites and subclass overrides keep
    the same positional layout, so behavior is unchanged).

    @param enc_inputs: encoder input steps for the batch
    @param dec_inputs: decoder input steps for the batch
    @param compute_cost: if True, also compute and return the decoding cost
    @return: tuple (decoder output ids, cost or None) -- in subclasses
    @raise NotImplementedError: always, in this base class
    """
    raise NotImplementedError()
class DecodingPath(object):
    """A decoding path to be used in beam search.

    Tracks the decoder input tokens chosen so far, the decoder hidden states,
    the accumulated log-probability, and an "effective" length that stops
    growing once the stop token has been produced.
    """
    __slots__ = ['stop_token_id', 'dec_inputs', 'dec_states', 'logprob', '_length']

    def __init__(self, stop_token_id, dec_inputs=(), dec_states=(), logprob=0.0, length=-1):
        """Create a (possibly partial) decoding path.

        Bug fix: the defaults for `dec_inputs`/`dec_states` were mutable lists
        (`[]`); they are now immutable tuples. Behavior is identical since both
        were always copied via `list(...)`, but the mutable-default pitfall is
        removed.

        @param stop_token_id: id of the stop token (path length freezes at it)
        @param dec_inputs: decoder input tokens chosen so far (copied)
        @param dec_states: decoder hidden states so far (copied)
        @param logprob: accumulated log-probability of this path
        @param length: effective length; negative means "use len(dec_inputs)"
        """
        self.stop_token_id = stop_token_id
        self.dec_inputs = list(dec_inputs)
        self.dec_states = list(dec_states)
        self.logprob = logprob
        self._length = length if length >= 0 else len(dec_inputs)

    def expand(self, max_variants, dec_out_probs, dec_state):
        """Expand the path with all possible outputs, updating the log probabilities.

        @param max_variants: expand to this number of variants at maximum, discard the less \
            probable ones
        @param dec_out_probs: the decoder output probabilities for the current step
        @param dec_state: the decoder hidden state for the current step
        @return: an array of all possible continuations of this path
        """
        ret = []
        # select only up to max_variants most probable variants
        top_n_idx = np.argpartition(-dec_out_probs, max_variants)[:max_variants]
        for idx in top_n_idx:
            # NOTE(review): references Seq2SeqGen.DecodingPath (defined elsewhere
            # in this file) rather than this class directly -- presumably the
            # same class; verify if subclassing is ever intended
            expanded = Seq2SeqGen.DecodingPath(self.stop_token_id,
                                               self.dec_inputs, self.dec_states, self.logprob,
                                               len(self))
            # effective length only grows while no stop token has been emitted
            if len(self) == len(self.dec_inputs) and idx != self.stop_token_id:
                expanded._length += 1
            expanded.logprob += np.log(dec_out_probs[idx])
            expanded.dec_inputs.append(np.array(idx, ndmin=1))
            expanded.dec_states.append(dec_state)
            ret.append(expanded)
        return ret

    def __len__(self):
        """Return decoding path length (number of decoder input tokens)."""
        return self._length
def _beam_search(self, enc_inputs, da):
    """Run beam search decoding.

    @param enc_inputs: encoder input steps for a single-example "batch"
    @param da: the input DA (used for reranking the finished paths)
    @return: token ids of the best decoded path, as a numpy array
    """
    # true "batches" not implemented
    assert len(enc_inputs[0]) == 1
    # run greedy decoder for comparison (debugging purposes)
    log_debug("GREEDY DEC WOULD RETURN:\n" +
              " ".join(self.tree_embs.ids_to_strings(
                  [out_tok[0] for out_tok in self._greedy_decoding(enc_inputs, None)[0]])))
    # initialize
    self._init_beam_search(enc_inputs)
    empty_tree_emb = self.tree_embs.get_embeddings(TreeData())
    dec_inputs = cut_batch_into_steps([empty_tree_emb])
    # start from a single path holding only the first (empty-tree) input step
    paths = [self.DecodingPath(stop_token_id=self.tree_embs.STOP, dec_inputs=[dec_inputs[0]])]
    # beam search steps
    for step in range(len(dec_inputs)):
        new_paths = []
        # expand every current path by its top-scoring continuations
        for path in paths:
            out_probs, st = self._beam_search_step(path.dec_inputs, path.dec_states)
            new_paths.extend(path.expand(self.beam_size, out_probs, st))
        # length-weighted comparison of two paths' logprobs
        paths = sorted(new_paths,
                       key=lambda p: p.logprob / (len(p) ** self.length_norm_weight),
                       reverse=True)[:self.beam_size]
        if all([p.dec_inputs[-1] == self.tree_embs.VOID for p in paths]):
            break  # stop decoding if we have reached the end in all paths
        log_debug(("\nBEAM SEARCH STEP %d\n" % step) +
                  "\n".join([("%f\t" % p.logprob) +
                             " ".join(self.tree_embs.ids_to_strings([inp[0] for inp in p.dec_inputs]))
                             for p in paths]) + "\n")
    # rerank paths by their distance to the input DA
    if self.classif_filter or self.context_bleu_weight:
        paths = self._rerank_paths(paths, da)
    # measure slot error on the top k paths
    if self.slot_err_stats:
        for path in paths[:self.sample_top_k]:
            self.slot_err_stats.append(
                da, self.tree_embs.ids_to_strings([inp[0] for inp in path.dec_inputs]))
    # select the "best" path -- either the best, or one in top k
    if self.sample_top_k > 1:
        best_path = self._sample_path(paths[:self.sample_top_k])
    else:
        best_path = paths[0]
    # return just the best path (as token IDs)
    return np.array(best_path.dec_inputs)
def _init_beam_search(self, enc_inputs):
    """Abstract hook: prepare decoder state for beam search over `enc_inputs`.

    Must be overridden by subclasses.
    @raise NotImplementedError: always, in this base class
    """
    raise NotImplementedError()
def _beam_search_step(self, dec_inputs, dec_states):
    """Abstract hook: run one beam search step for a single path.

    Must be overridden by subclasses; expected to return (output
    probabilities, new decoder state) -- see the call in `_beam_search`.
    @raise NotImplementedError: always, in this base class
    """
    raise NotImplementedError()
def _rerank_paths(self, paths, da):
    """Rerank the n-best decoded paths according to the reranking classifier and/or
    BLEU against context.

    Mutates each path's `logprob` in place, then returns the paths re-sorted
    by the adjusted score (descending).
    @param paths: list of DecodingPath instances from beam search
    @param da: the input DA (context is taken from da[0])
    @return: the same paths, sorted by adjusted logprob
    """
    # decode each path's token ids back into a tree for scoring
    trees = [self.tree_embs.ids_to_tree(np.array(path.dec_inputs).transpose()[0])
             for path in paths]
    # rerank using BLEU against context if set to do so
    if self.context_bleu_weight:
        bm = BLEUMeasure(max_ngram=2)
        bleus = []
        for path, tree in zip(paths, trees):
            bm.reset()
            # compare the generated lemmas against the context (da[0])
            bm.append([(n.t_lemma, None) for n in tree.nodes[1:]], [da[0]])
            bleu = (bm.ngram_precision()
                    if self.context_bleu_metric == 'ngram_prec'
                    else bm.bleu())
            bleus.append(bleu)
            path.logprob += self.context_bleu_weight * bleu
        log_debug(("BLEU for context: %s\n\n" % " ".join([form for form, _ in da[0]])) +
                  "\n".join([("%.5f\t" % b) + " ".join([n.t_lemma for n in t.nodes[1:]])
                             for b, t in zip(bleus, trees)]))
    # add distances to logprob so that non-fitting will be heavily penalized
    if self.classif_filter:
        self.classif_filter.init_run(da)
        fits = self.classif_filter.dist_to_cur_da(trees)
        for path, fit in zip(paths, fits):
            path.logprob -= self.misfit_penalty * fit
        log_debug(("Misfits for DA: %s\n\n" % str(da)) +
                  "\n".join([("%.5f\t" % fit) +
                             " ".join([str(n.t_lemma) for n in tree.nodes[1:]])
                             for fit, tree in zip(fits, trees)]))
    # adjust paths for length (if set to do so)
    if self.length_norm_weight:
        for path in paths:
            path.logprob /= len(path) ** self.length_norm_weight
    return sorted(paths, key=lambda p: p.logprob, reverse=True)
def _sample_path(self, paths):
    """Sample one path from the top k paths, based on their probabilities."""
    # turn logprobs into an unnormalized distribution, shifting by the
    # maximum so the exponentials cannot underflow
    logprobs = [path.logprob for path in paths]
    top = max(logprobs)
    weights = [math.exp(lp - top) for lp in logprobs]
    total = sum(weights)
    distribution = [w / total for w in weights]  # normalized
    # draw uniformly and walk the CDF; the first bucket containing the draw wins
    draw = rnd.random()
    cum = 0.0
    selected = -1
    for pos, mass in enumerate(distribution):
        upper = cum + mass
        if cum <= draw < upper:
            selected = pos
            break
        cum = upper
    # if rounding left the draw past the last boundary, index -1 picks the
    # final path -- same fallback as the original loop
    return paths[selected]
def generate_tree(self, da, gen_doc=None):
"""Generate one tree, saving it into the document provided (if applicable).
@param da: the input DA
@param gen_doc: the document where the tree should be saved (defaults to None)
"""
# generate the tree
log_debug("GENERATE TREE FOR | |
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
run evaluation of VCMR or inference of TVR for submission
"""
import argparse
import os
from os.path import exists
from time import time
import torch
from torch.utils.data import DataLoader
from torch.nn import functional as F
import numpy as np
from tqdm import tqdm
import pprint
from apex import amp
from horovod import torch as hvd
from data import (VcmrFullEvalDataset, vcmr_full_eval_collate,
VcmrVideoOnlyFullEvalDataset,
PrefetchLoader, QueryTokLmdb,
video_collate)
from load_data import (get_video_ids,
load_video_sub_dataset,
load_video_only_dataset)
from data.loader import move_to_cuda
from model.vcmr import HeroForVcmr
from utils.logger import LOGGER
from utils.const import VFEAT_DIM, VCMR_IOU_THDS
from utils.tvr_standalone_eval import eval_retrieval
from utils.distributed import all_gather_list
from utils.misc import Struct
from utils.basic_utils import (
load_json, save_json)
from utils.tvr_eval_utils import (
find_max_triples_from_upper_triangle_product,
generate_min_max_length_mask,
get_submission_top_n, post_processing_vcmr_nms,
post_processing_svmr_nms)
def main(opts):
    """Run distributed (Horovod) full VCMR/VR/SVMR evaluation and gather results.

    Loads model options and checkpoint from `opts.output_dir`, builds the
    evaluation dataset/dataloader, runs `validate_full_vcmr`, and gathers the
    per-rank results on rank 0.
    @param opts: parsed command-line options (task, split, DB paths, checkpoint,
        batch sizes, fp16 flag, ...)
    """
    hvd.init()
    n_gpu = hvd.size()
    device = torch.device("cuda", hvd.local_rank())
    torch.cuda.set_device(hvd.local_rank())
    rank = hvd.rank()
    LOGGER.info("device: {} n_gpu: {}, rank: {}, 16-bits training: {}".format(device, n_gpu, hvd.rank(), opts.fp16))
    # silence all ranks but 0 to avoid duplicated log output
    if hvd.rank() != 0:
        LOGGER.disabled = True
    # hyper-parameters and model config saved during training
    hps_file = f'{opts.output_dir}/log/hps.json'
    model_opts = Struct(load_json(hps_file))
    model_config = f'{opts.output_dir}/log/model_config.json'
    # load DBs and image dirs
    video_ids = get_video_ids(opts.query_txt_db)
    if opts.task != "didemo_video_only":
        video_db = load_video_sub_dataset(opts.vfeat_db, opts.sub_txt_db, model_opts.vfeat_interval, model_opts)
    else:
        # video-only DiDeMo: no subtitle DB, metadata comes from the query DB
        txt_meta = load_json(os.path.join(opts.query_txt_db, "meta.json"))
        video_db = load_video_only_dataset(opts.vfeat_db, txt_meta, model_opts.vfeat_interval, model_opts)
    # sanity check: the query DB path should mention the requested split
    assert opts.split in opts.query_txt_db
    q_txt_db = QueryTokLmdb(opts.query_txt_db, -1)
    if opts.task != "didemo_video_only":
        inf_dataset = VcmrFullEvalDataset
    else:
        inf_dataset = VcmrVideoOnlyFullEvalDataset
    eval_dataset = inf_dataset(video_ids, video_db, q_txt_db, distributed=model_opts.distributed_eval)
    # Prepare model
    # opts.checkpoint may be a full path or just a step number
    if exists(opts.checkpoint):
        ckpt_file = opts.checkpoint
    else:
        ckpt_file = f'{opts.output_dir}/ckpt/model_step_{opts.checkpoint}.pt'
    checkpoint = torch.load(ckpt_file)
    # derive max frame sequence length from the checkpoint's position embeddings
    img_pos_embed_weight_key = ("v_encoder.f_encoder.img_embeddings.position_embeddings.weight")
    assert img_pos_embed_weight_key in checkpoint
    max_frm_seq_len = len(checkpoint[img_pos_embed_weight_key])
    model = HeroForVcmr.from_pretrained(
        model_config,
        state_dict=checkpoint,
        vfeat_dim=VFEAT_DIM,
        max_frm_seq_len=max_frm_seq_len,
        lw_neg_ctx=model_opts.lw_neg_ctx,
        lw_neg_q=model_opts.lw_neg_q, lw_st_ed=0,
        ranking_loss_type=model_opts.ranking_loss_type,
        use_hard_negative=False,
        hard_pool_size=model_opts.hard_pool_size,
        margin=model_opts.margin,
        use_all_neg=model_opts.use_all_neg,
        drop_svmr_prob=model_opts.drop_svmr_prob)
    model.to(device)
    if opts.fp16:
        model = amp.initialize(model, enabled=opts.fp16, opt_level='O2')
    eval_dataloader = DataLoader(eval_dataset, batch_size=opts.batch_size,
                                 num_workers=opts.n_workers,
                                 pin_memory=opts.pin_mem,
                                 collate_fn=vcmr_full_eval_collate)
    eval_dataloader = PrefetchLoader(eval_dataloader)
    _, results = validate_full_vcmr(model, eval_dataloader, opts.split, opts, model_opts)
    result_dir = f'{opts.output_dir}/results_{opts.split}'
    if not exists(result_dir) and rank == 0:
        os.makedirs(result_dir)
    # gather the per-rank result dicts on every rank
    all_results_list = all_gather_list(results)
    if hvd.rank() == 0:  # save for only one time
        # merge: keep one video2idx mapping, concatenate everything else
        all_results = {"video2idx": all_results_list[0]["video2idx"]}
        for rank_id in range(hvd.size()):
            for key, val in all_results_list[rank_id].items():
                if key == "video2idx":
                    continue
                if key not in all_results:
                    all_results[key] = []
                all_results[key].extend(all_results_list[rank_id][key])
        LOGGER.info('All results joined......')
        # from ivcml_util import list_histogram, show_type_tree
        # num_vcmr_moment = [len(item["predictions"]) for item in all_results["VCMR"]]
        # list_histogram(num_vcmr_moment, title="Number of moment number retrieved by HERO (gathered).", fig_name=f"dist_mom_num_full_gathered.png")
        # save_vr(all_results, f'{result_dir}/results_{opts.checkpoint}_{opts.split}_vr.json')
        # save_vcmr_base_on_vr(all_results, f'{result_dir}/results_{opts.checkpoint}_{opts.split}_vcmr_base_on_vr.json')
        # save_vcmr(all_results, f'{result_dir}/results_{opts.checkpoint}_{opts.split}_vcmr.json')
        # save_json(
        #     all_results,
        #     f'{result_dir}/results_{opts.checkpoint}_all.json')
def save_vr(results, target):  # add by zhixin
    """Write the top-4 video retrieval predictions per query to a JSON file.

    @param results: gathered results dict with "video2idx" and "VR" entries
    @param target: output JSON path
    """
    top_k = 4
    # invert the video-id -> index mapping
    vidx2vid = {}
    for vid in results["video2idx"]:
        vidx2vid[results["video2idx"][vid]] = vid
    submission = {}
    for item in results["VR"]:
        # each prediction row starts with the video index
        submission[item["desc_id"]] = [vidx2vid[pred[0]] for pred in item["predictions"][:top_k]]
    save_json(submission, target)
    LOGGER.info('VR results written......')
def save_vcmr_base_on_vr(results, target):  # add by zhixin
    """Write, per query, the best VCMR proposal whose video is in the query's
    top-k VR videos, as (vcmr_rank, vr_rank, vid, st, ed).

    Bug fix: the `found` flag was initialized ONCE before the loop, so after
    the first query matched, later queries with no matching proposal silently
    skipped the `assert False` guard and were dropped from the submission.
    The flag is now reset for every query.

    @param results: gathered results dict with "video2idx", "VR", "VCMR"
    @param target: output JSON path
    @raise AssertionError: if some query has no VCMR proposal in its VR top-k
    """
    k = 4
    vidx2vid = {results["video2idx"][vid]: vid for vid in results["video2idx"]}
    # per query: the top-k video ids according to VR
    vr_result = {item["desc_id"]: [vidx2vid[s[0]] for s in item["predictions"][:k]] for item in results["VR"]}
    vcmr_result = results["VCMR"]
    vcmr_submission = {}
    for i, item in enumerate(vcmr_result):
        desc_id = item["desc_id"]
        found = False  # reset per query (was set once globally -- see docstring)
        for rank, vcmr_proposal in enumerate(item["predictions"]):
            vidx, st, ed, s = vcmr_proposal
            vid = vidx2vid[vidx]
            if vid in vr_result[desc_id]:
                rank_in_vr = vr_result[desc_id].index(vid)
                vcmr_submission[desc_id] = (rank, rank_in_vr, vid, st, ed)
                found = True
                break
        if not found:
            raise AssertionError(
                "no VCMR proposal overlaps the top-%d VR videos for query %s" % (k, desc_id))
    save_json(vcmr_submission, target)
    LOGGER.info('VCMR (based on VR) results written......')
def save_vcmr(results, target):  # add by zhixin
    """Write the top-200 VCMR predictions per query, as
    {desc_id: [[video_id, st, ed, softmax_score], ...]}, to a JSON file.

    @param results: gathered results dict with "video2idx" and "VCMR" entries
    @param target: output JSON path
    """
    top_k = 200
    # invert the video-id -> index mapping
    vidx2vid = {}
    for vid, idx in results["video2idx"].items():
        vidx2vid[idx] = vid

    def __format_vcmr_prediction(pred, top_k):
        # unzip the proposal rows into parallel columns
        v_indices, starts, ends, scores = zip(*pred)
        # map video index to video id
        vids = [vidx2vid[idx] for idx in v_indices]
        # precess score
        scores = torch.tensor(scores).softmax(-1).tolist()
        return [list(row) for row in zip(vids, starts, ends, scores)][:top_k]

    vcmr_pred = {}
    for item in results["VCMR"]:
        vcmr_pred[item["desc_id"]] = __format_vcmr_prediction(item["predictions"], top_k)  # [[vid, st, ed, score], ...]
    save_json(vcmr_pred, target)
    LOGGER.info('VCMR results written......')
def is_score_ndarray(arr: np.ndarray):
    """Assert that every element of `arr` is a valid score in [0, 1].

    Bug fix: the original asserted ``((0 <= arr) * (arr <= 1)).sum() / arr.size``,
    which is truthy whenever at least ONE element is in range, so arrays with
    out-of-range scores passed the check. Now ALL elements must be in range.

    @param arr: numpy array of scores
    @raise AssertionError: if any element lies outside [0, 1]
    """
    assert bool(((0 <= arr) & (arr <= 1)).all()), arr
def is_score_tensor(arr: torch.Tensor):
    """Assert that every element of `arr` is a valid score in [0, 1].

    Bug fix: the original asserted ``((0 <= arr) * (arr <= 1)).sum() / arr.numel()``,
    which is truthy whenever at least ONE element is in range, so tensors with
    out-of-range scores passed the check. Now ALL elements must be in range.

    @param arr: torch tensor of scores
    @raise AssertionError: if any element lies outside [0, 1]
    """
    assert bool(((0 <= arr) & (arr <= 1)).all()), arr
@torch.no_grad()
def validate_full_vcmr(model, val_loader, split, opts, model_opts):
LOGGER.info("start running full VCMR evaluation on {opts.task} {split} split...")
model.eval()
n_ex = 0
st = time()
val_log = {}
has_gt_target = True
val_vid2idx = val_loader.dataset.vid2idx
if split in val_vid2idx:
video2idx_global = val_vid2idx[split]
else:
video2idx_global = val_vid2idx
video_ids = sorted(list(video2idx_global.keys()))
video2idx_local = {e: i for i, e in enumerate(video_ids)}
query_data = val_loader.dataset.query_data
partial_query_data = []
total_frame_embeddings, total_c_attn_masks = None, None
video_batch, video_idx = [], []
max_clip_len = 0
for video_i, (vid, vidx) in tqdm(enumerate(video2idx_local.items()), desc="Computing Video Embeddings", total=len(video2idx_local)):
video_item = val_loader.dataset.video_db[vid]
video_batch.append(video_item)
video_idx.append(vidx)
if len(video_batch) == opts.vcmr_eval_video_batch_size or video_i == len(video2idx_local) - 1:
video_batch = move_to_cuda(video_collate(video_batch))
# Safeguard fp16
for k, item in video_batch.items():
if isinstance(item, torch.Tensor) and item.dtype == torch.float32:
video_batch[k] = video_batch[k].to(dtype=next(model.parameters()).dtype)
curr_frame_embeddings = model.v_encoder(video_batch, 'repr')
curr_c_attn_masks = video_batch['c_attn_masks']
curr_clip_len = curr_frame_embeddings.size(-2)
assert curr_clip_len <= model_opts.max_clip_len
if total_frame_embeddings is None:
feat_dim = curr_frame_embeddings.size(-1)
total_frame_embeddings = torch.zeros(
(len(video2idx_local), model_opts.max_clip_len, feat_dim),
dtype=curr_frame_embeddings.dtype,
device=curr_frame_embeddings.device)
total_c_attn_masks = torch.zeros(
(len(video2idx_local), model_opts.max_clip_len),
dtype=curr_c_attn_masks.dtype,
device=curr_frame_embeddings.device)
indices = torch.tensor(video_idx)
total_frame_embeddings[indices, :curr_clip_len] = curr_frame_embeddings
total_c_attn_masks[indices, :curr_clip_len] = curr_c_attn_masks
max_clip_len = max(max_clip_len, curr_clip_len)
video_batch, video_idx = [], []
total_frame_embeddings = total_frame_embeddings[:, :max_clip_len, :]
total_c_attn_masks = total_c_attn_masks[:, :max_clip_len]
total_c_attn_masks = total_c_attn_masks[:, :max_clip_len]
svmr_st_probs_total, svmr_ed_probs_total = None, None
sorted_q2c_indices, sorted_q2c_scores = None, None
flat_st_ed_sorted_scores, flat_st_ed_scores_sorted_indices = None, None
total_qids, total_vids = [], []
for batch in tqdm(val_loader, desc="Computing q2vScores"):
qids = batch['qids']
vids = batch['vids']
targets = batch['targets']
if has_gt_target and targets.min() < 0:
has_gt_target = False
LOGGER.info("No GT annotations provided, only generate predictions")
del batch['targets'], batch['qids'], batch['vids'] # for the following input
total_qids.extend(qids)
total_vids.extend(vids)
for qid in qids:
partial_query_data.append(query_data[qid])
# Safeguard fp16
for k, item in batch.items():
if isinstance(item, torch.Tensor) and item.dtype == torch.float32:
batch[k] = batch[k].to(dtype=next(model.parameters()).dtype)
# FIXME
_q2video_scores, _st_probs, _ed_probs = \
model.get_pred_from_raw_query(total_frame_embeddings, total_c_attn_masks, **batch, cross=True, val_gather_gpus=False)
_st_probs = F.softmax(_st_probs, dim=-1)
_ed_probs = F.softmax(_ed_probs, dim=-1)
n_ex += len(qids)
if "SVMR" in opts.full_eval_tasks and has_gt_target:
row_indices = torch.arange(0, len(_st_probs))
svmr_gt_vidx = torch.tensor([video2idx_local[e] for e in vids])
svmr_st_probs = _st_probs[row_indices, svmr_gt_vidx].float().cpu().numpy()
svmr_ed_probs = _ed_probs[row_indices, svmr_gt_vidx].float().cpu().numpy()
if svmr_st_probs_total is None:
svmr_st_probs_total = svmr_st_probs
svmr_ed_probs_total = svmr_ed_probs
else:
svmr_st_probs_total = np.concatenate((svmr_st_probs_total, svmr_st_probs), axis=0)
svmr_ed_probs_total = np.concatenate((svmr_ed_probs_total, svmr_ed_probs), axis=0)
if "VR" not in opts.full_eval_tasks or _q2video_scores is None:
continue
_q2video_scores = _q2video_scores.float()
# To give more importance to top scores,
# the higher opt.alpha is the more importance will be given
q2video_scores = torch.exp(model_opts.q2c_alpha * _q2video_scores)
_sorted_q2c_scores, _sorted_q2c_indices = torch.topk(q2video_scores, model_opts.max_vcmr_video, dim=1, largest=True)
if sorted_q2c_indices is None:
sorted_q2c_indices = _sorted_q2c_indices.cpu().numpy()
sorted_q2c_scores = _sorted_q2c_scores.cpu().numpy()
else:
sorted_q2c_indices = np.concatenate((sorted_q2c_indices, _sorted_q2c_indices.cpu().numpy()), axis=0)
sorted_q2c_scores = np.concatenate((sorted_q2c_scores, _sorted_q2c_scores.cpu().numpy()), axis=0)
if "VCMR" not in opts.full_eval_tasks:
continue
row_indices = torch.arange(0, len(_st_probs), device=_st_probs.device).unsqueeze(1)
_st_probs = _st_probs[row_indices, _sorted_q2c_indices] # (_N_q, max_vcmr_video, L)
_ed_probs = _ed_probs[row_indices, _sorted_q2c_indices]
# (_N_q, max_vcmr_video, L, L)
_st_ed_scores = torch.einsum("qvm,qv,qvn->qvmn", _st_probs, _sorted_q2c_scores, _ed_probs)
valid_prob_mask = generate_min_max_length_mask(_st_ed_scores.shape, min_l=model_opts.min_pred_l, max_l=model_opts.max_pred_l)
_st_ed_scores *= torch.from_numpy(valid_prob_mask).to(_st_ed_scores.device) # invalid location will become zero!
# sort across the top-max_n_videos videos (by flatten from the 2nd dim)
# the indices here are local indices, not global indices
_n_q = _st_ed_scores.shape[0]
_flat_st_ed_scores = _st_ed_scores.reshape(_n_q, -1) # (N_q, max_vcmr_video*L*L)
_flat_st_ed_sorted_scores, _flat_st_ed_scores_sorted_indices = torch.sort(_flat_st_ed_scores, dim=1, descending=True)
if flat_st_ed_sorted_scores is None:
flat_st_ed_scores_sorted_indices = _flat_st_ed_scores_sorted_indices[:, :model_opts.max_before_nms].cpu().numpy()
flat_st_ed_sorted_scores = _flat_st_ed_sorted_scores[:, :model_opts.max_before_nms].cpu().numpy()
else:
flat_st_ed_scores_sorted_indices = \
np.concatenate((flat_st_ed_scores_sorted_indices, _flat_st_ed_scores_sorted_indices[:, :model_opts.max_before_nms].cpu().numpy()), axis=0)
flat_st_ed_sorted_scores = \
np.concatenate((flat_st_ed_sorted_scores, _flat_st_ed_sorted_scores[:, :model_opts.max_before_nms].cpu().numpy()), axis=0)
svmr_res, vr_res, vcmr_res = [], [], []
if "SVMR" in opts.full_eval_tasks and has_gt_target:
st_ed_prob_product = np.einsum("bm,bn->bmn", svmr_st_probs_total, svmr_ed_probs_total) # (N, L, L)
valid_prob_mask = generate_min_max_length_mask(st_ed_prob_product.shape, min_l=model_opts.min_pred_l, max_l=model_opts.max_pred_l)
# invalid location will become zero!
st_ed_prob_product *= valid_prob_mask
batched_sorted_triples = find_max_triples_from_upper_triangle_product(st_ed_prob_product, top_n=model_opts.max_before_nms, prob_thd=None)
for svmr_i, (qid, vid) in tqdm(enumerate(zip(total_qids, total_vids)), desc="[SVMR] Loop over queries to generate predictions", total=len(total_qids)):
vidx = video2idx_global[vid]
_sorted_triples = batched_sorted_triples[svmr_i]
# as we redefined ed_idx, which is inside the moment.
_sorted_triples[:, 1] += 1 # why 1 bias?
_sorted_triples[:, :2] = (_sorted_triples[:, :2] * model_opts.vfeat_interval) # frame duration in down sampling
cur_ranked_predictions = [[vidx, ] + row for row in _sorted_triples.tolist()]
cur_query_pred = dict(desc_id=int(qid), desc="", predictions=cur_ranked_predictions)
svmr_res.append(cur_query_pred)
if "VR" in opts.full_eval_tasks:
for vr_i, (_sorted_q2c_scores_row, _sorted_q2c_indices_row) in tqdm(
enumerate(zip(sorted_q2c_scores[:, :100], sorted_q2c_indices[:, :100])),
desc="[VR] Loop over queries to generate predictions",
total=len(total_qids)):
cur_vr_predictions = []
for v_score, v_meta_idx in zip(_sorted_q2c_scores_row, _sorted_q2c_indices_row):
video_idx = video2idx_global[video_ids[v_meta_idx]]
cur_vr_predictions.append([video_idx, 0, 0, float(v_score)])
cur_query_pred = dict(desc_id=int(total_qids[vr_i]), desc="", predictions=cur_vr_predictions)
vr_res.append(cur_query_pred)
if "VCMR" in opts.full_eval_tasks:
for vcmr_i, (_flat_st_ed_scores_sorted_indices, _flat_st_ed_sorted_scores) in \
tqdm(enumerate(zip(flat_st_ed_scores_sorted_indices, flat_st_ed_sorted_scores)),
desc="[VCMR] Loop over queries to generate | |
3))
elif (border_mode == 'full' and subsample == (1, 1) and
direction_hint == 'bprop inputs'):
# Special case: We are asked to use GpuDnnConvGradI. We need to set
# up a suitable 'fake' convolution to compute the gradient for.
img = gpu_contiguous(img)
kerns = gpu_contiguous(kerns.dimshuffle(1, 0, 2, 3))
conv_mode = 'cross' if conv_mode == 'conv' else 'conv'
out_shp = (shape_i(img, 0, fgraph),
shape_i(kerns, 1, fgraph),
shape_i(img, 2, fgraph) + shape_i(kerns, 2, fgraph) - 1,
shape_i(img, 3, fgraph) + shape_i(kerns, 3, fgraph) - 1)
out_shp = assert_conv_shape(out_shp)
out = gpu_alloc_empty(*out_shp)
desc = GpuDnnConvDesc(border_mode='valid', subsample=(1, 1),
conv_mode=conv_mode, precision=precision)(out.shape,
kerns.shape)
return GpuDnnConvGradI()(kerns, img, out, desc)
# Standard case: We use GpuDnnConv with suitable padding.
# contig_version will return a gpu_contiguous copy
# if the img contains negative strides
img = gpu_contiguous(img)
kerns = gpu_contiguous(kerns)
desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,
conv_mode=conv_mode, precision=precision)(img.shape,
kerns.shape)
desc_op = desc.owner.op
out_shp = GpuDnnConv.get_out_shape(img.shape, kerns.shape,
desc_op.border_mode,
desc_op.subsample)
out_shp = assert_conv_shape(out_shp)
out = gpu_alloc_empty(*out_shp)
return GpuDnnConv(algo=algo)(img, kerns, out, desc)
def dnn_conv3d(img, kerns, border_mode='valid', subsample=(1, 1, 1),
               conv_mode='conv', direction_hint=None, workmem=None,
               algo=None, precision=None):
    """
    GPU convolution using cuDNN from NVIDIA.

    The memory layout to use is 'bct01', that is 'batch', 'channel',
    'first dim', 'second dim', 'third dim' in that order.

    :param img: images to do the convolution over
    :param kerns: convolution filters
    :param border_mode: One of 'valid', 'full', 'half'; additionally, the
        padding size can be directly specified by an integer or a triplet of
        integers (as a tuple), specifying the amount of zero padding added to
        _both_ the top and bottom (first entry) and left and right (second
        entry) and front and back (third entry) sides of the volume.
    :param subsample: perform subsampling of the output (default: (1, 1, 1))
    :param conv_mode: perform convolution (kernels flipped) or
        cross-correlation. One of 'conv', 'cross'. (default: 'conv')
    :param direction_hint: Used by graph optimizers to change algorithm choice.
        By default, GpuDnnConv will be used to carry out the convolution.
        If border_mode is 'valid', subsample is (1,1,1) and direction_hint is
        'bprop weights', it will use GpuDnnConvGradW.
        This parameter is used internally by graph optimizers and may be
        removed at any time without a deprecation period. You have been warned.
    :param workmem: *deprecated*, use param algo instead
    :param algo: convolution implementation to use. Only 'none' is implemented
        for the conv3d. Default is the value of
        :attr:`config.dnn.conv.algo_fwd`.
    :param precision: dtype in which the computation of the convolution
        should be done. Possible values are 'as_input_f32', 'as_input',
        'float16', 'float32' and 'float64'. Default is the value of
        :attr:`config.dnn.conv.precision`.

    :warning: The cuDNN library only works with GPU that have a compute
        capability of 3.0 or higher. This means that older GPU will not
        work with this Op.
    :warning: dnn_conv3d only works with cuDNN library 3.0
    """
    # Normalize explicit zero padding to the canonical 'valid' spelling so the
    # special-case checks below see a single representation.
    if border_mode == (0, 0, 0):
        border_mode = 'valid'

    # Establish dtype in which to perform the computation of the convolution.
    if precision is None:
        precision = theano.config.dnn.conv.precision
    if precision == 'as_input' or precision == 'as_input_f32':
        nprec = theano.scalar.upcast(img.dtype, kerns.dtype)
        if nprec == 'float16' and precision == 'as_input_f32':
            # 'as_input_f32' never computes below float32 precision.
            precision = 'float32'
        else:
            precision = nprec

    # Check if deprecated param 'workmem' is used; it is a strict alias of
    # 'algo', so both being set at once is a caller error.
    if workmem is not None:
        warnings.warn(("dnn_conv3d: parameter 'workmem' is deprecated. Use "
                       "'algo' instead."), stacklevel=3)
        assert algo is None
        algo = workmem

    # Ensure the value of direction_hint is supported.
    assert direction_hint in [None, 'bprop weights', 'forward']

    # Either input may carry the fgraph needed by shape_i below.
    fgraph = getattr(img, 'fgraph', None) or getattr(kerns, 'fgraph', None)
    if (border_mode == 'valid' and subsample == (1, 1, 1) and
            direction_hint == 'bprop weights'):
        # Special case: We are asked to use GpuDnnConv3dGradW. We need to set
        # up a suitable 'fake' convolution to compute the gradient for.
        # Batch and channel axes are swapped so the "weights" of the fake
        # convolution are the desired output.
        img = gpu_contiguous(img.dimshuffle(1, 0, 2, 3, 4))
        if conv_mode == 'conv':
            # We need to flip manually. These 'kerns' are not the kernels
            # that would be flipped by conv_mode='conv' in GpuDnnConvGradW.
            kerns = kerns[:, :, ::-1, ::-1, ::-1]
        kerns = gpu_contiguous(kerns.dimshuffle(1, 0, 2, 3, 4))
        # 'valid' output shape: input extent minus kernel extent plus one,
        # per spatial axis.
        out_shp = (shape_i(kerns, 1, fgraph),
                   shape_i(img, 1, fgraph),
                   shape_i(img, 2, fgraph) - shape_i(kerns, 2, fgraph) + 1,
                   shape_i(img, 3, fgraph) - shape_i(kerns, 3, fgraph) + 1,
                   shape_i(img, 4, fgraph) - shape_i(kerns, 4, fgraph) + 1)
        out_shp = assert_conv_shape(out_shp)
        out = gpu_alloc_empty(*out_shp)
        desc = GpuDnnConvDesc(border_mode='valid', subsample=(1, 1, 1),
                              conv_mode='cross', precision=precision)(img.shape,
                                                                      out.shape)
        conv = GpuDnnConv3dGradW()(img, kerns, out, desc)
        # Undo the batch/channel swap applied above.
        return as_cuda_ndarray_variable(conv.dimshuffle(1, 0, 2, 3, 4))

    # Standard case: We use GpuDnnConv with suitable padding.
    # contig_version will return a gpu_contiguous copy
    # if the img contains negative strides.
    img = gpu_contiguous(img)
    kerns = gpu_contiguous(kerns)
    desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,
                          conv_mode=conv_mode, precision=precision)(img.shape,
                                                                    kerns.shape)
    # Read border_mode/subsample back from the descriptor op: GpuDnnConvDesc
    # normalizes them, so this is the authoritative form for shape inference.
    desc_op = desc.owner.op
    out_shp = GpuDnnConv3d.get_out_shape(img.shape, kerns.shape,
                                         desc_op.border_mode,
                                         desc_op.subsample)
    out_shp = assert_conv_shape(out_shp)
    out = gpu_alloc_empty(*out_shp)
    return GpuDnnConv3d(algo=algo)(img, kerns, out, desc)
def dnn_gradweight(img, topgrad,
                   kerns_shp,
                   border_mode='valid', subsample=(1, 1),
                   conv_mode='conv'):
    """
    GPU convolution gradient with respect to weight using cuDNN from NVIDIA.

    The memory layout to use is 'bc01', that is 'batch', 'channel',
    'first dim', 'second dim' in that order.

    FIXME parameters doc

    :warning: The cuDNN library only works with GPU that have a compute
        capability of 3.0 or higer. This means that older GPU will not
        work with this Op.
    """
    # cuDNN requires contiguous inputs; make GPU-contiguous copies if needed.
    contig_img = gpu_contiguous(img)
    contig_topgrad = gpu_contiguous(topgrad)
    kerns_shp = theano.tensor.as_tensor_variable(kerns_shp)
    # Build the convolution descriptor from the image shape and the
    # (symbolic) shape of the kernel gradient to be produced.
    conv_desc = GpuDnnConvDesc(border_mode=border_mode,
                               subsample=subsample,
                               conv_mode=conv_mode)(contig_img.shape,
                                                    kerns_shp)
    # Uninitialized output buffer that the gradient op will fill in.
    grad_buffer = gpu_alloc_empty(*kerns_shp)
    return GpuDnnConvGradW()(contig_img, contig_topgrad, grad_buffer,
                             conv_desc)
def dnn_gradweight3d(img, topgrad,
                     kerns_shp,
                     border_mode='valid', subsample=(1, 1, 1),
                     conv_mode='conv'):
    """
    GPU convolution gradient with respect to weight using cuDNN from NVIDIA.

    The memory layout to use is 'bct01', that is 'batch', 'channel',
    'first dim', 'second dim', 'third dim' in that order.

    FIXME parameters doc

    :warning: The cuDNN library only works with GPU that have a compute
        capability of 3.0 or higer. This means that older GPU will not
        work with this Op.
    """
    # cuDNN requires contiguous inputs.
    img = gpu_contiguous(img)
    topgrad = gpu_contiguous(topgrad)
    kerns_shp = theano.tensor.as_tensor_variable(kerns_shp)
    # Descriptor built from the image shape and the symbolic shape of the
    # kernel gradient to be computed.
    desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,
                          conv_mode=conv_mode)(img.shape, kerns_shp)
    # Uninitialized buffer the gradient op writes into.
    out = gpu_alloc_empty(*kerns_shp)
    return GpuDnnConv3dGradW()(img, topgrad, out, desc)
def dnn_gradinput(kerns, topgrad,
                  img_shp,
                  border_mode='valid', subsample=(1, 1),
                  conv_mode='conv'):
    """
    GPU convolution gradient with respect to input using cuDNN from NVIDIA.

    The memory layout to use is 'bc01', that is 'batch', 'channel',
    'first dim', 'second dim' in that order.

    FIXME parameters doc

    :warning: The cuDNN library only works with GPU that have a compute
        capability of 3.0 or higer. This means that older GPU will not
        work with this Op.
    """
    # cuDNN requires contiguous inputs; make GPU-contiguous copies if needed.
    contig_kerns = gpu_contiguous(kerns)
    contig_topgrad = gpu_contiguous(topgrad)
    img_shp = theano.tensor.as_tensor_variable(img_shp)
    # The descriptor is parameterized by the (symbolic) shape of the image
    # gradient to be produced and the kernel shape.
    conv_desc = GpuDnnConvDesc(border_mode=border_mode,
                               subsample=subsample,
                               conv_mode=conv_mode)(img_shp,
                                                    contig_kerns.shape)
    # Uninitialized output buffer that the gradient op will fill in.
    grad_buffer = gpu_alloc_empty(*img_shp)
    return GpuDnnConvGradI()(contig_kerns, contig_topgrad, grad_buffer,
                             conv_desc)
def dnn_gradinput3d(kerns, topgrad,
                    img_shp,
                    border_mode='valid', subsample=(1, 1, 1),
                    conv_mode='conv'):
    """
    GPU convolution gradient with respect to input using cuDNN from NVIDIA.

    The memory layout to use is 'bct01', that is 'batch', 'channel',
    'first dim', 'second dim', 'third dim' in that order.

    FIXME parameters doc

    :warning: The cuDNN library only works with GPU that have a compute
        capability of 3.0 or higer. This means that older GPU will not
        work with this Op.
    """
    # NOTE: the default subsample was (1, 1), a length-2 tuple, which is
    # invalid for a 3d convolution descriptor; it is now (1, 1, 1) for
    # consistency with dnn_gradweight3d and dnn_conv3d. Callers passing
    # subsample explicitly are unaffected.
    # cuDNN requires contiguous inputs.
    kerns = gpu_contiguous(kerns)
    topgrad = gpu_contiguous(topgrad)
    img_shp = theano.tensor.as_tensor_variable(img_shp)
    # Descriptor built from the symbolic shape of the image gradient to be
    # computed and the kernel shape.
    desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,
                          conv_mode=conv_mode)(img_shp, kerns.shape)
    # Uninitialized buffer the gradient op writes into.
    out = gpu_alloc_empty(*img_shp)
    return GpuDnnConv3dGradI()(kerns, topgrad, out, desc)
class GpuDnnPoolDesc(GpuOp):
"""
This Op builds a pooling descriptor for use in the other pooling operations.
Parameters
----------
ws
Windows size.
stride
(dx, dy).
mode : {'max', 'average_inc_pad', 'average_exc_pad'}
The old deprecated name 'average' correspond to 'average_inc_pad'.
pad
(pad_h, pad_w) padding information.
pad_h is the number of zero-valued pixels added to each of the top and
bottom borders.
pad_w is the number of zero-valued pixels added to each of the left and
right borders.
Note
----
Do not use anymore. Only needed to reload old pickled files.
"""
__props__ = ('ws', 'stride', 'mode', 'pad')
    def c_headers(self):
        # C headers needed by the generated code for this op.
        return ['cudnn.h', 'cudnn_helper.h']
    def c_header_dirs(self):
        # Search this module's directory (for cudnn_helper.h) plus the
        # user-configured cuDNN include path.
        return [os.path.dirname(__file__), config.dnn.include_path]
    def c_libraries(self):
        # Link against the cuDNN library.
        return ['cudnn']
    def c_lib_dirs(self):
        # User-configured directory containing the cuDNN library.
        return [config.dnn.library_path]
    def c_compiler(self):
        # Compile the generated code with NVCC rather than the host compiler.
        return NVCC_compiler
    def do_constant_folding(self, node):
        # Never constant-fold this op.
        return False
def __init__(self, ws=(1, 1), stride=None, mode='max', pad=None):
if mode == 'average':
mode = 'average_inc_pad'
assert mode in ('max', 'average_inc_pad', 'average_exc_pad')
self.mode = mode
if stride is None:
stride = (1,) * len(ws)
if pad is None:
pad = |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.