input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
0
for label in self.gridline_values:
#draw the gridline
gridline = ET.Element("path", d="M %d %d L %d %d" % (0, (self.grid_height - count * grid_space), self.grid_width, (self.grid_height - count * grid_space)))
gridline.attrib['class'] = 'y-gridline'
y_axis.append(gridline)
#draw the text label
gridline_label = ET.Element("text", x="%s" % (-self.y_label_padding), y="%s" % ( self.grid_height - (count * grid_space) ) )
#um = self.max_y_axis_value - (count * grid_value_increment)
text = "%s" % label
text = self.convert_units(label)
gridline_label.text = text
gridline_label.attrib['class'] = 'y-axis-label'
y_axis.append(gridline_label)
count += 1
self.grid.append(x_axis)
self.grid.append(y_axis)
    def check_label_types(self):
        """Inspect the type of the chart's axis labels.

        NOTE(review): unfinished stub -- it records the type of the first
        label only and never compares the remaining labels against it
        (see the #START HERE marker left by the original author).
        """
        # type of the first label; presumably meant as the reference type
        current_type = type(self.labels[0])
        # for label in self.labels:
        #START HERE
def data_point_label(self, value, x, y):
dp_label = ET.Element("text", x="%s" % x, y="%s" % y)
text = str(value)
text = self.convert_units(value)
dp_label.text = "%s" % text
dp_label.attrib['class'] = 'data-point-label'
self.grid.append(dp_label)
def get_display_unit(self):
#need to change this to be for tick marks, not actual data points
if self.min_y_axis_value != 0:
print self.min_y_axis_value
return self.match_unit(self.min_y_axis_value)
else:
return self.match_unit(self.gridline_values[1])
min_unit = (1000000000000, 'Tr')
for series in self.data:
if series != 'placeholder':
for point in series:
temp_unit = self.match_unit(point[1])
if temp_unit[0] < min_unit[0]:
min_unit = temp_unit
break
return min_unit
def match_unit(self, value):
for unit in reversed(CURRENCY):
if value / float(unit[0]) >= 1:
return unit
return (1, '')
def convert_units(self, value):
text = ""
if self.currency:
text = "$"
text = text + "%2g" % round (value / self.y_display_unit[0], 2)
if self.units:
text = text + self.y_display_unit[1]
return text
class Line(GridChart):
    """Line chart: draws each data series as one SVG <path> polyline."""
    def __init__(self, height, width, data, stylesheet=None, *args, **kwargs):
        super(Line, self).__init__(height, width, data, stylesheet, **kwargs)
        self.x_scale = self.set_scale() #find the width of each point in each series
        self.x_group_scale = self.x_scale * self.number_of_series #width of each data point grouping over multiple series
        self.setup_chart()
        self.data_series() #Chart subclass should have this method to chart the data series
        #self.labels.sort() # Yikes! sorting the labels independently from the data leads to problems...
        self.set_labels()
    def set_scale(self):
        #pixels between data points
        # N labels give N-1 gaps, hence the len(...) - 1 divisor
        return float(self.grid_width - (self.x_padding * 2) ) / (len(self.labels) - 1)
    def data_series(self):
        """Build one SVG path per data series and append them to the grid."""
        series_count = 0
        # NOTE(review): left_offset / bottom_offset appear unused below
        left_offset = self.padding
        bottom_offset = self.padding
        g_container = ET.Element('g')
        for series in self.data:
            series_count += 1
            if series != 'placeholder':
                #move path to initial data point
                data_point_count = self.labels.index(series[0][0])
                path_string = "M %s %s" % (self.x_padding + int(data_point_count * self.x_scale), self.grid_height - ((series[0][1] - self.min_y_axis_value) * self.y_scale))
                for point in series:
                    # skip while the index is 0: that point is already the
                    # path's "M" start -- presumably only correct when the
                    # series begins at the first label (TODO confirm)
                    if data_point_count == 0:
                        data_point_count += 1
                        continue
                    data_point_count = self.labels.index(point[0])
                    path_string += " L "
                    x = self.x_padding + int(data_point_count * self.x_scale)
                    point_height = self.y_scale * (point[1] - self.min_y_axis_value)
                    y = self.grid_height - point_height
                    path_string += "%s %s" % (x, y)
                    data_point_count += 1
                #put point markers in here at some point?
                line = ET.Element("path", d=path_string)
                line.attrib['class'] = 'series-%s-line' % series_count
                g_container.append(line)
        self.grid.append(g_container)
    def set_labels(self):
        """Place x-axis text labels and tick notches under the grid."""
        label_count = 0
        # NOTE(review): notch_interval / notch_start are set but never read
        notch_interval = self.label_intervals
        notch_start = 1
        label_start = 0
        if hasattr(self, "label_offset"):
            if self.label_offset > 0:
                notch_start = 0
                label_start = self.label_offset
        for l in self.labels:
            x_position = self.x_padding + (label_count * self.x_scale)
            y_position = self.grid_height + self.x_label_padding
            # draw every label when no interval is set, otherwise only every
            # label_intervals-th label starting at label_start
            if (self.label_intervals and (label_count >= label_start) and (label_count - label_start) % self.label_intervals == 0) or not self.label_intervals:
                text_item = ET.Element("text")
                text_item.attrib['x'] = "%s" % x_position
                text_item.attrib['y'] = "%s" % y_position
                text_item.text = "%s" % l
                text_item.attrib['class'] = 'x-axis-label'
                self.grid.append(text_item)
                #insert the notch between data point groups
                if l != self.labels[-1]:
                    if self.label_intervals:
                        skip_labels = self.label_intervals
                    else: skip_labels = 1
                    # notch sits halfway between this label and the next drawn one
                    notch_x_pos = x_position + (((self.x_padding + ((label_count + skip_labels) * self.x_scale)) - x_position) / 2)
                    notch_y_pos = self.grid_height
                    notch = ET.Element("path", d="M %s %s L %s %s" % (notch_x_pos, notch_y_pos, notch_x_pos, notch_y_pos + 5))
                    notch.attrib['class'] = 'x-notch'
                    self.grid.append(notch)
            label_count += 1
class Column(GridChart):
    """Subclass of GridChart class, specific to an n-series column chart """
    def __init__(self, height, width, data, stylesheet=None, *args, **kwargs):
        super(Column, self).__init__(height, width, data, stylesheet, **kwargs)
        self.max_x_point_width = 30 #How wide should a bar chart be if there's plenty of white space -->move to bar chart only
        #find the width of each point in each series
        self.x_scale = self.set_scale()
        #width of each data point grouping over multiple series
        self.x_group_scale = self.x_scale * self.number_of_series
        self.setup_chart()
        #Chart subclass should have this method to chart the data series
        self.data_series()
        #self.labels.sort() # Yikes! sorting the labels independently from the data leads to problems...
        self.set_labels()
    def set_scale(self):
        """Return the pixel width of a single column, capped at max_x_point_width.

        When the cap applies, x_padding is recomputed to spread the leftover
        width evenly between label groups.
        """
        scale = (self.grid_width / self.max_data_points / self.number_of_series)# - self.x_padding
        if self.max_x_point_width < scale:
            #need to adjust white space padding
            self.x_padding = (self.grid_width - (self.number_of_series * self.max_data_points * self.max_x_point_width)) / (self.max_data_points)
            return self.max_x_point_width
        else:
            return scale
    def data_series(self):
        """Draw one <rect> per numeric data point (grouped by label/series).

        Non-numeric point values are rendered as multi-line text instead of
        a bar; notches are inserted between label groups on the last series.
        """
        series_count = 0
        for series in self.data:
            data_point_count = 0
            for point in series:
                data_point_count = self.labels.index(point[0])
                point_width = self.x_scale
                x_position = (self.x_padding / 2) + (data_point_count * (self.x_group_scale + self.x_padding) ) + (series_count * point_width)
                if isinstance(point[1], (int, long, float, complex)):
                    point_height = self.y_scale * (point[1] - self.min_y_axis_value)
                else:
                    #value may be a string to display
                    point_height = self.max_y_axis_value * self.y_scale
                    text = ET.Element("text", x="%s" % x_position, y="%s" % (self.grid_height - (point_height/2)))
                    words = point[1].split('\n')
                    num_words = 0
                    for w in words:
                        # stack each line 14px apart, bottom-aligned on the grid
                        text_span = ET.Element("tspan", x="%s" % x_position, y="%s" % (self.grid_height - ((len(words) * 14) - (num_words * 14) ) ))
                        text_span.text = w
                        text.append(text_span)
                        num_words += 1
                    text.attrib['class'] = 'value-as-label'
                    self.grid.append(text)
                    data_point_count += 1
                    continue
                y_position = (self.grid_height - point_height)
                data_point = ET.Element("rect", x="%s" % x_position, y="%s" % y_position, height="%s" % point_height, width="%s" % point_width )
                data_point.attrib['class'] = 'series-%s-point' % series_count
                #insert the notch between data point groups
                if series == self.data[-1] and point != series[-1]:
                    notch_x_pos = x_position + (point_width) + (self.x_padding / 2)
                    notch_y_pos = self.grid_height
                    notch = ET.Element("path", d="M %s %s L %s %s" % (notch_x_pos, notch_y_pos, notch_x_pos, notch_y_pos + 5))
                    notch.attrib['class'] = 'x-notch'
                    self.grid.append(notch)
                self.grid.append(data_point)
                self.data_point_label(point[1], x_position + (point_width / 2), y_position - 5)
                data_point_count += 1
            series_count += 1
    def add_label(self, label, label_count, word_count=0):
        """Append one x-axis label element, optionally rotated.

        :param label: text to display.
        :param label_count: zero-based label-group index (sets x position).
        :param word_count: line number within a multi-line label (13px steps).
        """
        x_position = int((self.x_padding / 2) + (self.x_group_scale / 2) + (label_count * (self.x_group_scale + self.x_padding)))
        y_position = self.grid_height + self.x_label_padding + (13 * word_count)
        text_item = ET.Element("text", x="%s" % x_position, y="%s" % y_position)
        text_item.text = "%s" % label
        text_item.attrib['class'] = 'x-axis-label'
        if self.label_rotate:
            text_item.attrib['transform'] = "rotate(%s, %s, %s)" % (self.label_rotate, x_position, y_position)
            # negative rotation anchors text at its end, positive at its start
            if self.label_rotate < 1:
                text_item.attrib['style'] = "text-anchor: end;"
            else:
                text_item.attrib['style'] = 'text-anchor: start;'
        self.grid.append(text_item)
    def set_labels(self):
        """Draw all x-axis labels, splitting embedded newlines across rows."""
        label_count = 0
        for l in self.labels:
            if not self.numeric_labels:
                # str.split always returns at least one element, so test for a
                # genuine multi-line label. The original tested
                # `if len(l.split('\n')):`, which is always true and left the
                # else branch unreachable (behavior was identical because a
                # one-element loop matches add_label's defaults).
                if len(l.split('\n')) > 1:
                    #multiline label
                    word_count = 0
                    for word in l.split('\n'):
                        self.add_label(word, label_count, word_count)
                        word_count += 1
                else:
                    self.add_label(l, label_count)
            else:
                self.add_label(l, label_count)
            label_count += 1
class StackedColumn(GridChart):
"""Subclass of GridChart class, specific to an n-series column chart """
def __init__(self, height, width, data, stylesheet=None, *args, **kwargs):
super(StackedColumn, self).__init__(height, width, data, stylesheet, **kwargs)
self.max_x_point_width = 60 #How wide should a bar chart be if there's plenty of white space -->move to bar chart only
#find the width of each point in each series
self.x_group_scale = self.set_group_scale()
self.x_scale = self.set_scale()
#width of each data point grouping over multiple series
self.setup_chart()
#Chart subclass should have this method to chart the data series
self.data_series()
self.set_labels()
#insert the notch between data point groups
lcount = 0
for l in self.labels:
if lcount == 0:
notch_x_pos = 0
else:
notch_x_pos = self.x_padding + (lcount * self.x_group_scale)
notch_y_pos = self.grid_height
notch = ET.Element("path", d="M %s %s L %s %s" % (notch_x_pos, notch_y_pos, notch_x_pos, notch_y_pos + 5))
if lcount == 0: notch.attrib['class'] = 'x-notch-left'
else: notch.attrib['class'] = 'x-notch'
self.grid.append(notch)
lcount += 1
end_notch = ET.Element("path", d="M %s %s L %s %s" % (self.grid_width, self.grid_height, self.grid_width, self.grid_height + 5))
end_notch.attrib['class'] = 'x-notch-right'
self.grid.append(end_notch)
def set_scale(self):
if self.x_group_scale > self.max_x_point_width: return self.max_x_point_width
else: return self.x_group_scale
def set_group_scale(self):
return (self.grid_width - (self.x_padding * 2)) / len(self.labels)
def find_y_maximum(self):
total_per_label = {}
for series in self.data:
#because values are additive for each label, need to add them up to find the max and scale | |
import pytest
import networkx as nx
from networkx.algorithms.similarity import (
graph_edit_distance,
optimal_edit_paths,
optimize_graph_edit_distance,
)
from networkx.generators.classic import (
circular_ladder_graph,
cycle_graph,
path_graph,
wheel_graph,
)
def nmatch(attrs_a, attrs_b):
    """Node matcher for graph_edit_distance: attribute dicts must be equal."""
    return attrs_a == attrs_b
def ematch(attrs_a, attrs_b):
    """Edge matcher for graph_edit_distance: attribute dicts must be equal."""
    return attrs_a == attrs_b
def getCanonical():
    """Build the small labelled reference graph used by the label-diff tests.

    Nodes A-D carry their own name as the 'label' attribute; edges carry a
    lowercase 'u-v' label.
    """
    G = nx.Graph()
    for node in ("A", "B", "C", "D"):
        G.add_node(node, label=node)
    G.add_edge("A", "B", label="a-b")
    G.add_edge("B", "C", label="b-c")
    G.add_edge("B", "D", label="b-d")
    return G
class TestSimilarity:
    @classmethod
    def setup_class(cls):
        # Make numpy/scipy visible at module scope; pytest.importorskip
        # skips the whole class when either optional dependency is missing.
        global numpy
        global scipy
        numpy = pytest.importorskip("numpy")
        scipy = pytest.importorskip("scipy")
    def test_graph_edit_distance_roots_and_timeout(self):
        """Exercise the roots= and timeout= arguments of graph_edit_distance."""
        G0 = nx.star_graph(5)
        G1 = G0.copy()
        # roots must be a 2-tuple of existing nodes
        pytest.raises(ValueError, graph_edit_distance, G0, G1, roots=[2])
        pytest.raises(ValueError, graph_edit_distance, G0, G1, roots=[2, 3, 4])
        pytest.raises(nx.NodeNotFound, graph_edit_distance, G0, G1, roots=(9, 3))
        pytest.raises(nx.NodeNotFound, graph_edit_distance, G0, G1, roots=(3, 9))
        pytest.raises(nx.NodeNotFound, graph_edit_distance, G0, G1, roots=(9, 9))
        # equivalent leaf roots give 0; forcing hub<->leaf alignment costs 8
        assert graph_edit_distance(G0, G1, roots=(1, 2)) == 0
        assert graph_edit_distance(G0, G1, roots=(0, 1)) == 8
        assert graph_edit_distance(G0, G1, roots=(1, 2), timeout=5) == 0
        assert graph_edit_distance(G0, G1, roots=(0, 1), timeout=5) == 8
        # an expired timeout yields None rather than a distance
        assert graph_edit_distance(G0, G1, roots=(0, 1), timeout=0.0001) is None
        # test raise on 0 timeout
        pytest.raises(nx.NetworkXError, graph_edit_distance, G0, G1, timeout=0)
    def test_graph_edit_distance(self):
        """Pairwise distances between empty, path, cycle and wheel graphs."""
        G0 = nx.Graph()
        G1 = path_graph(6)
        G2 = cycle_graph(6)
        G3 = wheel_graph(7)
        # distance is symmetric and zero on identical graphs
        assert graph_edit_distance(G0, G0) == 0
        assert graph_edit_distance(G0, G1) == 11
        assert graph_edit_distance(G1, G0) == 11
        assert graph_edit_distance(G0, G2) == 12
        assert graph_edit_distance(G2, G0) == 12
        assert graph_edit_distance(G0, G3) == 19
        assert graph_edit_distance(G3, G0) == 19
        assert graph_edit_distance(G1, G1) == 0
        assert graph_edit_distance(G1, G2) == 1
        assert graph_edit_distance(G2, G1) == 1
        assert graph_edit_distance(G1, G3) == 8
        assert graph_edit_distance(G3, G1) == 8
        assert graph_edit_distance(G2, G2) == 0
        assert graph_edit_distance(G2, G3) == 7
        assert graph_edit_distance(G3, G2) == 7
        assert graph_edit_distance(G3, G3) == 0
    def test_graph_edit_distance_node_match(self):
        """node_match makes structurally identical graphs differ by color."""
        G1 = cycle_graph(5)
        G2 = cycle_graph(5)
        for n, attr in G1.nodes.items():
            attr["color"] = "red" if n % 2 == 0 else "blue"
        for n, attr in G2.nodes.items():
            attr["color"] = "red" if n % 2 == 1 else "blue"
        # without node_match the coloring is ignored
        assert graph_edit_distance(G1, G2) == 0
        assert (
            graph_edit_distance(
                G1, G2, node_match=lambda n1, n2: n1["color"] == n2["color"]
            )
            == 1
        )
    def test_graph_edit_distance_edge_match(self):
        """edge_match makes structurally identical graphs differ by color."""
        G1 = path_graph(6)
        G2 = path_graph(6)
        for e, attr in G1.edges.items():
            attr["color"] = "red" if min(e) % 2 == 0 else "blue"
        for e, attr in G2.edges.items():
            attr["color"] = "red" if min(e) // 3 == 0 else "blue"
        # without edge_match the coloring is ignored
        assert graph_edit_distance(G1, G2) == 0
        assert (
            graph_edit_distance(
                G1, G2, edge_match=lambda e1, e2: e1["color"] == e2["color"]
            )
            == 2
        )
    def test_graph_edit_distance_node_cost(self):
        """Custom node substitution/deletion/insertion cost callbacks."""
        G1 = path_graph(6)
        G2 = path_graph(6)
        for n, attr in G1.nodes.items():
            attr["color"] = "red" if n % 2 == 0 else "blue"
        for n, attr in G2.nodes.items():
            attr["color"] = "red" if n % 2 == 1 else "blue"
        def node_subst_cost(uattr, vattr):
            # cheap substitution between like-colored nodes
            if uattr["color"] == vattr["color"]:
                return 1
            else:
                return 10
        def node_del_cost(attr):
            if attr["color"] == "blue":
                return 20
            else:
                return 50
        def node_ins_cost(attr):
            if attr["color"] == "blue":
                return 40
            else:
                return 100
        # six same-color substitutions at cost 1 each
        assert (
            graph_edit_distance(
                G1,
                G2,
                node_subst_cost=node_subst_cost,
                node_del_cost=node_del_cost,
                node_ins_cost=node_ins_cost,
            )
            == 6
        )
    def test_graph_edit_distance_edge_cost(self):
        """Custom edge substitution/deletion/insertion cost callbacks."""
        G1 = path_graph(6)
        G2 = path_graph(6)
        for e, attr in G1.edges.items():
            attr["color"] = "red" if min(e) % 2 == 0 else "blue"
        for e, attr in G2.edges.items():
            attr["color"] = "red" if min(e) // 3 == 0 else "blue"
        def edge_subst_cost(gattr, hattr):
            # cheap substitution between like-colored edges
            if gattr["color"] == hattr["color"]:
                return 0.01
            else:
                return 0.1
        def edge_del_cost(attr):
            if attr["color"] == "blue":
                return 0.2
            else:
                return 0.5
        def edge_ins_cost(attr):
            if attr["color"] == "blue":
                return 0.4
            else:
                return 1.0
        assert (
            graph_edit_distance(
                G1,
                G2,
                edge_subst_cost=edge_subst_cost,
                edge_del_cost=edge_del_cost,
                edge_ins_cost=edge_ins_cost,
            )
            == 0.23
        )
    def test_graph_edit_distance_upper_bound(self):
        """upper_bound below the true distance yields None; above it, the distance."""
        G1 = circular_ladder_graph(2)
        G2 = circular_ladder_graph(6)
        assert graph_edit_distance(G1, G2, upper_bound=5) is None
        assert graph_edit_distance(G1, G2, upper_bound=24) == 22
        assert graph_edit_distance(G1, G2) == 22
    def test_optimal_edit_paths(self):
        """All six optimal P3->C3 edit paths (cost 1) are enumerated exactly."""
        G1 = path_graph(3)
        G2 = cycle_graph(3)
        paths, cost = optimal_edit_paths(G1, G2)
        assert cost == 1
        assert len(paths) == 6
        def canonical(vertex_path, edge_path):
            # order-independent form so paths can be compared as sets;
            # None entries (insertions) sort after concrete edge pairs
            return (
                tuple(sorted(vertex_path)),
                tuple(sorted(edge_path, key=lambda x: (None in x, x))),
            )
        expected_paths = [
            (
                [(0, 0), (1, 1), (2, 2)],
                [((0, 1), (0, 1)), ((1, 2), (1, 2)), (None, (0, 2))],
            ),
            (
                [(0, 0), (1, 2), (2, 1)],
                [((0, 1), (0, 2)), ((1, 2), (1, 2)), (None, (0, 1))],
            ),
            (
                [(0, 1), (1, 0), (2, 2)],
                [((0, 1), (0, 1)), ((1, 2), (0, 2)), (None, (1, 2))],
            ),
            (
                [(0, 1), (1, 2), (2, 0)],
                [((0, 1), (1, 2)), ((1, 2), (0, 2)), (None, (0, 1))],
            ),
            (
                [(0, 2), (1, 0), (2, 1)],
                [((0, 1), (0, 2)), ((1, 2), (0, 1)), (None, (1, 2))],
            ),
            (
                [(0, 2), (1, 1), (2, 0)],
                [((0, 1), (1, 2)), ((1, 2), (0, 1)), (None, (0, 2))],
            ),
        ]
        assert {canonical(*p) for p in paths} == {canonical(*p) for p in expected_paths}
    def test_optimize_graph_edit_distance(self):
        """The generator yields strictly improving costs ending at the optimum."""
        G1 = circular_ladder_graph(2)
        G2 = circular_ladder_graph(6)
        bestcost = 1000
        for cost in optimize_graph_edit_distance(G1, G2):
            assert cost < bestcost
            bestcost = cost
        assert bestcost == 22
    # def test_graph_edit_distance_bigger(self):
    #     G1 = circular_ladder_graph(12)
    #     G2 = circular_ladder_graph(16)
    #     assert_equal(graph_edit_distance(G1, G2), 22)
    def test_selfloops(self):
        """Distances between small graphs containing self-loop edges."""
        G0 = nx.Graph()
        G1 = nx.Graph()
        G1.add_edges_from((("A", "A"), ("A", "B")))
        G2 = nx.Graph()
        G2.add_edges_from((("A", "B"), ("B", "B")))
        G3 = nx.Graph()
        G3.add_edges_from((("A", "A"), ("A", "B"), ("B", "B")))
        assert graph_edit_distance(G0, G0) == 0
        assert graph_edit_distance(G0, G1) == 4
        assert graph_edit_distance(G1, G0) == 4
        assert graph_edit_distance(G0, G2) == 4
        assert graph_edit_distance(G2, G0) == 4
        assert graph_edit_distance(G0, G3) == 5
        assert graph_edit_distance(G3, G0) == 5
        assert graph_edit_distance(G1, G1) == 0
        # G1 and G2 are isomorphic (loop on one endpoint), so distance 0
        assert graph_edit_distance(G1, G2) == 0
        assert graph_edit_distance(G2, G1) == 0
        assert graph_edit_distance(G1, G3) == 1
        assert graph_edit_distance(G3, G1) == 1
        assert graph_edit_distance(G2, G2) == 0
        assert graph_edit_distance(G2, G3) == 1
        assert graph_edit_distance(G3, G2) == 1
        assert graph_edit_distance(G3, G3) == 0
    def test_digraph(self):
        """Distances between directed 4-cycles and near-cycles."""
        G0 = nx.DiGraph()
        G1 = nx.DiGraph()
        G1.add_edges_from((("A", "B"), ("B", "C"), ("C", "D"), ("D", "A")))
        G2 = nx.DiGraph()
        G2.add_edges_from((("A", "B"), ("B", "C"), ("C", "D"), ("A", "D")))
        G3 = nx.DiGraph()
        G3.add_edges_from((("A", "B"), ("A", "C"), ("B", "D"), ("C", "D")))
        assert graph_edit_distance(G0, G0) == 0
        assert graph_edit_distance(G0, G1) == 8
        assert graph_edit_distance(G1, G0) == 8
        assert graph_edit_distance(G0, G2) == 8
        assert graph_edit_distance(G2, G0) == 8
        assert graph_edit_distance(G0, G3) == 8
        assert graph_edit_distance(G3, G0) == 8
        assert graph_edit_distance(G1, G1) == 0
        assert graph_edit_distance(G1, G2) == 2
        assert graph_edit_distance(G2, G1) == 2
        assert graph_edit_distance(G1, G3) == 4
        assert graph_edit_distance(G3, G1) == 4
        assert graph_edit_distance(G2, G2) == 0
        assert graph_edit_distance(G2, G3) == 2
        assert graph_edit_distance(G3, G2) == 2
        assert graph_edit_distance(G3, G3) == 0
    def test_multigraph(self):
        """Distances between multigraphs differing only in parallel edges."""
        G0 = nx.MultiGraph()
        G1 = nx.MultiGraph()
        G1.add_edges_from((("A", "B"), ("B", "C"), ("A", "C")))
        G2 = nx.MultiGraph()
        G2.add_edges_from((("A", "B"), ("B", "C"), ("B", "C"), ("A", "C")))
        G3 = nx.MultiGraph()
        G3.add_edges_from((("A", "B"), ("B", "C"), ("A", "C"), ("A", "C"), ("A", "C")))
        assert graph_edit_distance(G0, G0) == 0
        assert graph_edit_distance(G0, G1) == 6
        assert graph_edit_distance(G1, G0) == 6
        assert graph_edit_distance(G0, G2) == 7
        assert graph_edit_distance(G2, G0) == 7
        assert graph_edit_distance(G0, G3) == 8
        assert graph_edit_distance(G3, G0) == 8
        assert graph_edit_distance(G1, G1) == 0
        assert graph_edit_distance(G1, G2) == 1
        assert graph_edit_distance(G2, G1) == 1
        assert graph_edit_distance(G1, G3) == 2
        assert graph_edit_distance(G3, G1) == 2
        assert graph_edit_distance(G2, G2) == 0
        assert graph_edit_distance(G2, G3) == 1
        assert graph_edit_distance(G3, G2) == 1
        assert graph_edit_distance(G3, G3) == 0
    def test_multidigraph(self):
        """Distance between two directed multigraphs on disjoint node labels."""
        G1 = nx.MultiDiGraph()
        G1.add_edges_from(
            (
                ("hardware", "kernel"),
                ("kernel", "hardware"),
                ("kernel", "userspace"),
                ("userspace", "kernel"),
            )
        )
        G2 = nx.MultiDiGraph()
        G2.add_edges_from(
            (
                ("winter", "spring"),
                ("spring", "summer"),
                ("summer", "autumn"),
                ("autumn", "winter"),
            )
        )
        assert graph_edit_distance(G1, G2) == 5
        assert graph_edit_distance(G2, G1) == 5
    # by https://github.com/jfbeaumont
    def testCopy(self):
        """A graph and its copy are identical under attribute matchers."""
        G = nx.Graph()
        G.add_node("A", label="A")
        G.add_node("B", label="B")
        G.add_edge("A", "B", label="a-b")
        assert (
            graph_edit_distance(G, G.copy(), node_match=nmatch, edge_match=ematch) == 0
        )
    def testSame(self):
        """Two independently built identical graphs have distance 0."""
        G1 = nx.Graph()
        G1.add_node("A", label="A")
        G1.add_node("B", label="B")
        G1.add_edge("A", "B", label="a-b")
        G2 = nx.Graph()
        G2.add_node("A", label="A")
        G2.add_node("B", label="B")
        G2.add_edge("A", "B", label="a-b")
        assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 0
    def testOneEdgeLabelDiff(self):
        """A single differing edge label costs exactly 1 substitution."""
        G1 = nx.Graph()
        G1.add_node("A", label="A")
        G1.add_node("B", label="B")
        G1.add_edge("A", "B", label="a-b")
        G2 = nx.Graph()
        G2.add_node("A", label="A")
        G2.add_node("B", label="B")
        G2.add_edge("A", "B", label="bad")
        assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 1
    def testOneNodeLabelDiff(self):
        """A single differing node label costs exactly 1 substitution."""
        G1 = nx.Graph()
        G1.add_node("A", label="A")
        G1.add_node("B", label="B")
        G1.add_edge("A", "B", label="a-b")
        G2 = nx.Graph()
        G2.add_node("A", label="Z")
        G2.add_node("B", label="B")
        G2.add_edge("A", "B", label="a-b")
        assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 1
def testOneExtraNode(self):
G1 = nx.Graph()
G1.add_node("A", label="A")
G1.add_node("B", label="B")
G1.add_edge("A", "B", | |
"""
control_utils.py utility module for manipulating controllers
"""
# import standard modules
import re
# import local modules
import read_sides
import constraint_utils
import name_utils
# import custom modules
from maya_utils import file_utils
from maya_utils import object_utils
from maya_utils import attribute_utils
from maya_utils import transform_utils
from maya_utils import curve_utils
# import maya modules
from maya import cmds
# define private variables
# directory where controller shape JSON files are stored
__control_folder_dir__ = file_utils.controller_data_dir()
# define local variables
# naming-convention tokens re-exported from name_utils
CTRL_SUFFIX = name_utils.CTRL_SUFFIX
LOCATOR_SUFFIX = name_utils.LOCATOR_SUFFIX
CONSTRAINT_GRP = name_utils.CONSTRAINT_GRP
GROUP_NAME = name_utils.GROUP_NAME
# strips '[' and ']' from component strings like 'obj.vtx[3]'
re_brackets = re.compile(r'\[|]')
# matches '_<digits>'; NOTE(review): non-raw '\d' is a deprecated escape on
# Python 3.6+ -- consider r'_\d+'
re_numbers = re.compile('_\d+')
# default keyable transform attribute values used when zeroing controllers
transform_attrs = attribute_utils.Attributes.DEFAULT_ATTR_VALUES
side_cls = read_sides.Sides()
def get_controllers():
    """
    get all controllers in scene.
    :return: <tuple> controller objects.
    """
    plain = cmds.ls('*_{}'.format(CTRL_SUFFIX))
    namespaced = cmds.ls('*:*_{}'.format(CTRL_SUFFIX))
    return tuple(plain + namespaced)
def get_selected_ctrl():
    """
    return the currently selected node when it is a curve shape.
    :return: selected curve node, or None when nothing suitable is selected.
    """
    node = object_utils.get_selected_node()
    if node and object_utils.is_shape_curve(node):
        return node
    return None
def mirror_transforms(object_name=""):
    """
    mirror the transform controllers. **Must have corresponding left/ right naming.
    :param object_name: <str> the object to get transform values and find the mirror object from.
    :return: <bool> True for success. <bool> False for failure.
    """
    if not object_name:
        object_name = object_utils.get_selected_node(single=True)
    mirror_obj_name = ''
    if '_l_' in object_name:
        mirror_obj_name = object_name.replace('_l_', '_r_')
    if '_r_' in object_name:
        mirror_obj_name = object_name.replace('_r_', '_l_')
    # BUG FIX: the original tested `mirror_obj_name == mirror_obj_name`
    # (always True), so the function always returned False and never
    # mirrored anything. Bail out only when no side token was found.
    if not mirror_obj_name:
        return False
    # mirror the parent transform of the object onto the parent of its twin
    p_object = object_utils.get_transform_relatives(
        object_name, find_parent=True, as_strings=True)[0]
    p_mirror_object = object_utils.get_transform_relatives(
        mirror_obj_name, find_parent=True, as_strings=True)[0]
    p_trm = transform_utils.Transform(p_object)
    matrix = p_trm.world_matrix()
    mirror_matrix = p_trm.mirror_matrix(matrix)
    cmds.xform(p_mirror_object, m=mirror_matrix, ws=1)
    return True
def create_locators():
    """
    create locators on position

    For each selected item: component selections (containing '.') get a
    locator snapped by translation; whole transforms get a locator snapped
    by full matrix.
    :return: <bool> True for success.
    """
    for sl in cmds.ls(sl=1):
        if '.' in sl:
            # component selection, e.g. 'mesh.vtx[3]' -> name 'mesh_vtx3'
            name, dot, num = sl.partition('.')
            matrix = False
            translate = True
            name += '_{}'.format(re_brackets.sub('', num))
        else:
            # whole transform: copy the full world matrix
            matrix = True
            translate = False
            name = sl
        locator_name = name + '_{}'.format(LOCATOR_SUFFIX)
        # creating the shape node lets Maya create the transform parent with
        # the expected name -- presumably relies on Maya's auto-parenting;
        # TODO confirm
        cmds.createNode('locator', name=locator_name + 'Shape')
        object_utils.snap_to_transform(locator_name, sl, matrix=matrix, translate=translate)
    return True
def copy_xform(object_1, object_2):
    """
    copy the world-space matrix of object_1 onto object_2.
    (The code queries object_1 and applies to object_2.)
    :param object_1: <str> source object whose matrix is read.
    :param object_2: <str> destination object that receives the matrix.
    :return: <bool> True for success.
    """
    world_matrix = cmds.xform(object_1, q=1, m=1, ws=1)
    cmds.xform(object_2, m=world_matrix, ws=1)
    return True
def check_locator_suffix_name(object_name):
    """
    checks if the incoming object name contains the locator suffix token.
    :param object_name: <str> name to test.
    :return: <bool> token found.
    """
    # NOTE(review): substring test, not a strict endswith() -- a name with
    # the token mid-string also passes; confirm this is intended.
    return '_{}'.format(LOCATOR_SUFFIX) in object_name
def remove_locator_suffix_name(object_name):
    """
    strip the locator suffix from the given name.
    :param object_name: <str> name to split.
    :return: <str> name up to the last occurrence of the suffix
             (empty string when the suffix is absent).
    """
    token = '_{}'.format(LOCATOR_SUFFIX)
    head, _, _ = object_name.rpartition(token)
    return head
def check_control_suffix_name(object_name):
    """
    checks if the incoming object name contains the control suffix token.
    :param object_name: <str> the object name to test.
    :return: <bool> token found.
    """
    return '_{}'.format(CTRL_SUFFIX) in object_name
def remove_control_suffix_name(object_name):
    """
    strip the control suffix from the given name.
    :param object_name: <str> the object name to split.
    :return: <str> name up to the last occurrence of the suffix.
    """
    token = '_{}'.format(CTRL_SUFFIX)
    return object_name.rpartition(token)[0]
def snap_control_to_selected_locator():
    """
    snap each controller to its selected locator with the same leading name.
    :return: <bool> True for success.
    """
    selected = cmds.ls(sl=1, type='transform')
    for locator in selected:
        if check_locator_suffix_name(locator):
            copy_xform(remove_locator_suffix_name(locator), locator)
    return True
def get_shape_names(transforms_array=()):
    """
    from an array of transform objects given, return an array of associated shape names.
    :param transforms_array: <tuple> array of transform objects
    :return: <tuple> shape names.
    """
    collected = ()
    for transform_node in transforms_array:
        collected = collected + object_utils.get_shape_name(transform_node, "nurbsCurve")
    return collected
def color_code_controllers():
    """
    color code all controller shape names by their side:
    Center -> yellow, Left -> blue, Right -> red.
    :return: <bool> True for success.
    """
    side_colors = {'Center': 'yellow', 'Left': 'blue', 'Right': 'red'}
    for shape_name in get_shape_names(get_controllers()):
        # get a uniform side name
        side_name = side_cls.side_name_from_string(shape_name)
        if side_name in side_colors:
            curve_utils.set_nurb_shape_color(shape_name, color=side_colors[side_name])
    return True
def attr_str(ctrl_name, attr_name):
    """
    join a node name and an attribute name into 'node.attr' form.
    :param ctrl_name: <str> node name.
    :param attr_name: <str> attribute name.
    :return: <str> dotted attribute path.
    """
    return '.'.join((ctrl_name, attr_name))
def zero_controllers():
    """
    reset every keyable, unconnected transform attribute on every controller
    to its default value (assumes controllers are named with the ctrl suffix).
    :return: <bool> True for success.
    """
    for ctrl_name in get_controllers():
        for attr_name, default_value in transform_attrs.items():
            if not object_utils.is_attr_keyable(ctrl_name, attr_name):
                continue
            if object_utils.is_attr_connected(ctrl_name, attr_name):
                continue
            cmds.setAttr(attr_str(ctrl_name, attr_name), default_value)
    return True
def zero_all_controllers():
    """
    zeroes out all the scene controllers via the Attributes helper.
    :return: <bool> True for success.
    """
    for ctrl_name in get_controllers():
        attrs = attribute_utils.Attributes(ctrl_name, keyable=True)
        if attrs.non_zero_attributes():
            attrs.zero_attributes()
    return True
def get_controller_path(shape_name):
    """
    build the on-disk path for a controller shape file.
    :param shape_name: <str> shape file base name.
    :return: <str> full path inside the controller data directory.
    """
    folder = __control_folder_dir__
    return file_utils.concatenate_path(folder, shape_name)
def save_controller_shape(controller_name):
    """
    serialize the controller's nurbs curve data to a JSON file on disk.
    :param controller_name: <str> controller to save.
    :return: <str> controller file path name.
    """
    shape_data = curve_utils.get_nurb_data(controller_name)
    file_path = get_controller_path(controller_name)
    serializer = file_utils.JSONSerializer(file_name=file_path)
    serializer.write(data=shape_data)
    return file_path
def find_shape_in_dir(shape_name):
    """
    returns the controller files whose base name contains shape_name.
    :param shape_name: <str> name fragment to look for.
    :return: <list> matching controller file names.
    """
    # Return a concrete list rather than `filter(...)`: on Python 3 a lazy
    # filter object breaks the `[0]` indexing in get_controller_data_file
    # and always bool-tests True in is_shape_in_dir. On Python 2 this is
    # behaviorally identical (filter returned a list).
    return [f for f in find_controller_shapes()
            if shape_name in file_utils.split_file_name(f)]
def is_shape_in_dir(shape_name):
    """
    finds if the file name is in the directory.
    :param shape_name: <str> find this name in the shape directory.
    :return: <bool> the shape name exists in directory.
    """
    matches = find_shape_in_dir(shape_name)
    return True if matches else False
def get_controller_data_file(shape_name):
    """
    read the saved curve data for the given shape name from disk.
    :param shape_name: <str> shape to look up.
    :return: deserialized controller curve data.
    :raises IOError: when no matching shape file exists in the directory.
    """
    if not is_shape_in_dir(shape_name):
        raise IOError("[NoControllerShapesFoundInDir] :: {}".format(shape_name))
    shape_file = find_shape_in_dir(shape_name)[0]
    data_path = get_controller_path(shape_file)
    return file_utils.JSONSerializer(file_name=data_path).read()
def find_controller_shapes():
    """
    list every saved controller shape file.
    :return: <tuple> array of files.
    """
    return file_utils.list_controller_files()
def insert_groups(object_name="", names=()):
    """
    insert one transform group above the object for each name given.
    :param object_name: <str> insert groups here.
    :param names: <tuple> array of names to use to create the groups with.
    :return: <tuple> created group names.
    """
    return tuple(object_utils.insert_transform(object_name, group_name)
                 for group_name in names)
def create_controller_shape(shape_name):
    """
    creates the shape from the file.
    :param shape_name: <str> saved shape to rebuild.
    :return: <tuple> array of created curves.
    """
    curve_data = get_controller_data_file(shape_name)
    curves = ()
    for c_name, c_data in curve_data.items():
        # NOTE(review): form, order and cv_length are read but never used
        form = c_data['form']
        knots = c_data['knots']
        cvs = c_data['cvs']
        degree = c_data['degree']
        order = c_data['order']
        cv_length = len(cvs)
        cv_points = ()
        # knot = cv_length + degree - 1
        # drop the leading element of each cv entry -- presumably an index,
        # keeping only the coordinates; TODO confirm against the saved format
        for cv_point in cvs:
            cv_points += cv_point[1:],
        try:
            # first attempt trims the last two knots -- presumably to match
            # Maya's expected knot count for this data; falls back to the
            # full knot vector when Maya rejects the trimmed one
            curves += cmds.curve(p=cv_points, k=knots[:-2], degree=degree),
        except RuntimeError:
            curves += cmds.curve(p=cv_points, k=knots, degree=degree),
    return curves
def get_curve_shape_name(name=""):
    """
    build the Maya shape-node name for a transform name.
    :param name: <str> base name.
    :return: <str> curve shape name.
    """
    return '%sShape' % name
def get_curve_shapes_in_array(curve_names):
    """
    collect the first shape name of every curve transform given.
    :param curve_names: iterable of curve transform names.
    :return: <tuple> shape names.
    """
    shape_names = ()
    for curve in curve_names:
        shape_names += (object_utils.get_shape_name(curve)[0],)
    return shape_names
def parent_curve_shapes(curve_names):
    """
    parents the shapes of the curves to the last curve in the array.
    :param curve_names: <tuple> array of objects to parent.
    :return: <str> the name of the curve.
    """
    # the last curve becomes the surviving transform
    curve_name = curve_names[-1]
    if len(curve_names) != 1:
        curve_shapes = list(get_curve_shapes_in_array(curve_names))
        # reparent all other shapes under the last transform (r=relative,
        # s=shape), then delete the now-empty source transforms
        cmds.parent(curve_shapes[:-1] + [curve_name], r=True, s=True)
        cmds.delete(curve_names[:-1])
    return curve_name
def create_control(shape_name='cube', name='', groups=('grp', CONSTRAINT_GRP)):
    """
    create a controller object with specified groups.
    :param shape_name: <str> create this shape.
    :param name: <str> the name of the controller to name.
    :param groups: <tuple> array of group suffixes to create.
    :return: <dict> 'controller' name and 'group_names' belonging to it.
    """
    return_data = {}
    curve_name = parent_curve_shapes(create_controller_shape(shape_name))
    # renames the default curve name into specified name string,
    # falling back to the shape name when no name is given
    curve_name = cmds.rename(curve_name, name or shape_name)
    return_data['controller'] = curve_name
    group_names = ['{}_{}'.format(curve_name, suffix) for suffix in groups]
    return_data['group_names'] = insert_groups(curve_name, names=group_names)
    return return_data
def create_control_at_transform(object_name, name='', shape_name="cube", auto_num=True):
    """
    creates a controller object at the same space as the transform.
    :param object_name: <str> object name to use.
    :param name: <str> the name for the new controller object.
    :param shape_name: <str> build this shape.
    :param auto_num: <bool> generate a number associated with the name.
    :return: <dict> controller creation data ('controller', 'group_names').
    """
    source_transform = transform_utils.Transform(object_name)
    if auto_num:
        name = name_utils.get_start_name_with_num(name)
    ctrl_data = create_control(shape_name, name=name)
    # snap the outermost group onto the source transform's world matrix
    outer_group = ctrl_data['group_names'][-1]
    cmds.xform(outer_group, m=source_transform.world_matrix(), ws=1)
    return ctrl_data
def get_control_name(name, idx=0):
    """
    Build the standard controller name from a base name and an index.

    :param name: <str> the base name.
    :param idx: <int> iteration index.
    :return: <str> controller name "<name>_<idx>_<CTRL_SUFFIX>".
    """
    return '_'.join((name, str(idx), CTRL_SUFFIX))
def rename_controls(ctrl_grp, new_name=""):
    """
    renames the controller children found under the group name.
    :param ctrl_grp: <str> controller group.
    :param new_name: <str> new base name used.
    :return: <tuple> the new names of the renamed children.
    """
    children = object_utils.get_transform_relatives(ctrl_grp, find_child=True, as_strings=True)
    new_children = ()
    for ch in children:
        # keep everything after the first underscore; swap only the prefix
        part_name = ch.partition('_')
        # bug fix: collect the name returned by rename(); previously the
        # stale pre-rename name was appended to the result tuple
        renamed = cmds.rename(ch, ''.join((new_name, part_name[1], part_name[2])))
        new_children += renamed,
    return new_children
def create_controls(objects_array, name='', shape_name="cube", apply_constraints=None, maintain_offset=False):
"""
creates controllers at this transform object name.
:param name: <str> create curves with this object name.
:param objects_array: <tuple> array of objects.
:param shape_name: <str> build this shape.
:param apply_constraints: | |
# Source repository: TanayShukla/Random
#!/usr/bin/env python
#
# Copyright (C) 2011-2012 <NAME>
# Copyright (C) 2011-2012 <NAME>
# Copyright (C) 2005-2011 <NAME>
# Based on previous work under copyright (c) 2001, 2002 McMillan Enterprises, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# This program will execute any file with name test*<digit>.py. If your test
# needs an additional dependency, name it test*<digit><letter>.py so that it
# is ignored by this program but still recognizable by anyone as a dependency
# of that particular test.
import glob
import optparse
import os
import re
import shutil
import sys
try:
import PyInstaller
except ImportError:
# if importing PyInstaller fails, try to load from parent
# directory to support running without installation
import imp
if not hasattr(os, "getuid") or os.getuid() != 0:
imp.load_module('PyInstaller', *imp.find_module('PyInstaller',
[os.path.dirname(os.path.dirname(os.path.abspath(__file__)))]))
from PyInstaller import HOMEPATH
from PyInstaller import is_py23, is_py24, is_py25, is_py26, is_win, is_darwin
from PyInstaller import compat
from PyInstaller.lib import unittest2 as unittest
from PyInstaller.lib import junitxml
from PyInstaller.utils import misc
# Global verbosity flag; expected to be set from command-line options.
VERBOSE = False
# When True, results are collected into a junit-xml report (sys.stdout is
# redirected for that).
REPORT = False
# Directory with this script (runtests.py).
BASEDIR = os.path.dirname(os.path.abspath(__file__))
class MiscDependencies(object):
    """
    Place holder for special requirements of some tests.
    e.g. basic/test_ctypes needs C compiler.
    Every method returns None when successful or a string containing
    error message to be displayed on console.
    """
    def c_compiler(self):
        """
        Check availability of C compiler.

        Returns None when a compiler was found, an error string otherwise.
        """
        msg = 'Cannot find GCC, MinGW or Visual Studio in PATH.'
        compiler = None
        # On Windows prefer the MSVC compiler driver.
        if is_win:
            compiler = misc.find_executable('cl')
        # Fall back to GCC (also covers MinGW on Windows).
        if compiler is None:
            compiler = misc.find_executable('gcc')
        if compiler is None:
            return msg
        return None  # C compiler was found.
class SkipChecker(object):
    """
    Check conditions if a test case should be skipped.
    """
    def __init__(self):
        depend = MiscDependencies()
        # Required Python or OS version for some tests.
        self.MIN_VERSION_OR_OS = {
            'basic/test_time': is_py23,
            'basic/test_celementtree': is_py25,
            'basic/test_email': is_py25,
            # On Mac DYLD_LIBRARY_PATH is not used.
            'basic/test_absolute_ld_library_path': not is_win and not is_darwin,
            'import/test_c_extension': is_py25,
            'import/test_onefile_c_extension': is_py25,
            'import/test_relative_import': is_py25,
            'import/test_relative_import2': is_py26,
            'import/test_relative_import3': is_py25,
            'libraries/test_enchant': is_win,
            }
        # Required Python modules for some tests.
        self.MODULES = {
            'basic/test_ctypes': ['ctypes'],
            'basic/test_module_attributes': ['xml.etree.cElementTree'],
            'basic/test_nestedlaunch1': ['ctypes'],
            'basic/test_onefile_multiprocess': ['multiprocessing'],
            'libraries/test_enchant': ['enchant'],
            'libraries/test_Image': ['PIL'],
            'libraries/test_Image2': ['PIL'],
            'libraries/test_numpy': ['numpy'],
            'libraries/test_onefile_tkinter': ['Tkinter'],
            'libraries/test_PIL': ['PIL'],
            'libraries/test_PIL2': ['PIL'],
            'libraries/test_pycrypto': ['Crypto'],
            'libraries/test_pyodbc': ['pyodbc'],
            'libraries/test_pyttsx': ['pyttsx'],
            'libraries/test_sqlalchemy': ['sqlalchemy', 'MySQLdb', 'psycopg2'],
            'libraries/test_usb': ['ctypes', 'usb'],
            'libraries/test_wx': ['wx'],
            'libraries/test_wx_pubsub': ['wx'],
            'libraries/test_wx_pubsub_arg1': ['wx'],
            'libraries/test_wx_pubsub_kwargs': ['wx'],
            'import/test_c_extension': ['simplejson'],
            'import/test_ctypes_cdll_c': ['ctypes'],
            'import/test_ctypes_cdll_c2': ['ctypes'],
            'import/test_eggs2': ['pkg_resources'],
            'import/test_onefile_c_extension': ['simplejson'],
            'import/test_onefile_zipimport': ['pkg_resources'],
            'import/test_onefile_zipimport2': ['pkg_resources', 'setuptools'],
            'interactive/test_pygame': ['pygame'],
            }
        # Other dependencies of some tests.
        self.DEPENDENCIES = {
            'basic/test_ctypes': [depend.c_compiler()],
            # Support for unzipped eggs is not yet implemented.
            # http://www.pyinstaller.org/ticket/541
            'import/test_eggs1': ['Unzipped eggs not yet implemented.'],
            }
    def _check_python_and_os(self, test_name):
        """
        Return True if test name is not in the list or Python or OS
        version is not met.
        """
        if (test_name in self.MIN_VERSION_OR_OS and
                not self.MIN_VERSION_OR_OS[test_name]):
            return False
        return True
    def _check_modules(self, test_name):
        """
        Return name of missing required module, if any. None means
        no module is missing.
        """
        if test_name in self.MODULES:
            for mod_name in self.MODULES[test_name]:
                # STDOUT and STDERR are discarded (devnull) to hide
                # import exceptions.  Bug fix: devnull must be opened
                # for *writing* so the child process can write to it;
                # the default read-only mode yields an unwritable fd.
                trash = open(compat.devnull, 'w')
                retcode = compat.exec_python_rc('-c', "import %s" % mod_name,
                        stdout=trash, stderr=trash)
                trash.close()
                if retcode != 0:
                    return mod_name
        return None
    def _check_dependencies(self, test_name):
        """
        Return error message when a requirement is not met, None otherwise.
        """
        if test_name in self.DEPENDENCIES:
            for dep in self.DEPENDENCIES[test_name]:
                if dep is not None:
                    return dep
        return None
    def check(self, test_name):
        """
        Check test requirements if they are any specified.
        Return tuple (True/False, 'Reason for skipping.').
        True if all requirements are met. Then test case may
        be executed.
        """
        if not self._check_python_and_os(test_name):
            return (False, 'Required another Python version or OS.')
        required_module = self._check_modules(test_name)
        if required_module is not None:
            return (False, "Module %s is missing." % required_module)
        dependency = self._check_dependencies(test_name)
        if dependency is not None:
            return (False, dependency)
        return (True, 'Requirements met.')
# Tests that are built directly from their .py script because no .spec
# file is provided for them.
NO_SPEC_FILE = [
    'basic/test_absolute_ld_library_path',
    'basic/test_absolute_python_path',
    'basic/test_email',
    'basic/test_email_oldstyle',
    'basic/test_onefile_multiprocess',
    'basic/test_python_home',
    'import/test_c_extension',
    'import/test_onefile_c_extension',
    'import/test_onefile_zipimport',
    'import/test_onefile_zipimport2',
    'libraries/test_enchant',
    'libraries/test_onefile_tkinter',
    'libraries/test_sqlalchemy',
    'libraries/test_pyodbc',
    'libraries/test_pyttsx',
    'libraries/test_usb',
    'libraries/test_wx_pubsub',
    'libraries/test_wx_pubsub_kwargs',
    'libraries/test_wx_pubsub_arg1'
]
class BuildTestRunner(object):
def __init__(self, test_name, verbose=False, report=False):
# Use path separator '/' even on windows for test_name name.
self.test_name = test_name.replace('\\', '/')
self.verbose = verbose
self.test_dir, self.test_file = os.path.split(self.test_name)
# For junit xml report some behavior is changed.
# Especially redirecting sys.stdout.
self.report = report
def _msg(self, text):
"""
Important text. Print it to console only in verbose mode.
"""
if self.verbose:
# This allows to redirect stdout to junit xml report.
sys.stdout.write('\n' + 10 * '#' + ' ' + text + ' ' + 10 * '#' + '\n\n')
sys.stdout.flush()
def _plain_msg(self, text):
"""
Print text to console only in verbose mode.
"""
if self.verbose:
sys.stdout.write(text + '\n')
sys.stdout.flush()
def _find_exepath(self, test, parent_dir='dist'):
of_prog = os.path.join(parent_dir, test) # one-file deploy filename
od_prog = os.path.join(parent_dir, test, test) # one-dir deploy filename
prog = None
if os.path.isfile(of_prog):
prog = of_prog
elif os.path.isfile(of_prog + ".exe"):
prog = of_prog + ".exe"
elif os.path.isdir(of_prog):
if os.path.isfile(od_prog):
prog = od_prog
elif os.path.isfile(od_prog + ".exe"):
prog = od_prog + ".exe"
return prog
    def _run_created_exe(self, test, testdir=None):
        """
        Run executable created by PyInstaller.

        Returns the executable's exit code, or 1 when no executable
        was found.  NOTE(review): the `testdir` parameter is unused.
        """
        self._msg('EXECUTING TEST ' + self.test_name)
        # Run the test in a clean environment to make sure they're
        # really self-contained
        path = compat.getenv('PATH')
        compat.unsetenv('PATH')
        prog = self._find_exepath(test, 'dist')
        if prog is None:
            self._plain_msg('ERROR: no file generated by PyInstaller found!')
            # Restore PATH before bailing out.
            compat.setenv("PATH", path)
            return 1
        else:
            self._plain_msg("RUNNING: " + prog)
            # Execute from the program's own directory so relative
            # resource lookups inside the bundle resolve correctly.
            old_wd = os.getcwd()
            os.chdir(os.path.dirname(prog))
            prog = os.path.join(os.curdir, os.path.basename(prog))
            retcode, out, err = compat.exec_command_all(prog)
            os.chdir(old_wd)
            # Echo the child's output, then restore the environment.
            self._msg('STDOUT %s' % self.test_name)
            self._plain_msg(out)
            self._msg('STDERR %s' % self.test_name)
            self._plain_msg(err)
            compat.setenv("PATH", path)
            return retcode
def test_exists(self):
"""
Return True if test file exists.
"""
return os.path.exists(os.path.join(BASEDIR, self.test_name + '.py'))
def test_building(self):
"""
Run building of test script.
Return True if build succeded False otherwise.
"""
OPTS = ['--debug']
if self.verbose:
OPTS.extend(['--debug', '--log-level=INFO'])
else:
OPTS.append('--log-level=ERROR')
# Build executable in onefile mode.
if self.test_file.startswith('test_onefile'):
OPTS.append('--onefile')
else:
OPTS.append('--onedir')
self._msg("BUILDING TEST " + self.test_name)
# Use pyinstaller.py for building test_name.
testfile_spec = self.test_file + '.spec'
if not os.path.exists(self.test_file + '.spec'):
# .spec file does not exist and it has to be generated
# for main script.
testfile_spec = self.test_file + '.py'
pyinst_script = os.path.join(HOMEPATH, 'pyinstaller.py')
# In report mode is stdout and sys.stderr redirected.
if self.report:
# Write output from subprocess to stdout/err.
retcode, out, err = compat.exec_python_all(pyinst_script,
testfile_spec, *OPTS)
sys.stdout.write(out)
sys.stdout.write(err)
else:
retcode = compat.exec_python_rc(pyinst_script,
testfile_spec, *OPTS)
return retcode == 0
def test_exe(self):
"""
Test running of all created executables.
"""
files = glob.glob(os.path.join('dist', self.test_file + '*'))
retcode = 0
for exe in files:
exe = os.path.splitext(exe)[0]
retcode_tmp = self._run_created_exe(exe[5:], self.test_dir)
retcode = retcode or retcode_tmp
return retcode == 0
def test_logs(self):
"""
Compare log files (now used only by multipackage test_name).
Return True if .toc files match or when .toc patters
are not defined.
"""
logsfn = glob.glob(self.test_file + '.toc')
# Other main scritps do not start with 'test_'.
logsfn += glob.glob(self.test_file.split('_', 1)[1] + '_?.toc')
for logfn in logsfn:
self._msg("EXECUTING MATCHING " + logfn)
tmpname = os.path.splitext(logfn)[0]
prog = self._find_exepath(tmpname)
if prog is None:
prog = self._find_exepath(tmpname,
os.path.join('dist', self.test_file))
fname_list = compat.exec_python(
os.path.join(HOMEPATH, 'utils', 'ArchiveViewer.py'),
'-b', '-r', prog)
# Fix line-endings so eval() does not fail.
fname_list = fname_list.replace('\r\n', '\n').replace('\n\r', '\n')
fname_list = eval(fname_list)
pattern_list = eval(open(logfn, 'rU').read())
# Alphabetical order of patterns.
pattern_list.sort()
count = 0
for pattern in pattern_list:
found = False
for fname in fname_list:
if re.match(pattern, fname):
count += 1
found = True
self._plain_msg('MATCH: %s --> %s' % | |
= Parameter(name = 'VV1x1',
nature = 'internal',
type = 'complex',
value = 'RVV1x1',
texname = '\\text{VV1x1}')
# --- Internal model parameters (UFO format) ------------------------------
# Complex parameters promoted from their real external inputs: chargino
# mixing-matrix entries (VV*), soft trilinear couplings (t*3x3) and
# third-generation Yukawa couplings (y*3x3).
VV1x2 = Parameter(name = 'VV1x2',
                  nature = 'internal',
                  type = 'complex',
                  value = 'RVV1x2',
                  texname = '\\text{VV1x2}')
VV2x1 = Parameter(name = 'VV2x1',
                  nature = 'internal',
                  type = 'complex',
                  value = 'RVV2x1',
                  texname = '\\text{VV2x1}')
VV2x2 = Parameter(name = 'VV2x2',
                  nature = 'internal',
                  type = 'complex',
                  value = 'RVV2x2',
                  texname = '\\text{VV2x2}')
# Electromagnetic and strong couplings derived from aEWM1 and aS.
ee = Parameter(name = 'ee',
               nature = 'internal',
               type = 'real',
               value = '2*cmath.sqrt(1/aEWM1)*cmath.sqrt(cmath.pi)',
               texname = 'e')
G = Parameter(name = 'G',
              nature = 'internal',
              type = 'real',
              value = '2*cmath.sqrt(aS)*cmath.sqrt(cmath.pi)',
              texname = 'G')
td3x3 = Parameter(name = 'td3x3',
                  nature = 'internal',
                  type = 'complex',
                  value = 'Rtd3x3',
                  texname = '\\text{td3x3}')
te3x3 = Parameter(name = 'te3x3',
                  nature = 'internal',
                  type = 'complex',
                  value = 'Rte3x3',
                  texname = '\\text{te3x3}')
tu3x3 = Parameter(name = 'tu3x3',
                  nature = 'internal',
                  type = 'complex',
                  value = 'Rtu3x3',
                  texname = '\\text{tu3x3}')
yd3x3 = Parameter(name = 'yd3x3',
                  nature = 'internal',
                  type = 'complex',
                  value = 'Ryd3x3',
                  texname = '\\text{yd3x3}')
ye3x3 = Parameter(name = 'ye3x3',
                  nature = 'internal',
                  type = 'complex',
                  value = 'Rye3x3',
                  texname = '\\text{ye3x3}')
yu3x3 = Parameter(name = 'yu3x3',
                  nature = 'internal',
                  type = 'complex',
                  value = 'Ryu3x3',
                  texname = '\\text{yu3x3}')
# Derived electroweak quantities: the Higgs-sector b-term, weak mixing
# angle, gauge couplings and the Higgs vacuum expectation values.
bb = Parameter(name = 'bb',
               nature = 'internal',
               type = 'complex',
               value = '((-mHd2 + mHu2 - MZ**2*cmath.cos(2*beta))*cmath.tan(2*beta))/2.',
               texname = 'b')
sw = Parameter(name = 'sw',
               nature = 'internal',
               type = 'real',
               value = 'cmath.sqrt(1 - cw**2)',
               texname = 's_w')
gp = Parameter(name = 'gp',
               nature = 'internal',
               type = 'real',
               value = 'ee/cw',
               texname = 'g\'')
gw = Parameter(name = 'gw',
               nature = 'internal',
               type = 'real',
               value = 'ee/sw',
               texname = 'g_w')
vev = Parameter(name = 'vev',
                nature = 'internal',
                type = 'real',
                value = '(2*cw*MZ*sw)/ee',
                texname = 'v')
vd = Parameter(name = 'vd',
               nature = 'internal',
               type = 'real',
               value = 'vev*cmath.cos(beta)',
               texname = 'v_d')
vu = Parameter(name = 'vu',
               nature = 'internal',
               type = 'real',
               value = 'vev*cmath.sin(beta)',
               texname = 'v_u')
# Contracted coupling tensors I<n>x<row><col>, built from the sfermion
# mixing matrices (Rd/Rl/Ru), Yukawas and trilinear couplings.
I1x33 = Parameter(name = 'I1x33',
                  nature = 'internal',
                  type = 'complex',
                  value = 'complexconjugate(CKM3x3)*complexconjugate(yu3x3)',
                  texname = '\\text{I1x33}')
I10x33 = Parameter(name = 'I10x33',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd3x3*complexconjugate(yd3x3)',
                   texname = '\\text{I10x33}')
I10x36 = Parameter(name = 'I10x36',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd6x3*complexconjugate(yd3x3)',
                   texname = '\\text{I10x36}')
I100x33 = Parameter(name = 'I100x33',
                    nature = 'internal',
                    type = 'complex',
                    value = 'Rd3x6*complexconjugate(Rd3x6)',
                    texname = '\\text{I100x33}')
I100x36 = Parameter(name = 'I100x36',
                    nature = 'internal',
                    type = 'complex',
                    value = 'Rd6x6*complexconjugate(Rd3x6)',
                    texname = '\\text{I100x36}')
I100x44 = Parameter(name = 'I100x44',
                    nature = 'internal',
                    type = 'complex',
                    value = 'Rd4x4*complexconjugate(Rd4x4)',
                    texname = '\\text{I100x44}')
I100x55 = Parameter(name = 'I100x55',
                    nature = 'internal',
                    type = 'complex',
                    value = 'Rd5x5*complexconjugate(Rd5x5)',
                    texname = '\\text{I100x55}')
I100x63 = Parameter(name = 'I100x63',
                    nature = 'internal',
                    type = 'complex',
                    value = 'Rd3x6*complexconjugate(Rd6x6)',
                    texname = '\\text{I100x63}')
I100x66 = Parameter(name = 'I100x66',
                    nature = 'internal',
                    type = 'complex',
                    value = 'Rd6x6*complexconjugate(Rd6x6)',
                    texname = '\\text{I100x66}')
I101x33 = Parameter(name = 'I101x33',
                    nature = 'internal',
                    type = 'complex',
                    value = 'Rl3x6*complexconjugate(Rl3x6)',
                    texname = '\\text{I101x33}')
I101x36 = Parameter(name = 'I101x36',
                    nature = 'internal',
                    type = 'complex',
                    value = 'Rl6x6*complexconjugate(Rl3x6)',
                    texname = '\\text{I101x36}')
I101x44 = Parameter(name = 'I101x44',
                    nature = 'internal',
                    type = 'complex',
                    value = 'Rl4x4*complexconjugate(Rl4x4)',
                    texname = '\\text{I101x44}')
I101x55 = Parameter(name = 'I101x55',
                    nature = 'internal',
                    type = 'complex',
                    value = 'Rl5x5*complexconjugate(Rl5x5)',
                    texname = '\\text{I101x55}')
I101x63 = Parameter(name = 'I101x63',
                    nature = 'internal',
                    type = 'complex',
                    value = 'Rl3x6*complexconjugate(Rl6x6)',
                    texname = '\\text{I101x63}')
I101x66 = Parameter(name = 'I101x66',
                    nature = 'internal',
                    type = 'complex',
                    value = 'Rl6x6*complexconjugate(Rl6x6)',
                    texname = '\\text{I101x66}')
I102x33 = Parameter(name = 'I102x33',
                    nature = 'internal',
                    type = 'complex',
                    value = 'Ru3x6*complexconjugate(Ru3x6)',
                    texname = '\\text{I102x33}')
I102x36 = Parameter(name = 'I102x36',
                    nature = 'internal',
                    type = 'complex',
                    value = 'Ru6x6*complexconjugate(Ru3x6)',
                    texname = '\\text{I102x36}')
I102x44 = Parameter(name = 'I102x44',
                    nature = 'internal',
                    type = 'complex',
                    value = 'Ru4x4*complexconjugate(Ru4x4)',
                    texname = '\\text{I102x44}')
I102x55 = Parameter(name = 'I102x55',
                    nature = 'internal',
                    type = 'complex',
                    value = 'Ru5x5*complexconjugate(Ru5x5)',
                    texname = '\\text{I102x55}')
I102x63 = Parameter(name = 'I102x63',
                    nature = 'internal',
                    type = 'complex',
                    value = 'Ru3x6*complexconjugate(Ru6x6)',
                    texname = '\\text{I102x63}')
I102x66 = Parameter(name = 'I102x66',
                    nature = 'internal',
                    type = 'complex',
                    value = 'Ru6x6*complexconjugate(Ru6x6)',
                    texname = '\\text{I102x66}')
I11x33 = Parameter(name = 'I11x33',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd3x6*yd3x3',
                   texname = '\\text{I11x33}')
I11x36 = Parameter(name = 'I11x36',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd6x6*yd3x3',
                   texname = '\\text{I11x36}')
I12x11 = Parameter(name = 'I12x11',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd1x1*complexconjugate(Rd1x1)',
                   texname = '\\text{I12x11}')
I12x22 = Parameter(name = 'I12x22',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd2x2*complexconjugate(Rd2x2)',
                   texname = '\\text{I12x22}')
I12x33 = Parameter(name = 'I12x33',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd3x3*complexconjugate(Rd3x3)',
                   texname = '\\text{I12x33}')
I12x36 = Parameter(name = 'I12x36',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd6x3*complexconjugate(Rd3x3)',
                   texname = '\\text{I12x36}')
I12x63 = Parameter(name = 'I12x63',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd3x3*complexconjugate(Rd6x3)',
                   texname = '\\text{I12x63}')
I12x66 = Parameter(name = 'I12x66',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd6x3*complexconjugate(Rd6x3)',
                   texname = '\\text{I12x66}')
I13x33 = Parameter(name = 'I13x33',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd3x6*complexconjugate(Rd3x6)',
                   texname = '\\text{I13x33}')
I13x36 = Parameter(name = 'I13x36',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd6x6*complexconjugate(Rd3x6)',
                   texname = '\\text{I13x36}')
I13x44 = Parameter(name = 'I13x44',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd4x4*complexconjugate(Rd4x4)',
                   texname = '\\text{I13x44}')
I13x55 = Parameter(name = 'I13x55',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd5x5*complexconjugate(Rd5x5)',
                   texname = '\\text{I13x55}')
I13x63 = Parameter(name = 'I13x63',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd3x6*complexconjugate(Rd6x6)',
                   texname = '\\text{I13x63}')
I13x66 = Parameter(name = 'I13x66',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd6x6*complexconjugate(Rd6x6)',
                   texname = '\\text{I13x66}')
I14x33 = Parameter(name = 'I14x33',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd3x3*complexconjugate(Rd3x6)*complexconjugate(td3x3)',
                   texname = '\\text{I14x33}')
I14x36 = Parameter(name = 'I14x36',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd6x3*complexconjugate(Rd3x6)*complexconjugate(td3x3)',
                   texname = '\\text{I14x36}')
I14x63 = Parameter(name = 'I14x63',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd3x3*complexconjugate(Rd6x6)*complexconjugate(td3x3)',
                   texname = '\\text{I14x63}')
I14x66 = Parameter(name = 'I14x66',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd6x3*complexconjugate(Rd6x6)*complexconjugate(td3x3)',
                   texname = '\\text{I14x66}')
I15x33 = Parameter(name = 'I15x33',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd3x3*complexconjugate(Rd3x6)*complexconjugate(yd3x3)',
                   texname = '\\text{I15x33}')
I15x36 = Parameter(name = 'I15x36',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd6x3*complexconjugate(Rd3x6)*complexconjugate(yd3x3)',
                   texname = '\\text{I15x36}')
I15x63 = Parameter(name = 'I15x63',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd3x3*complexconjugate(Rd6x6)*complexconjugate(yd3x3)',
                   texname = '\\text{I15x63}')
I15x66 = Parameter(name = 'I15x66',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd6x3*complexconjugate(Rd6x6)*complexconjugate(yd3x3)',
                   texname = '\\text{I15x66}')
I16x33 = Parameter(name = 'I16x33',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd3x6*td3x3*complexconjugate(Rd3x3)',
                   texname = '\\text{I16x33}')
I16x36 = Parameter(name = 'I16x36',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd6x6*td3x3*complexconjugate(Rd3x3)',
                   texname = '\\text{I16x36}')
I16x63 = Parameter(name = 'I16x63',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd3x6*td3x3*complexconjugate(Rd6x3)',
                   texname = '\\text{I16x63}')
I16x66 = Parameter(name = 'I16x66',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd6x6*td3x3*complexconjugate(Rd6x3)',
                   texname = '\\text{I16x66}')
I17x33 = Parameter(name = 'I17x33',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd3x3*yd3x3*complexconjugate(Rd3x3)*complexconjugate(yd3x3)',
                   texname = '\\text{I17x33}')
I17x36 = Parameter(name = 'I17x36',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd6x3*yd3x3*complexconjugate(Rd3x3)*complexconjugate(yd3x3)',
                   texname = '\\text{I17x36}')
I17x63 = Parameter(name = 'I17x63',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd3x3*yd3x3*complexconjugate(Rd6x3)*complexconjugate(yd3x3)',
                   texname = '\\text{I17x63}')
I17x66 = Parameter(name = 'I17x66',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd6x3*yd3x3*complexconjugate(Rd6x3)*complexconjugate(yd3x3)',
                   texname = '\\text{I17x66}')
I18x33 = Parameter(name = 'I18x33',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd3x6*yd3x3*complexconjugate(Rd3x3)',
                   texname = '\\text{I18x33}')
I18x36 = Parameter(name = 'I18x36',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd6x6*yd3x3*complexconjugate(Rd3x3)',
                   texname = '\\text{I18x36}')
I18x63 = Parameter(name = 'I18x63',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd3x6*yd3x3*complexconjugate(Rd6x3)',
                   texname = '\\text{I18x63}')
I18x66 = Parameter(name = 'I18x66',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd6x6*yd3x3*complexconjugate(Rd6x3)',
                   texname = '\\text{I18x66}')
I19x33 = Parameter(name = 'I19x33',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd3x6*yd3x3*complexconjugate(Rd3x6)*complexconjugate(yd3x3)',
                   texname = '\\text{I19x33}')
I19x36 = Parameter(name = 'I19x36',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd6x6*yd3x3*complexconjugate(Rd3x6)*complexconjugate(yd3x3)',
                   texname = '\\text{I19x36}')
I19x63 = Parameter(name = 'I19x63',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd3x6*yd3x3*complexconjugate(Rd6x6)*complexconjugate(yd3x3)',
                   texname = '\\text{I19x63}')
I19x66 = Parameter(name = 'I19x66',
                   nature = 'internal',
                   type = 'complex',
                   value = 'Rd6x6*yd3x3*complexconjugate(Rd6x6)*complexconjugate(yd3x3)',
                   texname = '\\text{I19x66}')
I2x33 = Parameter(name = 'I2x33',
                  nature = 'internal',
                  type = 'complex',
                  value = 'yd3x3*complexconjugate(CKM3x3)',
                  texname = '\\text{I2x33}')
I20x33 = Parameter(name = 'I20x33',
nature = 'internal',
type = 'complex',
| |
############################################# IMPORTING MODULES
import tkinter as tk
from tkinter import ttk
from tkinter import PhotoImage
from tkinter import Canvas
from tkinter import messagebox as mess
import tkinter.simpledialog as tsd
import os
from cv2 import cv2
import csv
import numpy as np
from PIL import Image, ImageTk
import pandas as pd
import datetime
import time
import yagmail
############################################# FUNCTIONS ################################################
def assure_path_exists(path):
dir = os.path.dirname(path)
if not os.path.exists(dir):
os.makedirs(dir)
##################################################################################
def tick():
    """Refresh the clock label once and reschedule itself every 200 ms."""
    clock.config(text=time.strftime('%H:%M:%S'))
    clock.after(200, tick)
###################################################################################
def contact():
    """Pop up the contact-information dialog."""
    body = "Please contact us on : '<EMAIL>' "
    mess._show(title='Contact us', message=body)
###################################################################################
def Close():
    # Tear down the main application window, ending the Tk mainloop.
    window.destroy()
###################################################################################
#receiver="<EMAIL>"
def sendMail():
    """Email the most recent attendance spreadsheet to the registered receiver.

    The receiver address is cached in TrainingImageLabel/mail.txt; when the
    cache is missing the user is prompted once and the address is stored.
    """
    assure_path_exists("/Users/harishreddy/MyFiles/Coursera/Facial Attendance System/Tkinter-1/TrainingImageLabel/")
    mail_file = "/Users/harishreddy/MyFiles/Coursera/Facial Attendance System/Tkinter-1/TrainingImageLabel/mail.txt"
    if os.path.isfile(mail_file):
        # 'with' closes the handle (the old code leaked it)
        with open(mail_file, "r") as tf:
            receiver = tf.read()
    else:
        receiver = tsd.askstring('Attendance receiver','Please enter mail ID of the receiver of attendance report')
        if receiver is None:
            mess._show(title='No Mail Entered', message='Mail not set!! Please try again')
            # bug fix: previously execution fell through and tried to
            # send mail with receiver=None
            return
        with open(mail_file, "a+") as tf:
            tf.write(receiver)
        mess._show(title='Mail ID of receiver Registered', message='Receiver mail ID registered successfully!!')
    date = datetime.date.today().strftime("%B %d, %Y")
    path = "/Users/harishreddy/MyFiles/Coursera/Facial Attendance System/Tkinter-1/Attendance_Excel"
    # NOTE(review): this changes the process-wide working directory and
    # never restores it — confirm later file operations expect that.
    os.chdir(path)
    files = sorted(os.listdir(os.getcwd()), key=os.path.getmtime)
    # presumably the newest entry is still being written, so the report
    # to send is the second-newest file — TODO confirm
    filename = files[-2]
    sub = "Attendance Report for " + str(date)
    body = 'Please find the Attendance report for the day '+date+' of III year IT-A class'
    # mail information
    # SECURITY: credentials are hard-coded in source; move them to a
    # config file or an environment variable.
    yag = yagmail.SMTP("<EMAIL>", "admin#007")
    # sent the mail
    yag.send(
        to=receiver,
        subject=sub,  # email subject
        contents=body,  # email body
        attachments=filename  # file attached
    )
    mess._show(title='EMAIL SENT Succesfully', message=date+' Attendance mailed to '+receiver)
###################################################################################
def check_haarcascadefile():
    """Abort the application when the Haar cascade file is missing."""
    if os.path.isfile("haarcascade_frontalface_default.xml"):
        return
    mess._show(title='Some file missing', message='Please contact us for help')
    window.destroy()
###################################################################################
def save_pass():
    """Validate the old password and store the new one in psd.txt.

    When no password file exists yet, the user is prompted to create one
    and the function returns without touching the change-password fields.
    """
    assure_path_exists("TrainingImageLabel/")
    if os.path.isfile("TrainingImageLabel/psd.txt"):
        # 'with' guarantees the handle is closed (old code leaked it)
        with open("TrainingImageLabel/psd.txt", "r") as tf:
            key = tf.read()
    else:
        master.destroy()
        new_pas = tsd.askstring('Old Password not found', 'Please enter a new password below', show='*')
        if new_pas is None:
            mess._show(title='No Password Entered', message='Password not set!! Please try again')
        else:
            with open("TrainingImageLabel/psd.txt", "w") as tf:
                tf.write(new_pas)
            mess._show(title='Password Registered', message='New password was registered successfully!!')
        return
    op = old.get()
    newp = new.get()
    nnewp = nnew.get()
    if op == key:
        if newp == nnewp:
            # persist the new password; 'with' flushes and closes the file
            with open("TrainingImageLabel/psd.txt", "w") as txf:
                txf.write(newp)
        else:
            mess._show(title='Error', message='Confirm new password again!!!')
            return
    else:
        mess._show(title='Wrong Password', message='Please enter correct old password.')
        return
    mess._show(title='Password Changed', message='Password changed successfully!!')
    master.destroy()
###################################################################################
def change_pass():
    """Open the 'Change Password' dialog with old/new/confirm fields."""
    # The dialog window and its entry widgets are globals because
    # save_pass() reads them back when the user clicks Save.
    global master
    master = tk.Tk()
    master.geometry("400x160")
    master.resizable(False, False)
    master.title("Change Password")
    master.configure(background="white")
    lbl4 = tk.Label(master, text='    Enter Old Password', bg='white', font=('times', 12, ' bold '))
    lbl4.place(x=10, y=10)
    global old
    old = tk.Entry(master, width=25, fg="black", relief='solid', font=('times', 12, ' bold '), show='*')
    old.place(x=180, y=10)
    lbl5 = tk.Label(master, text='   Enter New Password', bg='white', font=('times', 12, ' bold '))
    lbl5.place(x=10, y=45)
    global new
    new = tk.Entry(master, width=25, fg="black", relief='solid', font=('times', 12, ' bold '), show='*')
    new.place(x=180, y=45)
    lbl6 = tk.Label(master, text='Confirm New Password', bg='white', font=('times', 12, ' bold '))
    lbl6.place(x=10, y=80)
    global nnew
    nnew = tk.Entry(master, width=25, fg="black", relief='solid', font=('times', 12, ' bold '), show='*')
    nnew.place(x=180, y=80)
    # Action buttons: Cancel simply closes the dialog; Save runs save_pass().
    cancel = tk.Button(master, text="Cancel", command=master.destroy, fg="black", bg="red", height=1, width=25,
                       activebackground="white", font=('times', 10, ' bold '))
    cancel.place(x=200, y=120)
    save1 = tk.Button(master, text="Save", command=save_pass, fg="black", bg="#3ece48", height=1, width=25,
                      activebackground="white", font=('times', 10, ' bold '))
    save1.place(x=10, y=120)
    master.mainloop()
#####################################################################################
def psw():
    """Ask for the admin password and run TrainImages() when it matches."""
    assure_path_exists("TrainingImageLabel/")
    if os.path.isfile("TrainingImageLabel/psd.txt"):
        # 'with' guarantees the handle is closed (old code leaked it)
        with open("TrainingImageLabel/psd.txt", "r") as tf:
            key = tf.read()
    else:
        new_pas = tsd.askstring('Old Password not found', 'Please enter a new password below', show='*')
        if new_pas is None:
            mess._show(title='No Password Entered', message='Password not set!! Please try again')
            # bug fix: nothing registered, so there is no key to compare
            return
        with open("TrainingImageLabel/psd.txt", "w") as tf:
            tf.write(new_pas)
        mess._show(title='Password Registered', message='New password registered successfully!!')
        # bug fix: 'key' was previously left undefined on this path,
        # raising NameError at the comparison below
        key = new_pas
    password = tsd.askstring('Password', 'Enter Password', show='*')
    if password == key:
        TrainImages()
    elif password is None:
        pass
    else:
        mess._show(title='Wrong Password', message='You have entered wrong password')
######################################################################################
def clear():
    """Reset the ID entry field and restore the default status message."""
    txt.delete(0, 'end')
    message1.configure(text="1)Take Images  >>>  2)Save Profile")
def clear2():
    """Reset the name entry field and restore the default status message."""
    txt2.delete(0, 'end')
    message1.configure(text="1)Take Images  >>>  2)Save Profile")
#######################################################################################
def TakeImages():
    """Capture up to 100 face samples from the webcam for a new student.

    Appends a [serial, ID, name] row to StudentDetails.csv and writes each
    detected face crop into TrainingImage/.
    """
    check_haarcascadefile()
    columns = ['SERIAL NO.', '', 'ID', '', 'NAME']
    assure_path_exists("StudentDetails/")
    assure_path_exists("TrainingImage/")
    serial = 0
    exists = os.path.isfile("StudentDetails/StudentDetails.csv")
    if exists:
        with open("StudentDetails/StudentDetails.csv", 'r') as csvFile1:
            reader1 = csv.reader(csvFile1)
            # every existing row (header included) bumps serial, so it
            # ends up as the next free serial number
            for l in reader1:
                serial = serial + 1
        csvFile1.close()  # redundant: the 'with' block already closed it
    else:
        with open("StudentDetails/StudentDetails.csv", 'a+') as csvFile1:
            writer = csv.writer(csvFile1)
            writer.writerow(columns)
            serial = 1
        csvFile1.close()  # redundant: the 'with' block already closed it
    Id = (txt.get())
    name = (txt2.get())
    # NOTE(review): a name containing any space passes this check even if
    # the rest is non-alphabetic — confirm that is intended
    if ((name.isalpha()) or (' ' in name)):
        cam = cv2.VideoCapture(0)
        harcascadePath = "haarcascade_frontalface_default.xml"
        detector = cv2.CascadeClassifier(harcascadePath)
        sampleNum = 0
        while (True):
            ret, img = cam.read()
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = detector.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
                # incrementing sample number
                sampleNum = sampleNum + 1
                # saving the captured face in the dataset folder TrainingImage
                # (filename pattern: "<serial>.<Id>.<sampleNum>.jpg")
                cv2.imwrite("TrainingImage/ " + str(serial)+"."+ Id + '.' + str(sampleNum) + ".jpg",
                            gray[y:y + h, x:x + w])
                # display the frame
                cv2.imshow('Taking Images', img)
            # wait for 100 miliseconds
            if cv2.waitKey(5) & 0xFF == ord('q'):
                break
            # break if the sample number is morethan 100
            elif sampleNum > 100:
                break
        cam.release()
        cv2.destroyAllWindows()
        res = "Images Taken for ID : " + Id
        row = [serial, '', Id, '', name]
        with open('StudentDetails/StudentDetails.csv', 'a+') as csvFile:
            writer = csv.writer(csvFile)
            writer.writerow(row)
        csvFile.close()  # redundant: the 'with' block already closed it
        message1.configure(text=res)
    else:
        if (name.isalpha() == False):
            res = "Enter Correct name"
            message.configure(text=res)
########################################################################################
def TrainImages():
    """Train the LBPH face recogniser on all captured images and save the
    model to TrainingImageLabel/Trainner.yml.

    Shows a message box and aborts when no training images exist.
    """
    check_haarcascadefile()
    assure_path_exists("TrainingImageLabel/")
    # Use the same factory spelling as TrackImages below; the old
    # cv2.face_LBPHFaceRecognizer.create() alias is not available in
    # current opencv-contrib builds.
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    faces, ID = getImagesAndLabels("TrainingImage")
    try:
        recognizer.train(faces, np.array(ID))
    except Exception:
        # empty/unreadable training set: nothing to train on
        mess._show(title='No Registrations', message='Please Register someone first!!!')
        return
    recognizer.save("TrainingImageLabel/Trainner.yml")
    message1.configure(text="Profile Saved Successfully")
    # NOTE(review): ID[0] is the first serial number, not a user count —
    # confirm whether len(set(ID)) was intended here.
    message.configure(text='Registrated Users : ' + str(ID[0]))
############################################################################################3
def getImagesAndLabels(path):
    """Load every training image under *path* and return ``(faces, Ids)``.

    faces -- list of uint8 numpy arrays (grayscale face crops)
    Ids   -- list of int serial numbers parsed from each file name, whose
             expected format is "<serial>.<id>.<sample>.jpg"
    """
    # get the path of all the files in the folder
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    faces = []
    Ids = []
    for imagePath in imagePaths:
        # Skip hidden/OS metadata files (.DS_Store, .gitkeep, ...);
        # generalises the old hard-coded 'TrainingImage/.DS_Store' check,
        # which only worked for that exact path.
        if os.path.basename(imagePath).startswith('.'):
            continue
        # load the image as 8-bit grayscale
        pilImage = Image.open(imagePath).convert('L')
        imageNp = np.array(pilImage, 'uint8')
        # the serial number is the first dot-separated field of the file name
        ID = int(os.path.split(imagePath)[-1].split(".")[0])
        faces.append(imageNp)
        Ids.append(ID)
    return faces, Ids
###########################################################################################
def TrackImages():
check_haarcascadefile()
assure_path_exists("Attendance/")
assure_path_exists("StudentDetails/")
for k in tv.get_children():
tv.delete(k)
msg = ''
i = 0
j = 0
recognizer = cv2.face.LBPHFaceRecognizer_create() # cv2.createLBPHFaceRecognizer()
exists3 = os.path.isfile("TrainingImageLabel/Trainner.yml")
if exists3:
recognizer.read("TrainingImageLabel/Trainner.yml")
else:
mess._show(title='Data Missing', message='Please click on Save Profile to reset data!!')
return
harcascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(harcascadePath);
cam = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_SIMPLEX
col_names = ['Id', 'Name', 'Date', 'Time']
exists1 = os.path.isfile("StudentDetails/StudentDetails.csv")
if exists1:
df = pd.read_csv("StudentDetails/StudentDetails.csv")
else:
mess._show(title='Details Missing', message='Students details are missing, please check!')
cam.release()
cv2.destroyAllWindows()
window.destroy()
while True:
ret, im = cam.read()
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(gray, 1.2, 5)
for (x, y, w, h) in faces:
cv2.rectangle(im, (x, y), (x + w, y + h), (225, 0, 0), 2)
serial, conf = recognizer.predict(gray[y:y + h, x:x + w])
if (conf < 50):
ts = time.time()
date = datetime.datetime.fromtimestamp(ts).strftime('%d-%m-%Y')
timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
aa = df.loc[df['SERIAL NO.'] == serial]['NAME'].values
ID = df.loc[df['SERIAL NO.'] == serial]['ID'].values
ID = str(ID)
ID = ID[2:-2]
bb = str(aa)
bb = bb[2:-2]
attendance = [ID, bb, str(date), str(timeStamp)]
else:
ID = bb= 'Unknown'
try:
cv2.putText(im, ID+" : "+bb, (x, y + h), font, 1, (255, 255, 255), 2)
except:
ID='Unknown'
cv2.putText(im, ID + " : " + bb, (x, y + h), font, 1, (255, 255, 255), 2)
cv2.imshow('Taking Attendance', im)
if cv2.waitKey(1) == ord('q'):
break
ts = time.time()
date = datetime.datetime.fromtimestamp(ts).strftime('%d-%m-%Y')
exists = os.path.isfile("Attendance/Attendance_" + date + ".csv")
if exists:
with open("Attendance/Attendance_" + date + ".csv", 'a+') as csvFile1:
try:
writer = csv.writer(csvFile1)
writer.writerow(attendance)
except:
attendance = ['NO ONE', 'NO ONE', str(date), str(datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S'))]
csvFile1.close()
else:
with open("Attendance/Attendance_" + date + ".csv", 'a+') as csvFile1:
writer = csv.writer(csvFile1)
writer.writerow(col_names)
writer.writerow(attendance)
csvFile1.close()
#After Taking attendance showing the details row by row on Treeview
with open("Attendance/Attendance_" + date + ".csv", 'r') as csvFile1:
reader1 = csv.reader(csvFile1)
for lines in reader1:
i = i + 1
if (i > 1):
iidd = str(lines[0])
tv.insert('', 0, text=iidd, values=(str(lines[1]), str(lines[2]), str(lines[3])))
# print(iidd)
# print(lines)
# Reading the csv file to a DF
df_new = pd.read_csv("Attendance/Attendance_" + date | |
# repo: inuitwallet/plunge_android
import json
from kivy.app import App
from kivy.config import ConfigParser
from kivy.core.window import Window
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.slider import Slider
from kivy.metrics import dp
from kivy.uix.settings import SettingString, SettingSpacer, SettingNumeric, InterfaceWithTabbedPanel, Settings
from kivy.uix.textinput import TextInput
from kivy.uix.widget import Widget
from kivy.uix.gridlayout import GridLayout
from kivy.uix.scrollview import ScrollView
from kivy.uix.spinner import Spinner
from kivy.network.urlrequest import UrlRequest
import utils
import logging
__author__ = 'woolly_sammoth'
class InterfaceWithCloseButton(InterfaceWithTabbedPanel):
    """Tabbed settings interface collapsed to a single configuration tab."""

    def add_panel(self, panel, name, uid):
        """Wrap *panel* in a scroll view and show it as the default tab."""
        scroller = ScrollView()
        scroller.add_widget(panel)
        tabs = self.tabbedpanel
        tabs.default_tab_text = 'Plunge Configuration'
        tabs.default_tab_content = scroller
        # shrink the tab strip to effectively nothing
        tabs.tab_width = 0.000001
class SettingsWithCloseButton(Settings):
    """Settings widget that renders through InterfaceWithCloseButton."""
    def __init__(self, *args, **kwargs):
        # Must be assigned before the superclass __init__ runs: Settings
        # instantiates interface_cls during construction.
        self.interface_cls = InterfaceWithCloseButton
        super(SettingsWithCloseButton, self).__init__(*args, **kwargs)
class SettingStringFocus(SettingString):
    """
    Overrides the SettingString class to automatically give keyboard focus
    to the input field of the pop up.
    """

    def _create_popup(self, instance):
        """Build and open the string-entry popup, then focus its text field."""
        # create popup layout
        content = BoxLayout(orientation='vertical', spacing='5dp')
        popup_width = min(0.95 * Window.width, dp(500))
        self.popup = popup = Popup(
            title=self.title, content=content, size_hint=(None, None),
            size=(popup_width, '250dp'))
        # create the textinput used for string input
        # (the redundant second `self.textinput = textinput` assignment
        # from the original has been removed)
        self.textinput = textinput = TextInput(
            text=self.value, font_size='24sp', multiline=False,
            size_hint_y=None, height='42sp')
        textinput.bind(on_text_validate=self._validate)
        # construct the content; bare Widgets act as vertical spacers
        content.add_widget(Widget())
        content.add_widget(textinput)
        content.add_widget(Widget())
        content.add_widget(SettingSpacer())
        # two buttons to accept or cancel the current value
        btnlayout = BoxLayout(size_hint_y=None, height='50dp', spacing='5dp')
        btn = Button(text='Ok')
        btn.bind(on_release=self._validate)
        btnlayout.add_widget(btn)
        btn = Button(text='Cancel')
        btn.bind(on_release=self._dismiss)
        btnlayout.add_widget(btn)
        content.add_widget(btnlayout)
        # all done: open the popup and give the input keyboard focus
        popup.open()
        textinput.focus = True
        # move the cursor past the end of any pre-filled text
        textinput.cursor = (1, 3000)
class SettingNumericFocus(SettingNumeric):
    """
    Overrides the SettingNumeric class to automatically give keyboard focus
    to the input field of the pop up.
    """

    def _create_popup(self, instance):
        """Build and open the numeric-entry popup, then focus its text field."""
        # create popup layout
        content = BoxLayout(orientation='vertical', spacing='5dp')
        popup_width = min(0.95 * Window.width, dp(500))
        self.popup = popup = Popup(
            title=self.title, content=content, size_hint=(None, None),
            size=(popup_width, '250dp'))
        # create the textinput used for numeric input
        # (the redundant second `self.textinput = textinput` assignment
        # from the original has been removed)
        self.textinput = textinput = TextInput(
            text=self.value, font_size='24sp', multiline=False,
            size_hint_y=None, height='42sp')
        textinput.bind(on_text_validate=self._validate)
        # construct the content; bare Widgets act as vertical spacers
        content.add_widget(Widget())
        content.add_widget(textinput)
        content.add_widget(Widget())
        content.add_widget(SettingSpacer())
        # two buttons to accept or cancel the current value
        btnlayout = BoxLayout(size_hint_y=None, height='50dp', spacing='5dp')
        btn = Button(text='Ok')
        btn.bind(on_release=self._validate)
        btnlayout.add_widget(btn)
        btn = Button(text='Cancel')
        btn.bind(on_release=self._dismiss)
        btnlayout.add_widget(btn)
        content.add_widget(btnlayout)
        # all done: open the popup and give the input keyboard focus
        popup.open()
        textinput.focus = True
        # move the cursor past the end of any pre-filled text
        textinput.cursor = (1, 3000)
class SettingStringExchange(SettingString):
"""
Overrides the SettingString class to provide a customised popup suitable for exchange data input
"""
num_rows = 0
exchange = None
chosen_api_key_pair = None
ask_max = None
bid_max = None
utils = utils.utils('')
keys_button = []
address = []
unit = []
rates = []
bot = []
logger = logging.getLogger('Plunge')
currencies = ['btc', 'ltc', 'eur', 'usd', 'ppc']
bots = ['nubot', 'pybot', 'none']
config = ConfigParser()
def on_panel(self, instance, value):
if value is None:
return
self.bind(on_release=self._create_popup)
def _dismiss(self, *largs):
if self.textinput:
self.textinput.focus = False
if self.popup:
self.popup.dismiss()
self.popup = None
self.num_rows = 0
self.keys_button = []
self.address = []
self.unit = []
self.rates = []
self.bot = []
def _validate(self, instance):
with open('user_data.json', 'a+') as user_data:
try:
saved_data = json.load(user_data)
except ValueError:
saved_data = {}
user_data.close()
saved_data[self.exchange] = []
good_records = 0
content = TextInput(multiline=True, text='Saving...', background_color=[0.13725, 0.12157, 0.12549, 0],
foreground_color=[1, 1, 1, 1])
popup = Popup(title='Saving Data for %s' % self.exchange, content=content,
size_hint=(None, None), size=(300, 500))
popup.open()
for x in range(0, self.num_rows, 1):
self.logger.info("saving row %d for %s" % (x+1, self.exchange))
content.text = '%s\nSaving row %d' % (content.text, x+1)
this_row = {}
public, secret = self.get_keys(self.keys_button[x].text)
if public is None or secret is None:
self.logger.warn("API Keys not set correctly")
content.text = '%s\n=> API Keys not set correctly' % content.text
continue
this_row['public'] = public
this_row['secret'] = secret
this_row['address'] = self.address[x].text
if not self.utils.check_checksum(this_row['address']) or not this_row['address'][:1] == 'B':
self.logger.warn("Invalid payout address %s" % this_row['address'])
content.text = '%s\n=> Invalid payout address' % content.text
continue
this_row['unit'] = self.unit[x].text
rates = self.rates[x].text
if "|" not in rates:
self.logger.warn("no rates set")
content.text = '%s\n=> No rates set' % content.text
continue
rate = rates.split(' | ')
this_row['ask'] = rate[0]
this_row['bid'] = rate[1]
if this_row['ask'] == 0.00:
this_row['ask'] = self.ask_max
if this_row['bid'] == 0.00:
this_row['bid'] = self.bid_max
this_row['bot'] = self.bot[x].text
if this_row in saved_data[self.exchange]:
self.logger.warn("data already exists")
content.text = '%s\n=> Data already exists' % content.text
continue
saved_data[self.exchange].append(this_row)
good_records += 1
content.text = '%s\nRow %d saved' % (content.text, x+1)
self.logger.info(str(this_row))
with open('user_data.json', 'w') as user_data:
user_data.write(json.dumps(saved_data))
user_data.close()
content.text = '%s\nData Saved' % content.text
self._dismiss()
value = str(good_records)
self.value = value
    def _create_popup(self, instance):
        """
        Create the main Exchange popup to which new rows can be added.

        Builds a 5-column header (API / NBT / Cur / rates / Bot), a
        scrollable grid that holds the data rows, and an Ok / Cancel /
        Add Row button bar, then loads any saved rows and opens the popup.
        :param instance: the widget that triggered the popup (unused)
        :return: None
        """
        self.exchange = self.key
        main_layout = BoxLayout(orientation='vertical', spacing='5dp')
        scroll_view = ScrollView(do_scroll_x=False)
        # fixed (non-scrolling) column header row
        header = GridLayout(cols=5, spacing='5dp', row_default_height='50dp', row_force_default=True,
                            size_hint_y=None, height='50dp')
        header.add_widget(Label(text='API', valign='top', size_hint_x=0.2))
        header.add_widget(Label(text='NBT', valign='top', size_hint_x=0.2))
        header.add_widget(Label(text='Cur', valign='top', size_hint_x=0.2))
        header.add_widget(Label(text='rates', valign='top', size_hint_x=0.2))
        header.add_widget(Label(text='Bot', valign='top', size_hint_x=0.2))
        # scrollable grid that add_row() populates
        self.content = GridLayout(cols=5, spacing='5dp', row_default_height='50dp', row_force_default=True,
                                  size_hint_x=1, size_hint_y=None)
        # let the grid grow with its rows so the ScrollView can scroll it
        self.content.bind(minimum_height=self.content.setter('height'))
        main_layout.add_widget(header)
        scroll_view.add_widget(self.content)
        main_layout.add_widget(scroll_view)
        self.popup = popup = Popup(
            title=self.title, content=main_layout)
        # construct the content, widgets are used as spacers
        main_layout.add_widget(SettingSpacer())
        # buttons to accept or cancel the current value, plus Add Row
        btnlayout = BoxLayout(size_hint_y=None, height='50dp', spacing='5dp')
        btn = Button(text='Ok')
        btn.bind(on_release=self._validate)
        btnlayout.add_widget(btn)
        btn = Button(text='Cancel')
        btn.bind(on_release=self._dismiss)
        btnlayout.add_widget(btn)
        btn = Button(text='Add Row')
        btn.bind(on_release=self.add_row)
        btnlayout.add_widget(btn)
        main_layout.add_widget(btnlayout)
        # pre-populate rows from user_data.json (adds one blank row if none)
        self.load_data()
        # all done, open the popup !
        popup.open()
def load_data(self):
with open('user_data.json', 'a+') as data_file:
try:
data = json.load(data_file)
except ValueError:
data = {}
data_file.close()
if self.exchange not in data:
self.add_row(None)
return
if len(data[self.exchange]) == 0:
self.add_row(None)
return
for datum in data[self.exchange]:
self.add_row(datum)
    def add_row(self, instance):
        """
        Add a row to the main exchange screen.

        Each row consists of a Keys button, payout-address input, currency
        spinner, Rates button and bot spinner; every widget is appended to
        the matching per-column list so _validate can read it back later.
        :param instance: either the triggering widget, or a dict of saved
            row data ('public', 'secret', 'address', 'unit', 'ask', 'bid',
            'bot') used to pre-fill the row
        :return: None
        """
        self.num_rows += 1
        # API keys selector (the id ties the widget to its row number)
        keys_button = Button(text='Keys', size_hint_x=0.2, id='%d' % self.num_rows)
        keys_button.bind(on_release=self.enter_keys)
        self.content.add_widget(keys_button)
        self.keys_button.append(keys_button)
        # payout address input, validated live via check_address
        address = TextInput(size_hint_x=0.2, padding=[6, 10, 6, 10],
                            multiline=False, font_size=18, id='%d' % self.num_rows)
        address.bind(text=self.check_address)
        self.content.add_widget(address)
        self.address.append(address)
        # traded currency selector
        unit = Spinner(values=self.currencies, text=self.currencies[0], size_hint_x=0.2, id='%d' % self.num_rows)
        self.selected_unit = self.currencies[0]
        unit.bind(text=self.set_unit)
        self.content.add_widget(unit)
        self.unit.append(unit)
        # ask/bid rates entry popup trigger
        rates = Button(text='Rates', size_hint_x=0.2, id='%d' % self.num_rows)
        rates.bind(on_release=self.enter_rates)
        self.content.add_widget(rates)
        self.rates.append(rates)
        # trading bot selector
        bot = Spinner(values=self.bots, text=self.bots[0], size_hint_x=0.2, id='%d' % self.num_rows)
        self.selected_bot = self.bots[0]
        bot.bind(text=self.set_bot)
        self.content.add_widget(bot)
        self.bot.append(bot)
        if isinstance(instance, dict):
            # pre-fill the row from saved data; only the first 8 chars of
            # each key are shown on the button
            keys_button.text = instance['public'][:8] + ' / ' + instance['secret'][:8]
            address.text = instance['address']
            unit.text = instance['unit']
            rates.text = instance['ask'] + ' | ' + instance['bid']
            bot.text = instance['bot']
    def enter_keys(self, instance):
        """
        Show a pop-up in which previously entered api keys can be selected
        from a drop down.
        There are edit and add buttons on the bottom which fire other methods.
        :param instance: the per-row Keys button that was pressed; remembered
            in calling_keys_button so the chosen pair can be written back
        :return: None
        """
        self.calling_keys_button = instance
        content = BoxLayout(orientation='vertical', spacing=10)
        top = BoxLayout(orientation='vertical', size_hint=(1, 0.7))
        top.add_widget(Label(text='API Key Pair', size_hint=(1, None), height='70dp'))
        self.api_key_spinner = Spinner(size_hint=(1, None), height='40dp')
        top.add_widget(self.api_key_spinner)
        # selecting a pair enables the Edit button
        self.api_key_spinner.bind(text=self.enable_edit)
        top.add_widget(BoxLayout())
        btnlayout = BoxLayout(spacing='5dp', size_hint=(1, 0.15))
        # NOTE(review): Ok and Cancel both route through
        # close_api_keys_popup — presumably the handler distinguishes them
        # by button text; confirm in close_api_keys_popup.
        btn = Button(text='Ok', size_hint_y=None, height='50dp')
        btn.bind(on_release=self.close_api_keys_popup)
        btnlayout.add_widget(btn)
        btn = Button(text='Cancel', size_hint_y=None, height='50dp')
        btn.bind(on_release=self.close_api_keys_popup)
        btnlayout.add_widget(btn)
        # Edit starts disabled until a pair is selected (see enable_edit)
        self.edit_keys_button = Button(text='Edit Keys', size_hint_y=None, height='50dp', disabled=True)
        self.edit_keys_button.bind(on_release=self.edit_keys)
        btnlayout.add_widget(self.edit_keys_button)
        self.add_keys_button = Button(text='Add Keys', size_hint_y=None, height='50dp')
        self.add_keys_button.bind(on_release=self.add_keys)
        btnlayout.add_widget(self.add_keys_button)
        content.add_widget(top)
        content.add_widget(SettingSpacer())
        content.add_widget(btnlayout)
        popup_width = min(0.95 * Window.width, dp(500))
        self.enter_keys_popup = Popup(title='API Keys', content=content, auto_dismiss=False,
                                      size_hint=(None, None), size=(popup_width, '250dp'))
        self.update_api_spinners()
        # pre-select the pair already shown on the row's button, if any
        if instance.text != 'Keys':
            self.api_key_spinner.text = instance.text
        self.enter_keys_popup.open()
def enable_edit(self, instance, value):
"""
The Edit button on the 'enter_api_keys' popup starts disabled.
It is only enabled when a selection is made in the spinner
:param instance:
:param value:
:return:
"""
if value == '':
self.edit_keys_button.disabled = True
else:
self.edit_keys_button.disabled = False
self.edit_keys_button.id = value
self.chosen_api_key_pair = value
def edit_keys(self, instance):
"""
Simply shows the add_keys popup with edit mode enabled
:param instance:
:return:
"""
self.add_keys(instance, True)
def add_keys(self, instance, edit=False):
"""
Show a different pop-up into which api_keys can be entered.
In edit mode the fields are pre-populated and a delete button is shown
:param instance:
:param edit:
:return:
"""
content = BoxLayout(orientation='vertical', spacing=10)
grid = GridLayout(cols=2, spacing=10, size_hint=(1, 0.85))
grid.add_widget(Label(text='Public', size_hint_x=None, width='100dp'))
self.add_public_key = TextInput(size_hint=(1, None), height='40dp')
self.add_public_key.bind(text=self.tab_switch)
grid.add_widget(self.add_public_key)
grid.add_widget(Label(text='Secret', size_hint_x=None, width='100dp'))
self.add_secret_key = TextInput(size_hint=(1, None), height='40dp')
self.add_secret_key.bind(text=self.tab_switch)
grid.add_widget(self.add_secret_key)
btnlayout = BoxLayout(spacing='5dp', size_hint=(1, 0.15))
ok_btn = Button(text='Ok', size_hint_y=None, height='50dp')
ok_btn.bind(on_release=self.save_api_keys)
btnlayout.add_widget(ok_btn)
btn = Button(text='Cancel', size_hint_y=None, height='50dp')
btn.bind(on_release=self.save_api_keys)
btnlayout.add_widget(btn)
self.edit_public, self.edit_secret = None, None
if edit is True:
self.edit_public, self.edit_secret = self.get_keys(instance.id)
if self.edit_public is None and self.edit_secret is None:
return
self.add_public_key.text = self.edit_public
self.add_secret_key.text = self.edit_secret
btn = Button(text='Delete', size_hint_y=None, height='50dp')
btn.bind(on_release=self.delete_api_keys)
btnlayout.add_widget(btn)
content.add_widget(SettingSpacer())
content.add_widget(grid)
content.add_widget(btnlayout)
self.add_keys_popup = Popup(title='Add API | |
"""
Python 2-3 Tree implementation
A 2-3 tree is a balanced tree in which each node may contain up to 2 elements
and up to 3 references to its children.
Element lookup takes between log3(N) and log2(N) node visits
Insertion and deletion is about 2 * log2(N)
See http://en.wikipedia.org/wiki/2-3_tree for more info
2011 by <NAME>
"""
class Pair(object):
    """Key/value wrapper so an associative map can be built over the 2-3 tree.

    All comparisons delegate to the key, so a Pair orders/compares equal
    against both other Pairs and bare key values.
    """

    def __init__(self, key, value):
        # NOTE: this instance attribute shadows the key() accessor below.
        self.key = key
        self.value = value

    def __lt__(self, other):
        if type(other) is Pair:
            return self.key < other.key
        return self.key < other

    def __gt__(self, other):
        if type(other) is Pair:
            return self.key > other.key
        return self.key > other

    def __eq__(self, other):
        # the unreachable trailing `return None` of the original was removed
        if type(other) is Pair:
            return self.key == other.key
        return self.key == other

    def __ne__(self, other):
        # Python 2 does not derive != from ==, so define it explicitly
        return not self.__eq__(other)

    def __hash__(self):
        # keep the hash consistent with __eq__ (equal keys -> equal hashes)
        return hash(self.key)

    def __str__(self):
        return 'key: %s, value: %s' % (str(self.key), str(self.value))

    def key(self):
        # Unreachable via instances (the attribute set in __init__ shadows
        # this method); kept only for backward compatibility.
        return self.key

    def val(self):
        return self.value
class Node(object):
    """One node of a 2-3 tree.

    Holds up to 2 sorted values (3 transiently, while a split is pending)
    and up to 3 child links (4 transiently).  ``valcnt``/``refcnt`` track
    how many slots of ``values``/``links`` are in use.
    """

    def __init__(self, v=None, parent=None):
        self.values, self.valcnt = None, 0
        self.links, self.refcnt = None, 0
        self.parent = parent
        self.insertValue(v)

    def __str__(self):
        out = []
        if self.values is not None:
            for v in self.values:
                if v is not None:
                    out.append(' %s ' % str(v))
            return ''.join(out)
        else:
            return 'empty'

    def __iter__(self):
        # yields the raw value slots (may include trailing Nones)
        if self.values is not None:
            for item in self.values:
                yield item

    def __getlink(self, a):
        """Index of the child link that should contain *a*, or -1 when the
        node holds no values."""
        for idx in range(self.valcnt):  # was xrange (Python 2 only)
            if idx == 0:  # was `idx is 0` (identity comparison on ints)
                if a < self.values[idx]:
                    return idx
            else:
                if self.values[idx - 1] < a < self.values[idx]:
                    return idx
            if idx == self.valcnt - 1:
                return idx + 1
        return -1

    def __addLink(self, link):
        """Append *link* in the next free slot (no ordering applied)."""
        if self.links is None:
            self.links = [None] * 4
        self.links[self.refcnt] = link
        self.refcnt += 1

    def __insertLink(self, idx, anotherNode):
        """Insert *anotherNode* at link slot *idx*, shifting the rest right."""
        if self.links is None:
            self.links = [None] * 4
        if idx == 0:
            self.links[0], self.links[1], self.links[2], self.links[3] = \
                anotherNode, self.links[0], self.links[1], self.links[2]
        elif idx == 1:
            self.links[1], self.links[2], self.links[3] = anotherNode, self.links[1], self.links[2]
        elif idx == 2:
            self.links[2], self.links[3] = anotherNode, self.links[2]
        else:
            self.links[3] = anotherNode
        self.refcnt += 1

    def __removeLink(self, idx):
        """Drop the link at slot *idx*, shifting the rest left."""
        if idx == 0:
            self.links[0], self.links[1], self.links[2], self.links[3] = \
                self.links[1], self.links[2], self.links[3], None
        elif idx == 1:
            self.links[1], self.links[2], self.links[3] = self.links[2], self.links[3], None
        elif idx == 2:
            self.links[2], self.links[3] = self.links[3], None
        else:
            self.links[3] = None
        self.refcnt -= 1

    def __rearrangeLinks(self, a):
        """Rearrange links when adding a new value *a* to this node."""
        if self.valcnt != 0:
            if a < self.values[0] and not self.isLeafNode() and self.refcnt < 3:
                # shift all the links right when the new value becomes the minimum
                self.__insertLink(0, None)
            elif self.valcnt == 2 and self.refcnt == 3 and self.values[self.valcnt - 1] > a > self.values[0]:
                # open a gap in the middle links when inserting a middle value
                self.__insertLink(1, None)

    def __sort3(self, arr, l):
        """Sort the first *l* (2 or 3) entries of *arr* in place
        (robust and fast fixed-size network)."""
        if l >= 2:
            if arr[0] > arr[1]:
                arr[0], arr[1] = arr[1], arr[0]
            if l == 3:
                if arr[1] > arr[2]:
                    arr[1], arr[2] = arr[2], arr[1]
                if arr[0] > arr[1]:
                    arr[0], arr[1] = arr[1], arr[0]

    # interface methods & properties
    def insertValue(self, a):
        """Insert value *a* (ignored when None or node full); returns self."""
        if a is not None and self.valcnt < 3:
            if self.valcnt == 0:  # was `is 0`
                self.values = [None] * 3
            self.__rearrangeLinks(a)
            self.values[self.valcnt] = a
            self.valcnt += 1
            self.__sort3(self.values, self.valcnt)
        return self

    def removeValue(self, val):
        """Remove *val* from this node if present; returns self."""
        # `is not None`: a stored falsy value (e.g. 0) must still be removable
        if self.contains(val) is not None:
            idx = self.values.index(val)
            if idx == 0:
                self.values[0], self.values[1], self.values[2] = self.values[1], self.values[2], None
            elif idx == 1:
                self.values[1], self.values[2] = self.values[2], None
            else:
                self.values[2] = None
            self.valcnt -= 1
        return self

    def removeLink(self, node):
        """Remove the link from self to another node; returns self."""
        self.__removeLink(self.getLinkIdx(node))
        return self

    def isConsistent(self):
        """True unless the node is over-full (3 values or 4 links)."""
        return not (self.valcnt > 2 or self.refcnt > 3)

    def isLeafNode(self):
        """True when this node has no children."""
        return self.refcnt == 0

    def isEmptyNode(self):
        """True when this node doesn't contain any value."""
        return self.valcnt == 0

    def getLink(self, linkIdx):
        """Link at *linkIdx*, or None when the index is out of range."""
        if linkIdx < self.refcnt:
            return self.links[linkIdx]

    def getLinkIdx(self, destNode):
        """Index of the link pointing at *destNode* (raises when absent)."""
        return self.links.index(destNode)

    def addLink(self, anotherNode):
        """Link *anotherNode* as a child in sorted position; returns self."""
        if anotherNode is not None:
            if self.links is None:
                self.links = [None] * 4
            idx = self.__getlink(anotherNode.values[0])
            if idx != -1:
                if idx < self.refcnt and self.links[idx] is None:
                    self.links[idx] = anotherNode
                else:
                    self.__insertLink(idx, anotherNode)
                anotherNode.parent = self
        return self

    def contains(self, a):
        """Return the stored value equal to *a*, or None when absent.

        NOTE: a stored falsy value (e.g. 0) is returned as-is, so callers
        must compare the result against None rather than test truthiness.
        """
        if self.valcnt != 0:  # was `is not 0`
            if (self.values[0] > a or self.values[self.valcnt - 1] < a) or a not in self.values:
                return None
            return self.values[self.values.index(a)]

    def chooseChild(self, a):
        """Child node to descend into when searching for *a* (None at a leaf)."""
        idx = self.__getlink(a)
        if 0 <= idx < self.refcnt:
            return self.links[idx]

    def getItem(self, a):
        """Return the stored value equal to *a*, or None."""
        # `is not None`: the original truthiness test hid stored falsy values
        if self.contains(a) is not None:
            return self.values[self.values.index(a)]
        return None
class TTTree(object):
    def __init__(self):
        # The tree starts as a single empty leaf node.
        self.root = Node()
        # Depth reached by the most recent search (diagnostic counter).
        self.lastSearchDepth = 0
def __iter__(self):
stack = [self.root]
while len(stack):
node = stack.pop()
yield node
for j in xrange(node.refcnt - 1, -1, -1):
stack.append(node.getLink(j))
def __str__(self):
""" String representation of a tree (parentheses form) """
out, stack = [], [self.root]
while stack:
node = stack.pop()
if node == ')':
out.append(')')
continue
out.append('%s(' % str(node))
stack.append(')')
for j in xrange(node.refcnt - 1, -1, -1):
stack.append(node.getLink(j))
return ''.join(out)
def __nextSucc(self, node):
self.lastSearchDepth += 1
if not node.isLeafNode():
return self.__nextSucc(node.links[0])
return node
def __find(self, curNode, a):
if curNode is not None:
if curNode.contains(a):
return curNode
nextNode = curNode.chooseChild(a)
if nextNode is None:
return curNode
self.lastSearchDepth += 1
return self.__find(nextNode, a)
def __getLeftSibling(self, node):
""" Returns left sibling of a node """
if (node and node.parent) is not None:
return node.parent.getLink(node.parent.getLinkIdx(node) - 1)
def __getRightSibling(self, node):
""" Returns right sibling of a node """
if (node and node.parent) is not None:
return node.parent.getLink(node.parent.getLinkIdx(node) + 1)
def __getSiblings(self, node):
""" Returns node's siblings """
# check whether one of our siblings has two items
lS, rS, lCnt, rCnt = None, None, 0, 0
if self.__getRightSibling(node) is not None:
rS = self.__getRightSibling(node)
rCnt = rS.valcnt
if self.__getLeftSibling(node) is not None:
lS = self.__getLeftSibling(node)
lCnt = lS.valcnt
return lS, lCnt, rS, rCnt
def __swapValues(self, node1, a1, node2, a2):
""" Swap any two values in nodes """
if node1 is not node2:
idx1, idx2 = node1.values.index(a1), node2.values.index(a2)
node1.values[idx1], node2.values[idx2] = node2.values[idx2], node1.values[idx1]
def __fixNodeRemove(self, node, parent = -1):
""" Fix deletion """
if node.isEmptyNode():
if node is not self.root:
if parent == -1:
parent = node.parent
if node.isEmptyNode() or not node.isConsistent():
lS, lCnt, rS, rCnt = self.__getSiblings(node)
rSS, lSS = self.__getRightSibling(rS), self.__getLeftSibling(lS)
redistribute = True
if (rS or lS) is not None:
if rCnt == 2 or (rCnt == 1 and rSS != None and rSS.valcnt == 2):
sib = rS
elif lCnt == 2 or (lCnt == 1 and lSS != None and lSS.valcnt == 2):
sib = lS
elif lCnt == 1:
sib, redistribute = lS, False
elif rCnt == 1:
sib, redistribute = rS, False
if redistribute:
# case 1: redistribute
# left and right case
if parent.valcnt == 1:
if node == parent.getLink(0):
parent_val, sib_val = parent.values[0], sib.values[0]
child = sib.chooseChild(sib_val - 1)
elif node == parent.getLink(1):
parent_val, sib_val = parent.values[parent.valcnt - 1], sib.values[sib.valcnt - 1]
child = sib.chooseChild(sib_val + 1)
else:
if sib == parent.getLink(1):
# left
if node == parent.getLink(0):
parent_val, sib_val = parent.values[0], sib.values[0]
child = sib.chooseChild(sib_val - 1)
# right
elif node == parent.getLink(2):
parent_val, sib_val = parent.values[parent.valcnt - 1], sib.values[sib.valcnt - 1]
child = sib.chooseChild(sib_val + 1)
# middle
elif sib == parent.getLink(2):
parent_val, sib_val = parent.values[parent.valcnt - 1], sib.values[0]
child = sib.chooseChild(sib_val - 1)
elif sib | |
"▁yuk": 28792,
"▁Úr": 28793,
"anam": 28794,
"kz": 28795,
"laid": 28796,
"shti": 28797,
"sional": 28798,
"\x92": 28799,
"łów": 28800,
"▁Abdurrahman": 28801,
"▁Bele": 28802,
"▁Coruña": 28803,
"▁Dating": 28804,
"▁Freude": 28805,
"▁Gondol": 28806,
"▁Karya": 28807,
"▁Tibb": 28808,
"▁Yanga": 28809,
"▁Yazı": 28810,
"▁arm": 28811,
"▁arra": 28812,
"▁coast": 28813,
"▁gul": 28814,
"▁hands": 28815,
"▁ingen": 28816,
"▁maximus": 28817,
"▁mild": 28818,
"▁porto": 28819,
"▁servo": 28820,
"▁unde": 28821,
"ち": 28822,
"ライ": 28823,
"부": 28824,
"Bild": 28825,
"Document": 28826,
"dependence": 28827,
"laina": 28828,
"lón": 28829,
"rein": 28830,
"rezza": 28831,
"ındır": 28832,
"▁COL": 28833,
"▁Roskilde": 28834,
"▁Shp": 28835,
"▁Vaa": 28836,
"▁paradis": 28837,
"▁sel": 28838,
"▁wrong": 28839,
"如": 28840,
"知": 28841,
"CĂ": 28842,
"PIS": 28843,
"adon": 28844,
"duh": 28845,
"gaba": 28846,
"ikko": 28847,
"licit": 28848,
"mış": 28849,
"pka": 28850,
"stede": 28851,
"strik": 28852,
"tiden": 28853,
"trau": 28854,
"zino": 28855,
"▁Ioannis": 28856,
"▁Mesto": 28857,
"▁Moet": 28858,
"▁Organisation": 28859,
"▁Would": 28860,
"▁dorm": 28861,
"▁dul": 28862,
"▁fren": 28863,
"▁gravi": 28864,
"▁patch": 28865,
"▁select": 28866,
"▁sg": 28867,
"▁shape": 28868,
"▁uk": 28869,
"曲": 28870,
"ABE": 28871,
"CLE": 28872,
"DEN": 28873,
"ELE": 28874,
"MENT": 28875,
"Magazin": 28876,
"OVO": 28877,
"blis": 28878,
"hatra": 28879,
"lawan": 28880,
"lna": 28881,
"mbali": 28882,
"nawi": 28883,
"schlag": 28884,
"ulit": 28885,
"üş": 28886,
"ča": 28887,
"ʃ": 28888,
"▁Deel": 28889,
"▁Ethiopian": 28890,
"▁Kje": 28891,
"▁Tecno": 28892,
"▁boro": 28893,
"▁burg": 28894,
"▁catch": 28895,
"▁document": 28896,
"▁heel": 28897,
"▁horse": 28898,
"▁nov": 28899,
"▁television": 28900,
"▁vc": 28901,
"사": 28902,
"BEL": 28903,
"aad": 28904,
"anović": 28905,
"edett": 28906,
"ilah": 28907,
"jis": 28908,
"malai": 28909,
"padu": 28910,
"patr": 28911,
"pí": 28912,
"stici": 28913,
"är": 28914,
"ış": 28915,
"że": 28916,
"▁Padre": 28917,
"▁Spray": 28918,
"▁Tack": 28919,
"▁Waa": 28920,
"▁kronor": 28921,
"▁really": 28922,
"▁stea": 28923,
"▁whole": 28924,
"玄": 28925,
"魏": 28926,
"HOR": 28927,
"endre": 28928,
"fers": 28929,
"forza": 28930,
"gation": 28931,
"giz": 28932,
"inti": 28933,
"téri": 28934,
"unun": 28935,
"veren": 28936,
"wg": 28937,
"wki": 28938,
"yolu": 28939,
"ček": 28940,
"ğa": 28941,
"ża": 28942,
"▁Akademik": 28943,
"▁Auditor": 28944,
"▁Auf": 28945,
"▁GLOBAL": 28946,
"▁Tages": 28947,
"▁Zahl": 28948,
"▁college": 28949,
"▁featuring": 28950,
"▁followers": 28951,
"▁nodi": 28952,
"▁ort": 28953,
"▁portable": 28954,
"▁vesi": 28955,
"シ": 28956,
"沙": 28957,
"Ef": 28958,
"INGS": 28959,
"Oxford": 28960,
"Una": 28961,
"ZAL": 28962,
"function": 28963,
"keun": 28964,
"ljak": 28965,
"option": 28966,
"▁Bjarna": 28967,
"▁Deporte": 28968,
"▁Oktyabr": 28969,
"▁Peda": 28970,
"▁Stellenbosch": 28971,
"▁fame": 28972,
"▁grande": 28973,
"▁interest": 28974,
"▁kel": 28975,
"▁kl": 28976,
"▁liquid": 28977,
"▁measure": 28978,
"▁pâ": 28979,
"▁safari": 28980,
"▁those": 28981,
"端": 28982,
"03.20": 28983,
"HUN": 28984,
"edel": 28985,
"hesi": 28986,
"irani": 28987,
"irin": 28988,
"jab": 28989,
"ný": 28990,
"stav": 28991,
"uré": 28992,
"varu": 28993,
"▁(37)": 28994,
"▁IPL": 28995,
"▁Juventud": 28996,
"▁Leta": 28997,
"▁Podcast": 28998,
"▁Tomislav": 28999,
"▁exit": 29000,
"▁located": 29001,
"▁quality": 29002,
"▁reaction": 29003,
"▁scal": 29004,
"▁sera": 29005,
"▁until": 29006,
"▁vibra": 29007,
"仲": 29008,
"佳": 29009,
"智": 29010,
"秀": 29011,
"萬": 29012,
"IVO": 29013,
"Shirt": 29014,
"brev": 29015,
"dada": 29016,
"miento": 29017,
"missions": 29018,
"nize": 29019,
"toka": 29020,
"şık": 29021,
"▁Agen": 29022,
"▁Nijmegen": 29023,
"▁Węg": 29024,
"▁cole": 29025,
"▁former": 29026,
"▁kab": 29027,
"▁merr": 29028,
"▁mile": 29029,
"▁seminar": 29030,
"尹": 29031,
"郭": 29032,
"Forum": 29033,
"LITE": 29034,
"dique": 29035,
"dnik": 29036,
"eeda": 29037,
"hlas": 29038,
"kopp": 29039,
"ojo": 29040,
"schot": 29041,
"önü": 29042,
"▁Faci": 29043,
"▁Rasmus": 29044,
"▁Revolu": 29045,
"▁SEB": 29046,
"▁Warga": 29047,
"▁difficili": 29048,
"▁roto": 29049,
"▁several": 29050,
"▁words": 29051,
"▁xir": 29052,
"刘": 29053,
"名": 29054,
"Ez": 29055,
"Jag": 29056,
"farge": 29057,
"kkan": 29058,
"punan": 29059,
"siu": 29060,
"vag": 29061,
"π": 29062,
"▁Miel": 29063,
"▁Preto": 29064,
"▁bhu": 29065,
"▁collect": 29066,
"▁euch": 29067,
"▁golden": 29068,
"▁joc": 29069,
"▁outside": 29070,
"▁pă": 29071,
"▁raff": 29072,
"▁ras": 29073,
"▁trial": 29074,
"恩": 29075,
"............": 29076,
"INFO": 29077,
"adat": 29078,
"chado": 29079,
"hb": 29080,
"isella": 29081,
"stuhl": 29082,
"udhu": 29083,
"öt": 29084,
"▁16.00": 29085,
"▁1800-": 29086,
"▁Datu": 29087,
"▁Ideal": 29088,
"▁Katar": 29089,
"▁Opinion": 29090,
"▁Weiß": 29091,
"▁antra": 29092,
"▁hiki": 29093,
"▁hug": 29094,
"▁inf": 29095,
"▁interface": 29096,
"▁longe": 29097,
"▁pages": 29098,
"▁tana": 29099,
"▁Šv": 29100,
"...........": 29101,
"ILAN": 29102,
"Prav": 29103,
"brani": 29104,
"computer": 29105,
"ekli": 29106,
"fê": 29107,
"lej": 29108,
"plete": 29109,
"sku": 29110,
"▁Halk": 29111,
"▁Praia": 29112,
"▁articula": 29113,
"▁choice": 29114,
"▁create": 29115,
"▁disease": 29116,
"▁india": 29117,
"▁married": 29118,
"▁orang": 29119,
"▁pag": 29120,
"▁perc": 29121,
"▁pok": 29122,
"▁processo": 29123,
"▁rog": 29124,
"▁separat": 29125,
"▁sind": 29126,
"▁vision": 29127,
"化": 29128,
"아": 29129,
"amb": 29130,
"eit": 29131,
"erit": 29132,
"riez": 29133,
"tanda": 29134,
"tiques": 29135,
"▁BEST": 29136,
"▁Deze": 29137,
"▁Latest": 29138,
"▁Pekka": 29139,
"▁centro": 29140,
"▁department": 29141,
"▁fumar": 29142,
"▁kool": 29143,
"▁millenni": 29144,
"▁nature": 29145,
"▁nemo": 29146,
"▁sample": 29147,
"將": 29148,
"DHA": 29149,
"LEC": 29150,
"alphabet": 29151,
"kru": 29152,
"legi": 29153,
"tribute": 29154,
"zala": 29155,
"ßen": 29156,
"▁1395": 29157,
"▁AMB": 29158,
"▁Dessa": 29159,
"▁Hehe": 29160,
"▁Lider": 29161,
"▁Schä": 29162,
"▁Tark": 29163,
"▁bond": 29164,
"▁dic": 29165,
"▁manu": 29166,
"▁mast": 29167,
"▁nec": 29168,
"▁stories": 29169,
"▁truc": 29170,
"▁Çift": 29171,
"千": 29172,
"音": 29173,
"지": 29174,
"후": 29175,
"Tapi": 29176,
"baran": 29177,
"bool": 29178,
"corso": 29179,
"ghas": 29180,
"ilha": 29181,
"kaa": 29182,
"otan": 29183,
"piece": 29184,
"ravin": 29185,
"río": 29186,
"still": 29187,
"tauchen": 29188,
"upan": 29189,
"vle": 29190,
"▁Esperanto": 29191,
"▁Genera": 29192,
"▁Relax": 29193,
"▁TOT": 29194,
"▁argentina": 29195,
"▁estima": 29196,
"▁users": 29197,
"▁victor": 29198,
"官": 29199,
"awal": 29200,
"baga": 29201,
"content": 29202,
"eho": 29203,
"ganga": 29204,
"iski": 29205,
"kling": 29206,
"meel": 29207,
"regel": 29208,
"rø": 29209,
"tě": 29210,
"ňa": 29211,
"▁Araç": 29212,
"▁Drei": 29213,
"▁Lage": 29214,
"▁Monsieur": 29215,
"▁Obst": 29216,
"▁Owner": 29217,
"▁Regul": 29218,
"▁Specific": 29219,
"▁carry": 29220,
"▁fx": 29221,
"▁hemat": 29222,
"▁lago": 29223,
"▁royal": 29224,
"▁sharing": 29225,
"▁shows": 29226,
"七": 29227,
"區": 29228,
"火": 29229,
"STAT": 29230,
"TUN": 29231,
"anana": 29232,
"current": 29233,
"decim": 29234,
"fika": 29235,
"htm": 29236,
"lh": 29237,
"sagen": 29238,
"vim": 29239,
"zare": 29240,
"čice": 29241,
"ʿ": 29242,
"▁2007-2013": 29243,
"▁Balaton": 29244,
"▁Stabil": 29245,
"▁Tokio": 29246,
"▁UEA": 29247,
"▁Unidos": 29248,
"▁Uw": 29249,
"▁Zde": 29250,
"▁comes": 29251,
"▁kommun": 29252,
"▁meri": 29253,
"▁nito": 29254,
"▁novog": 29255,
"▁pest": 29256,
"▁policy": 29257,
"▁shir": 29258,
"▁tapa": 29259,
"▁trek": 29260,
"PUS": 29261,
"aniya": 29262,
"nemu": 29263,
"platte": 29264,
"rée": 29265,
"wier": 29266,
"▁Csaba": 29267,
"▁Religion": 29268,
"▁alan": 29269,
"▁alternat": 29270,
"▁astro": 29271,
"▁auf": 29272,
"▁balsam": 29273,
"▁hall": 29274,
"▁homem": 29275,
"▁honor": 29276,
"▁inde": 29277,
"▁praeter": 29278,
"▁within": 29279,
"つ": 29280,
"기": 29281,
"Alle": 29282,
"UJI": 29283,
"Yiddish": 29284,
"access": 29285,
"accia": 29286,
"article": 29287,
"hany": 29288,
"koa": 29289,
"lagos": 29290,
"ologia": 29291,
"rät": 29292,
"sika": 29293,
"sist": 29294,
"”),": 29295,
"▁Alap": 29296,
"▁Camping": 29297,
"▁Denne": 29298,
"▁Kivi": 29299,
"▁Listo": 29300,
"▁Nieuwe": 29301,
"▁Transilvania": 29302,
"▁Tudo": 29303,
"▁babe": 29304,
"▁century": 29305,
"▁competition": 29306,
"▁mami": 29307,
"▁salva": 29308,
"▁scopo": 29309,
"▁write": 29310,
"anten": 29311,
"issant": 29312,
"qar": 29313,
"ribus": 29314,
"teck": 29315,
"tih": 29316,
"ško": 29317,
"ная": 29318,
"▁Advent": 29319,
"▁Agora": 29320,
"▁Nech": 29321,
"▁Prov": 29322,
"▁cald": 29323,
"▁ef": 29324,
"▁mand": 29325,
"▁risk": 29326,
"▁tok": 29327,
"▁wrap": 29328,
"▁wy": 29329,
"ARIS": 29330,
"Unione": 29331,
"ajat": 29332,
"juma": 29333,
"liter": 29334,
"ndolo": 29335,
"portal": 29336,
"rave": 29337,
"skap": 29338,
"wege": 29339,
"ḥ": 29340,
"▁Gymnas": 29341,
"▁Ofer": 29342,
"▁Ostrava": 29343,
"▁PLAY": 29344,
"▁armen": 29345,
"▁bazo": 29346,
"▁gyn": 29347,
"▁kala": 29348,
"▁liber": 29349,
"▁rest": 29350,
"▁whose": 29351,
"那": 29352,
"雄": 29353,
"齊": 29354,
"AJI": 29355,
"apuram": 29356,
"develop": 29357,
"feder": 29358,
"gita": 29359,
"hanam": 29360,
"ised": 29361,
"kake": 29362,
"oeuf": 29363,
"▁Epidemi": 29364,
"▁Kafe": 29365,
"▁armi": 29366,
"▁eden": 29367,
"▁lub": 29368,
"▁massa": 29369,
"▁Šu": 29370,
"ESTI": 29371,
"ZY": 29372,
"blogger": 29373,
"handle": 29374,
"hü": 29375,
"ilun": 29376,
"karna": 29377,
"kub": 29378,
"lir": 29379,
"poste": 29380,
"rifi": 29381,
"sulat": 29382,
"▁Brasileira": 29383,
"▁Everyone": 29384,
"▁Greece": 29385,
"▁Lajos": 29386,
"▁Meni": 29387,
"▁Sonder": 29388,
"▁cancer": 29389,
"▁egg": 29390,
"▁franchise": 29391,
"▁humil": 29392,
"▁juni": 29393,
"▁poem": 29394,
"▁rubrica": 29395,
"▁seria": 29396,
"▁till": 29397,
"▁utili": 29398,
"▁Çav": 29399,
"▁ə": 29400,
"RIE": 29401,
"abile": 29402,
"dı": 29403,
"economic": 29404,
"eswara": 29405,
"hald": 29406,
"kış": 29407,
"pán": 29408,
"quisition": 29409,
"rías": 29410,
"▁Boleslav": 29411,
"▁Cei": 29412,
"▁Parliament": 29413,
"▁Portfolio": 29414,
"▁Reforma": 29415,
"▁bene": 29416,
"▁chor": 29417,
"▁disa": 29418,
"▁doing": 29419,
"▁gur": 29420,
"▁hou": 29421,
"▁hul": 29422,
"▁investor": 29423,
"▁noir": 29424,
"▁reply": 29425,
"3⁄4": 29426,
"Instagram": 29427,
"balance": 29428,
"delle": 29429,
"dinga": 29430,
"gosto": 29431,
| |
# lmr <-> gpcp
indok = np.isfinite(gpcpvec); nbok = np.sum(indok); nball = gpcpvec.shape[1]
ratio = float(nbok)/float(nball)
if ratio > valid_frac:
lg_csave[k] = np.corrcoef(lmrvec[indok],gpcpvec[indok])[0,1]
else:
lg_csave[k] = np.nan
print(' lmr-gpcp correlation : %s' % str(lg_csave[k]))
# lmr <-> cmap
indok = np.isfinite(cmapvec); nbok = np.sum(indok); nball = cmapvec.shape[1]
ratio = float(nbok)/float(nball)
if ratio > valid_frac:
lc_csave[k] = np.corrcoef(lmrvec[indok],cmapvec[indok])[0,1]
else:
lc_csave[k] = np.nan
print(' lmr-cmap correlation : %s' % str(lc_csave[k]))
# lmr <-> tcr
indok = np.isfinite(tcrvec); nbok = np.sum(indok); nball = tcrvec.shape[1]
ratio = float(nbok)/float(nball)
if ratio > valid_frac:
lt_csave[k] = np.corrcoef(lmrvec[indok],tcrvec[indok])[0,1]
else:
lt_csave[k] = np.nan
print(' lmr-tcr correlation : %s' % str(lt_csave[k]))
# lmr <-> era
indok = np.isfinite(eravec); nbok = np.sum(indok); nball = eravec.shape[1]
ratio = float(nbok)/float(nball)
if ratio > valid_frac:
le_csave[k] = np.corrcoef(lmrvec[indok],eravec[indok])[0,1]
else:
le_csave[k] = np.nan
print(' lmr-era correlation : %s' % str(le_csave[k]))
# gpcp <-> cmap
indok = np.isfinite(cmapvec); nbok = np.sum(indok); nball = cmapvec.shape[1]
ratio = float(nbok)/float(nball)
if ratio > valid_frac:
gc_csave[k] = np.corrcoef(gpcpvec[indok],cmapvec[indok])[0,1]
else:
gc_csave[k] = np.nan
print(' gpcp-cmap correlation : %s' % str(gc_csave[k]))
# gpcp <-> tcr
indok = np.isfinite(gpcpvec); nbok = np.sum(indok); nball = gpcpvec.shape[1]
ratio = float(nbok)/float(nball)
if ratio > valid_frac:
gt_csave[k] = np.corrcoef(gpcpvec[indok],tcrvec[indok])[0,1]
else:
gt_csave[k] = np.nan
print(' gpcp-tcr correlation : %s' % str(gt_csave[k]))
# gpcp <-> era
indok = np.isfinite(gpcpvec); nbok = np.sum(indok); nball = gpcpvec.shape[1]
ratio = float(nbok)/float(nball)
if ratio > valid_frac:
ge_csave[k] = np.corrcoef(gpcpvec[indok],eravec[indok])[0,1]
else:
ge_csave[k] = np.nan
print(' gpcp-era correlation : %s' % str(ge_csave[k]))
# tcr <-> era
indok = np.isfinite(eravec); nbok = np.sum(indok); nball = eravec.shape[1]
ratio = float(nbok)/float(nball)
if ratio > valid_frac:
te_csave[k] = np.corrcoef(tcrvec[indok],eravec[indok])[0,1]
else:
te_csave[k] = np.nan
print(' tcr-era correlation : %s' % str(te_csave[k]))
# -- plots for anomaly correlation statistics --
# number of bins in the histograms
nbins = 15
corr_range = [-0.6,1.0]
bins = np.linspace(corr_range[0],corr_range[1],nbins)
# LMR compared to GPCP, CMAP, TCR and ERA
fig = plt.figure()
# GPCP
ax = fig.add_subplot(4,2,1)
ax.plot(cyears,lg_csave,lw=2)
ax.plot([trange[0],trange[-1]],[0,0],'k:')
ax.set_title('LMR - GPCP')
ax.set_xlim(trange[0],trange[-1])
ax.set_ylim(corr_range[0],corr_range[-1])
ax.set_ylabel('Correlation',fontweight='bold')
#
ax = fig.add_subplot(4,2,2)
ax.hist(lg_csave[~np.isnan(lg_csave)],bins=bins,histtype='stepfilled',alpha=0.25)
ax.set_title('LMR - GPCP')
ax.set_xlim(corr_range[0],corr_range[-1])
ax.set_ylabel('Counts',fontweight='bold')
xmin,xmax = ax.get_xlim()
ymin,ymax = ax.get_ylim()
ypos = ymax-0.15*(ymax-ymin)
xpos = xmin+0.025*(xmax-xmin)
ax.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(np.nanmean(lg_csave)),fontsize=11,fontweight='bold')
# CMAP
ax = fig.add_subplot(4,2,3)
ax.plot(cyears,lc_csave,lw=2)
ax.plot([trange[0],trange[-1]],[0,0],'k:')
ax.set_title('LMR - CMAP')
ax.set_xlim(trange[0],trange[-1])
ax.set_ylim(corr_range[0],corr_range[-1])
ax.set_ylabel('Correlation',fontweight='bold')
#
ax = fig.add_subplot(4,2,4)
ax.hist(lc_csave[~np.isnan(lc_csave)],bins=bins,histtype='stepfilled',alpha=0.25)
ax.set_title('LMR - CMAP')
ax.set_xlim(corr_range[0],corr_range[-1])
ax.set_ylabel('Counts',fontweight='bold')
xmin,xmax = ax.get_xlim()
ymin,ymax = ax.get_ylim()
ypos = ymax-0.15*(ymax-ymin)
xpos = xmin+0.025*(xmax-xmin)
ax.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(np.nanmean(lc_csave)),fontsize=11,fontweight='bold')
# TCR
ax = fig.add_subplot(4,2,5)
ax.plot(cyears,lt_csave,lw=2)
ax.plot([trange[0],trange[-1]],[0,0],'k:')
ax.set_title('LMR - 20CR-V2')
ax.set_xlim(trange[0],trange[-1])
ax.set_ylim(corr_range[0],corr_range[-1])
ax.set_ylabel('Correlation',fontweight='bold')
#
ax = fig.add_subplot(4,2,6)
ax.hist(lt_csave[~np.isnan(lt_csave)],bins=bins,histtype='stepfilled',alpha=0.25)
ax.set_title('LMR - 20CR-V2')
ax.set_xlim(corr_range[0],corr_range[-1])
ax.set_ylabel('Counts',fontweight='bold')
xmin,xmax = ax.get_xlim()
ymin,ymax = ax.get_ylim()
ypos = ymax-0.15*(ymax-ymin)
xpos = xmin+0.025*(xmax-xmin)
ax.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(np.nanmean(lt_csave)),fontsize=11,fontweight='bold')
# ERA
ax = fig.add_subplot(4,2,7)
ax.plot(cyears,le_csave,lw=2)
ax.plot([trange[0],trange[-1]],[0,0],'k:')
ax.set_title('LMR - ERA20C')
ax.set_xlim(trange[0],trange[-1])
ax.set_ylim(corr_range[0],corr_range[-1])
ax.set_ylabel('Correlation',fontweight='bold')
#
ax = fig.add_subplot(4,2,8)
ax.hist(le_csave[~np.isnan(le_csave)],bins=bins,histtype='stepfilled',alpha=0.25)
ax.set_title('LMR - ERA20C')
ax.set_xlim(corr_range[0],corr_range[-1])
ax.set_ylabel('Counts',fontweight='bold')
xmin,xmax = ax.get_xlim()
ymin,ymax = ax.get_ylim()
ypos = ymax-0.15*(ymax-ymin)
xpos = xmin+0.025*(xmax-xmin)
ax.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(np.nanmean(le_csave)),fontsize=11,fontweight='bold')
fig.tight_layout()
plt.subplots_adjust(left=0.1, bottom=0.25, right=0.95, top=0.93, wspace=0.5, hspace=0.5)
fig.suptitle(verif_dict[var][2]+' anomaly correlation',fontweight='bold')
if fsave:
print('saving to .png')
plt.savefig(nexp+'_verify_grid_'+verif_dict[var][1]+'_anomaly_correlation_LMR_'+str(trange[0])+'-'+str(trange[1])+'.png')
plt.savefig(nexp+'_verify_grid_'+verif_dict[var][1]+'_anomaly_correlation_LMR_'+str(trange[0])+'-'+str(trange[1])+'.pdf', bbox_inches='tight', dpi=300, format='pdf')
plt.close()
# Reference : TCR & ERA compared to GPCP + ERA compared to TCR
fig = plt.figure()
# TCR <-> GPCP
ax = fig.add_subplot(3,2,1)
ax.plot(cyears,gt_csave,lw=2)
ax.plot([trange[0],trange[-1]],[0,0],'k:')
ax.set_title('20CR-V2 - GPCP')
ax.set_xlim(trange[0],trange[-1])
ax.set_ylim(corr_range[0],corr_range[-1])
ax.set_ylabel('Correlation',fontweight='bold')
ax.set_xlabel('Year CE',fontweight='bold')
#
ax = fig.add_subplot(3,2,2)
ax.hist(gt_csave[~np.isnan(gt_csave)],bins=bins,histtype='stepfilled',alpha=0.25)
ax.set_title('20CR-V2 - GPCP')
ax.set_xlim(corr_range[0],corr_range[-1])
ax.set_ylabel('Counts',fontweight='bold')
ax.set_xlabel('Correlation',fontweight='bold')
xmin,xmax = ax.get_xlim()
ymin,ymax = ax.get_ylim()
ypos = ymax-0.15*(ymax-ymin)
xpos = xmin+0.025*(xmax-xmin)
ax.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(np.nanmean(gt_csave)),fontsize=11,fontweight='bold')
# ERA <-> GPCP
ax = fig.add_subplot(3,2,3)
ax.plot(cyears,ge_csave,lw=2)
ax.plot([trange[0],trange[-1]],[0,0],'k:')
ax.set_title('ERA20C - GPCP')
ax.set_xlim(trange[0],trange[-1])
ax.set_ylim(corr_range[0],corr_range[-1])
ax.set_ylabel('Correlation',fontweight='bold')
ax.set_xlabel('Year CE',fontweight='bold')
#
ax = fig.add_subplot(3,2,4)
ax.hist(ge_csave[~np.isnan(ge_csave)],bins=bins,histtype='stepfilled',alpha=0.25)
ax.set_title('ERA20C - GPCP')
ax.set_xlim(corr_range[0],corr_range[-1])
ax.set_ylabel('Counts',fontweight='bold')
ax.set_xlabel('Correlation',fontweight='bold')
xmin,xmax = ax.get_xlim()
ymin,ymax = ax.get_ylim()
ypos = ymax-0.15*(ymax-ymin)
xpos = xmin+0.025*(xmax-xmin)
ax.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(np.nanmean(ge_csave)),fontsize=11,fontweight='bold')
# ERA <-> TCR
ax = fig.add_subplot(3,2,5)
ax.plot(cyears,te_csave,lw=2)
ax.plot([trange[0],trange[-1]],[0,0],'k:')
ax.set_title('ERA20C - 20CR-V2')
ax.set_xlim(trange[0],trange[-1])
ax.set_ylim(corr_range[0],corr_range[-1])
ax.set_ylabel('Correlation',fontweight='bold')
ax.set_xlabel('Year CE',fontweight='bold')
#
ax = fig.add_subplot(3,2,6)
ax.hist(te_csave[~np.isnan(te_csave)],bins=bins,histtype='stepfilled',alpha=0.25)
ax.set_title('ERA20C - GPCP')
ax.set_xlim(corr_range[0],corr_range[-1])
ax.set_ylabel('Counts',fontweight='bold')
ax.set_xlabel('Correlation',fontweight='bold')
xmin,xmax = ax.get_xlim()
ymin,ymax = ax.get_ylim()
ypos = ymax-0.15*(ymax-ymin)
xpos = xmin+0.025*(xmax-xmin)
ax.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(np.nanmean(te_csave)),fontsize=11,fontweight='bold')
fig.tight_layout()
plt.subplots_adjust(left=0.1, bottom=0.35, right=0.95, top=0.93, wspace=0.5, hspace=0.5)
fig.suptitle(verif_dict[var][2]+' anomaly correlation',fontweight='bold')
if fsave:
print('saving to .png')
plt.savefig(nexp+'_verify_grid_'+verif_dict[var][1]+'_anomaly_correlation_'+str(trange[0])+'-'+str(trange[1])+'_reference.png')
plt.savefig(nexp+'_verify_grid_'+verif_dict[var][1]+'_anomaly_correlation_'+str(trange[0])+'-'+str(trange[1])+'_reference.pdf', bbox_inches='tight', dpi=300, format='pdf')
plt.close()
#
# BEGIN bias, r and CE calculations
#
# correlation and CE at each (lat,lon) point
lg_err = lmr_allyears - gpcp_allyears
lc_err = lmr_allyears - cmap_allyears
lr_err = lmr_allyears - tcr_allyears
le_err = lmr_allyears - era_allyears
gc_err = gpcp_allyears - cmap_allyears
tg_err = tcr_allyears - gpcp_allyears
eg_err = era_allyears - gpcp_allyears
te_err = tcr_allyears - era_allyears
r_lg = np.zeros([nlat_new,nlon_new])
ce_lg = np.zeros([nlat_new,nlon_new])
r_lc = np.zeros([nlat_new,nlon_new])
ce_lc = np.zeros([nlat_new,nlon_new])
r_lr = np.zeros([nlat_new,nlon_new])
ce_lr = np.zeros([nlat_new,nlon_new])
r_le = np.zeros([nlat_new,nlon_new])
ce_le = np.zeros([nlat_new,nlon_new])
r_gc = np.zeros([nlat_new,nlon_new])
ce_gc = np.zeros([nlat_new,nlon_new])
r_tg = np.zeros([nlat_new,nlon_new])
ce_tg = np.zeros([nlat_new,nlon_new])
r_eg = np.zeros([nlat_new,nlon_new])
ce_eg = np.zeros([nlat_new,nlon_new])
r_te = np.zeros([nlat_new,nlon_new])
ce_te = np.zeros([nlat_new,nlon_new])
# bias
# ...
# CE
ce_lg = coefficient_efficiency(gpcp_allyears,lmr_allyears)
ce_lc = coefficient_efficiency(cmap_allyears,lmr_allyears)
ce_lr = coefficient_efficiency(tcr_allyears,lmr_allyears)
ce_le = coefficient_efficiency(era_allyears,lmr_allyears)
ce_gc = coefficient_efficiency(cmap_allyears,gpcp_allyears)
ce_tg = coefficient_efficiency(gpcp_allyears,tcr_allyears)
ce_eg = coefficient_efficiency(gpcp_allyears,era_allyears)
ce_te = coefficient_efficiency(era_allyears,tcr_allyears)
# Correlation
for la in range(nlat_new):
for lo in range(nlon_new):
# LMR-GPCP
indok = np.isfinite(gpcp_allyears[:,la,lo])
nbok = np.sum(indok)
nball = lmr_allyears[:,la,lo].shape[0]
ratio = float(nbok)/float(nball)
if ratio > valid_frac:
r_lg[la,lo] = np.corrcoef(lmr_allyears[indok,la,lo],gpcp_allyears[indok,la,lo])[0,1]
else:
r_lg[la,lo] = np.nan
# LMR-CMAP
indok = np.isfinite(cmap_allyears[:,la,lo])
nbok = np.sum(indok)
nball = lmr_allyears[:,la,lo].shape[0]
ratio = float(nbok)/float(nball)
if ratio > valid_frac:
r_lc[la,lo] = np.corrcoef(lmr_allyears[indok,la,lo],cmap_allyears[indok,la,lo])[0,1]
else:
r_lc[la,lo] = np.nan
# LMR-TCR
indok = np.isfinite(tcr_allyears[:,la,lo])
nbok = np.sum(indok)
nball = lmr_allyears[:,la,lo].shape[0]
ratio = float(nbok)/float(nball)
if ratio > valid_frac:
r_lr[la,lo] = np.corrcoef(lmr_allyears[indok,la,lo],tcr_allyears[indok,la,lo])[0,1]
else:
r_lr[la,lo] = np.nan
# LMR-ERA
indok = np.isfinite(era_allyears[:,la,lo])
nbok = np.sum(indok)
nball = lmr_allyears[:,la,lo].shape[0]
ratio = float(nbok)/float(nball)
if ratio > valid_frac:
r_le[la,lo] = np.corrcoef(lmr_allyears[indok,la,lo],era_allyears[indok,la,lo])[0,1]
else:
r_le[la,lo] = np.nan
# GPCP-CMAP
indok = np.isfinite(cmap_allyears[:,la,lo])
nbok = np.sum(indok)
nball = gpcp_allyears[:,la,lo].shape[0]
ratio = float(nbok)/float(nball)
if ratio > valid_frac:
r_gc[la,lo] = np.corrcoef(gpcp_allyears[indok,la,lo],cmap_allyears[indok,la,lo])[0,1]
else:
r_gc[la,lo] = np.nan
# GPCP-TCR
indok = np.isfinite(gpcp_allyears[:,la,lo])
nbok = np.sum(indok)
nball = tcr_allyears[:,la,lo].shape[0]
ratio = float(nbok)/float(nball)
if ratio > valid_frac:
r_tg[la,lo] = np.corrcoef(gpcp_allyears[indok,la,lo],tcr_allyears[indok,la,lo])[0,1]
else:
r_tg[la,lo] = np.nan
# GPCP-ERA
indok = np.isfinite(gpcp_allyears[:,la,lo])
nbok = np.sum(indok)
nball = era_allyears[:,la,lo].shape[0]
ratio = float(nbok)/float(nball)
if ratio > valid_frac:
r_eg[la,lo] = np.corrcoef(gpcp_allyears[indok,la,lo],era_allyears[indok,la,lo])[0,1]
else:
r_eg[la,lo] = np.nan
# ERA-TCR
indok = np.isfinite(era_allyears[:,la,lo])
nbok = np.sum(indok)
nball = era_allyears[:,la,lo].shape[0]
ratio = float(nbok)/float(nball)
if ratio > valid_frac:
r_te[la,lo] = np.corrcoef(era_allyears[indok,la,lo],tcr_allyears[indok,la,lo])[0,1]
else:
r_te[la,lo] = np.nan
# median
# ------
lat_trunc = np.squeeze(lat2_new[:,0])
indlat = np.where((lat_trunc[:] > -60.0) & (lat_trunc[:] < 60.0))
# LMR-GPCP
print('')
lg_rmedian = str(float('%.2g' % np.median(np.median(r_lg)) ))
print('lmr-gpcp all-grid median r : %s' % str(lg_rmedian))
lg_rmedian60 = str(float('%.2g' % np.median(np.median(r_lg[indlat,:])) ))
print('lmr-gpcp 60S-60N median r : %s' % str(lg_rmedian60))
lg_cemedian = str(float('%.2g' % np.median(np.median(ce_lg)) ))
print('lmr-gpcp all-grid median ce : %s' % str(lg_cemedian))
lg_cemedian60 = str(float('%.2g' % np.median(np.median(ce_lg[indlat,:])) ))
print('lmr-gpcp 60S-60N median ce : %s' % str(lg_cemedian60))
# LMR-CMAP
print('')
lc_rmedian = str(float('%.2g' % np.median(np.median(r_lc)) ))
print('lmr-cmap all-grid median r : ' + str(lc_rmedian))
lc_rmedian60 = str(float('%.2g' % np.median(np.median(r_lc[indlat,:])) ))
print('lmr-cmap 60S-60N median r : ' + str(lc_rmedian60))
lc_cemedian = str(float('%.2g' % np.median(np.median(ce_lc)) ))
print('lmr-cmap all-grid median ce : ' + str(lc_cemedian))
lc_cemedian60 = str(float('%.2g' % np.median(np.median(ce_lc[indlat,:])) ))
print('lmr-cmap 60S-60N median ce : ' + str(lc_cemedian60))
# LMR-TCR
print('')
lr_rmedian = str(float('%.2g' % np.median(np.median(r_lr)) ))
print('lmr-tcr all-grid median r : ' + str(lr_rmedian))
lr_rmedian60 = str(float('%.2g' % np.median(np.median(r_lr[indlat,:])) ))
print('lmr-tcr 60S-60N median r : ' + str(lr_rmedian60))
lr_cemedian = str(float('%.2g' % np.median(np.median(ce_lr)) ))
print('lmr-tcr all-grid median ce : ' + str(lr_cemedian))
lr_cemedian60 = str(float('%.2g' % np.median(np.median(ce_lr[indlat,:])) ))
print('lmr-tcr 60S-60N median ce : ' + str(lr_cemedian60))
# LMR-ERA
print('')
le_rmedian = str(float('%.2g' % np.median(np.median(r_le)) ))
print('lmr-era all-grid median r : ' + str(le_rmedian))
le_rmedian60 = str(float('%.2g' % np.median(np.median(r_le[indlat,:])) ))
print('lmr-era 60S-60N median r : ' + str(le_rmedian60))
le_cemedian = str(float('%.2g' % np.median(np.median(ce_le)) ))
print('lmr-era all-grid median ce : ' + str(le_cemedian))
le_cemedian60 = str(float('%.2g' % np.median(np.median(ce_le[indlat,:])) ))
print('lmr-era 60S-60N median ce : ' + str(le_cemedian60))
# GPCP-CMAP
print('')
gc_rmedian = str(float('%.2g' % np.median(np.median(r_gc)) ))
print('gpcp-cmap all-grid median r : ' + | |
headers = {
"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/plain"
}
connection = httplib.HTTPConnection(host, port)
connection.request("POST", pathinfo, params, headers)
# Status
response = connection.getresponse()
print("Status: " + str(response.status), "Reason: " + str(response.reason))
# Server job location (URL)
location = response.getheader("location")
print("Location: " + location)
# Jobid
jobid = location[location.rfind('/')+1:]
print("Job id: " + jobid)
connection.close()
# -------------------------------------
# Check job status, wait until finished
tcount = 0
while True:
connection = httplib.HTTPConnection(host, port)
connection.request("GET", pathinfo+"/"+jobid)
response = connection.getresponse()
data = response.read()
# XML response: parse it to obtain the current status
dom = parseString(data)
if use_mirror:
phaseElement = dom.getElementsByTagName('phase')[0]
else:
phaseElement = dom.getElementsByTagName('uws:phase')[0]
phaseValueElement = phaseElement.firstChild
phase = phaseValueElement.toxml()
print("Status: " + phase)
# Check finished
if phase == 'COMPLETED':
break
#wait and repeat
time.sleep(0.2)
tcount += 0.2
if (phase == 'ERROR') | (tcount > max_wait):
return False
# print "Data:"
# print data
connection.close()
# -------------------------------------
# Get results
connection = httplib.HTTPConnection(host, port)
connection.request("GET", pathinfo+"/"+jobid+"/results/result")
response = connection.getresponse()
data = response.read()
outputFileName = output_file + (not use_mirror)*".gz"
try:
outputFile = open(outputFileName, "w")
outputFile.write(data)
except:
# Python 3
outputFile = open(outputFileName, "wb")
outputFile.write(data)
outputFile.close()
connection.close()
print("Data saved in: " + outputFileName)
if not use_mirror:
# ESA archive returns gzipped
try:
os.remove(output_file)
except:
pass
os.system('gunzip {output_file}.gz'.format(output_file=output_file))
table = Table.read(output_file, format='fits')
return table
def gen_tap_box_query(ra=165.86, dec=34.829694, radius=3., corners=None, max=100000, db='ls_dr7.tractor_primary', columns=['*'], rd_colnames=['ra', 'dec'], wcs_pad=0.5):
    """
    Generate an ADQL box-query string for a TAP server.

    Parameters
    ----------
    ra, dec : float
        Box center, decimal degrees.

    radius : float
        Box size, in arc-minutes (full width; half-width is ``radius/2``).

    corners : 4-tuple, `~astropy.wcs.WCS` or None
        ``(ra_min, ra_max, dec_min, dec_max)`` of a query box to use instead
        of `radius`.  If a `~astropy.wcs.WCS` object, the limits are taken
        from its `~astropy.wcs.WCS.calc_footprint` method and padded by
        `wcs_pad` times the footprint extent on each side.

    max : int or None
        Row limit inserted as ``TOP {max}``; no limit when None.

    db : str
        Table name to query.

    columns : list of str
        Output columns; ``['*']`` selects everything.

    rd_colnames : list of str
        Names of the RA and Dec columns in `db` (degrees).

    wcs_pad : float
        Fractional padding applied to a WCS footprint (see `corners`).

    Returns
    -------
    query : str
        Query string.  If ``ra + dec`` is not finite, the WHERE clause is
        omitted entirely (whole-table query).
    """
    # Half-width of the box in degrees; RA half-width is stretched by
    # 1/cos(dec) so the box has roughly constant angular size on the sky.
    half_width = radius / 60 / 2
    cos_dec = np.cos(dec / 180 * np.pi)

    row_limit = '' if max is None else 'TOP {0}'.format(max)

    if corners is None:
        left = ra - half_width / cos_dec
        right = ra + half_width / cos_dec
        bottom = dec - half_width
        top = dec + half_width
    elif hasattr(corners, 'calc_footprint'):
        foot = corners.calc_footprint()
        left, right = foot[:, 0].min(), foot[:, 0].max()
        bottom, top = foot[:, 1].min(), foot[:, 1].max()
        # Pad the footprint by a fraction of its extent on each side
        dx = right - left
        dy = top - bottom
        left -= wcs_pad * dx
        right += wcs_pad * dx
        bottom -= wcs_pad * dy
        top += wcs_pad * dy
    elif len(corners) != 4:
        msg = 'corners needs 4 values (ra_min, ra_max, dec_min, dec_max)'
        raise ValueError(msg)
    else:
        left, right, bottom, top = corners

    fmt = dict(rc=rd_colnames[0], dc=rd_colnames[1],
               left=left, right=right,
               top=top, bottom=bottom,
               maxsel=row_limit,
               db=db,
               output_columns=', '.join(columns))

    if np.isfinite(ra + dec):
        query = ("SELECT {maxsel} {output_columns} FROM {db} WHERE " +
                 "{rc} > {left} AND {rc} < {right} AND " +
                 "{dc} > {bottom} AND {dc} < {top} ")
    else:
        # Non-finite center: query the full table with no positional cut
        query = "SELECT {maxsel} {output_columns} FROM {db} "

    return query.format(**fmt)
def query_tap_catalog(ra=165.86, dec=34.829694, radius=3., corners=None,
                      max_wait=20,
                      db='ls_dr9.tractor', columns=['*'], extra='',
                      rd_colnames=['ra', 'dec'],
                      tap_url='https://datalab.noirlab.edu/tap',
                      max=1000000, clean_xml=True, verbose=True,
                      des=False, gaia=False, nsc=False, vizier=False,
                      skymapper=False,
                      hubble_source_catalog=False, tap_kwargs={},
                      **kwargs):
    """Query NOAO Catalog holdings

    Parameters
    ----------
    ra, dec : float
        Center of the query region, decimal degrees

    radius : float
        Radius of the query, in arcmin

    corners : 4-tuple, `~astropy.wcs.WCS` or None
        ra_min, ra_max, dec_min, dec_max of a query box to use instead of
        `radius`. Or if a `WCS` object, get limits from the
        `~astropy.wcs.WCS.calc_footprint` method

    db : str
        Parent database (https://datalab.noirlab.edu/query.php).

    columns : list of str
        List of columns to output. Default ['*'] returns all columns.

    extra : str
        String to add to the end of the positional box query, e.g.,
        'AND mag_auto_i > 16 AND mag_auto_i < 16.5'.

    rd_colnames : str, str
        Column names in `db` corresponding to ra/dec (degrees).

    tap_url : str
        TAP hostname

    max : int
        Row limit passed through to `gen_tap_box_query`.

    clean_xml : bool
        Remove the intermediate job output file written by astroquery.

    des : bool
        Query `des_dr1.main` from NOAO.

    gaia : bool
        Query `gaiadr2.gaia_source` from http://gea.esac.esa.int.

    nsc : bool
        Query the NOAO Source Catalog (Nidever et al. 2018), `nsc_dr1.object`.

    vizier : bool
        Use the VizieR TAP server at http://tapvizier.u-strasbg.fr/TAPVizieR/tap, see http://tapvizier.u-strasbg.fr/adql/about.html.

    skymapper : bool
        Use the VizieR TAP server with SkyMapper ra/dec column names.

    hubble_source_catalog : bool
        Query the Hubble Source Catalog (v3). If no 'NumImages' criteria is
        found in `extra`, then add an additional requirement:

        >>> extra += ' AND NumImages > 1'

    tap_kwargs : dict
        Extra keyword arguments passed to `~astroquery.utils.tap.core.TapPlus`.

    Returns
    -------
    table : `~astropy.table.Table`
        Result of the query, or None if the job failed.
    """
    from astroquery.utils.tap.core import TapPlus

    # The boolean shortcuts below override `db`, `tap_url` and, where
    # needed, `rd_colnames` for specific well-known services.

    # DES DR1
    if des:
        if verbose:
            print('Query DES DR1 from NOAO')

        db = 'des_dr1.main'
        tap_url = 'https://datalab.noirlab.edu/tap'

    # NOAO source catalog, seems to have some junk
    if nsc:
        if verbose:
            print('Query NOAO source catalog')

        db = 'nsc_dr1.object'
        tap_url = 'https://datalab.noirlab.edu/tap'
        extra += ' AND nsc_dr1.object.flags = 0'

    # GAIA DR2
    if gaia:
        if verbose:
            print('Query GAIA DR2 from ESA')

        db = 'gaiadr2.gaia_source'
        tap_url = 'http://gea.esac.esa.int/tap-server/tap'

    # VizieR TAP server
    if vizier:
        if verbose:
            print('Query {0} from VizieR TAP server'.format(db))

        tap_url = 'http://tapvizier.u-strasbg.fr/TAPVizieR/tap'
        rd_colnames = ['RAJ2000', 'DEJ2000']

    if skymapper:
        if verbose:
            print('Query {0} from VizieR TAP server'.format(db))

        tap_url = 'http://tapvizier.u-strasbg.fr/TAPVizieR/tap'
        rd_colnames = ['RAICRS', 'DEICRS']

    if hubble_source_catalog:
        # Default to the summary table unless an explicit HSC table given
        if (db is None) or ('dbo' not in db):
            db = 'dbo.SumPropMagAutoCat'

        tap_url = 'http://vao.stsci.edu/HSCTAP/tapservice.aspx'
        rd_colnames = ['MatchRA', 'MatchDec']
        if 'NumImages' not in extra:
            # Leading space is required so the clause doesn't run into
            # any preceding text already in `extra`
            extra += ' AND NumImages > 1'

    tap = TapPlus(url=tap_url, **tap_kwargs)

    query = gen_tap_box_query(ra=ra, dec=dec, radius=radius, max=max,
                              db=db, columns=columns,
                              rd_colnames=rd_colnames,
                              corners=corners)

    job = tap.launch_job(query+extra, dump_to_file=True, verbose=verbose)

    try:
        table = job.get_results()
        if clean_xml:
            # astroquery API changed the attribute/accessor name over time
            if hasattr(job, 'outputFile'):
                jobFile = job.outputFile
            else:
                jobFile = job.get_output_file()

            os.remove(jobFile)

        # Provide ra/dec columns
        for c, cc in zip(rd_colnames, ['ra', 'dec']):
            if (c in table.colnames) & (cc not in table.colnames):
                table[cc] = table[c]

        table.meta['TAPURL'] = tap_url, 'TAP URL'
        table.meta['TAPDB'] = db, 'TAP database name'
        table.meta['TAPQUERY'] = query+extra, 'TAP query'
        table.meta['RAQUERY'] = ra, 'Query central RA'
        table.meta['DECQUERY'] = dec, 'Query central Dec'
        table.meta['RQUERY'] = radius, 'Query radius, arcmin'

        if hubble_source_catalog:
            # Coerce object-dtype columns to plain strings so the table
            # can be serialized later
            for col in table.colnames:
                if table[col].dtype == 'object':
                    print('Reformat column: {0}'.format(col))
                    strcol = list(table[col])
                    table[col] = strcol

    except Exception:
        # Was a bare `except:`; keep the best-effort behavior but don't
        # swallow KeyboardInterrupt/SystemExit
        if hasattr(job, 'outputFile'):
            jobFile = job.outputFile
        else:
            jobFile = job.get_output_file()

        print('Query failed, check {0} for error messages'.format(jobFile))
        table = None

    return table
# Limit Hubble Source Catalog query to brighter sources in limited bands
# Per-filter magnitude thresholds; `get_hubble_source_catalog` ORs these
# together as a brightness cut and appends the filter columns to the output.
HSCv3_FILTER_LIMITS = {'W3_F160W': 23.5,
                       'W3_F140W': 23.5,
                       'W3_F125W': 23.5,
                       'W3_F110W': 23.5,
                       'W3_F098M': 23.5,
                       'W3_F105W': 23.5,
                       'A_F814W': 23.5,
                       'W3_F814W': 23.5,
                       'A_F606W': 23.5,
                       'W3_F606W': 23.5,
                       'A_F850LP': 23.5,
                       'W3_F850LP': 23.5,
                       'A_F775W': 23.5,
                       'W3_F775W': 23.5}

# Default column list for Hubble Source Catalog (v3) queries
HSCv3_COLUMNS = ['MatchRA', 'MatchDec', 'CI', 'CI_Sigma',
                 'KronRadius', 'KronRadius_Sigma', 'Extinction',
                 'TargetName', 'NumImages', 'NumFilters', 'NumVisits',
                 'DSigma']
def get_hubble_source_catalog(ra=0., dec=0., radius=3, corners=None, max=int(1e7), extra=' AND NumImages > 0', kron_max=0.45, dsigma_max=100, clip_singles=10*u.arcsec, verbose=True, columns=HSCv3_COLUMNS, filter_limits=HSCv3_FILTER_LIMITS):
    """
    Query the Hubble Source Catalog (v3).

    The default `extra` query returns well-detected sources in red bands.

    Parameters
    ----------
    ra, dec : float
        Center of the query region, decimal degrees.

    radius : float
        Query radius, arcmin.

    corners : 4-tuple, `~astropy.wcs.WCS` or None
        Query box to use instead of `radius` (see `query_tap_catalog`).

    max : int
        Row limit for the query.

    extra : str
        Additional selection appended to the positional query.

    kron_max, dsigma_max : float or None
        If given, append `KronRadius < kron_max` / `DSigma < dsigma_max`
        cuts to `extra`.

    clip_singles : `~astropy.units.Quantity`, None or False
        If set, remove NumImages == 1 sources closer than this separation
        to a NumImages > 1 source.

    filter_limits : dict or None
        Query on individual HSC filter magnitudes; the cuts are OR'ed
        together and the filter columns added to the output.

    Returns
    -------
    tab : `~astropy.table.Table`
        Query result.
    """
    import astropy.table

    # NOTE: the original message said "NOAO Source Catalog" — copy-paste
    # error, this queries the Hubble Source Catalog
    msg = 'Query Hubble Source Catalog ({ra:.5f},{dec:.5f},{radius:.1f}\')'
    print(msg.format(ra=ra, dec=dec, radius=radius))

    if kron_max is not None:
        extra += ' AND KronRadius < {0}'.format(kron_max)

    if dsigma_max is not None:
        extra += ' AND DSigma < {0}'.format(dsigma_max)

    if filter_limits is not None:
        limit_list = ['{0} < {1}'.format(f, filter_limits[f])
                      for f in filter_limits]

        filter_selection = ' AND ({0})'.format(' OR '.join(limit_list))
        extra += filter_selection

        # Build a new list: the previous `columns += [...]` mutated the
        # module-level HSCv3_COLUMNS default in place on every call
        columns = list(columns) + [f for f in filter_limits]

        # Join to the magnitude table for the filter columns
        db = 'dbo.SumPropMagAutoCat p join dbo.SumMagAutoCat m on p.MatchID = m.MatchID'
    else:
        db = 'dbo.SumPropMagAutoCat'

    tab = query_tap_catalog(ra=ra, dec=dec, radius=radius,
                            corners=corners, max=max,
                            extra=extra, hubble_source_catalog=True,
                            verbose=verbose, db=db, columns=columns)

    if clip_singles not in [None, False]:
        rr = tab['NumImages'] > 1
        if (rr.sum() > 0) & ((~rr).sum() > 0):
            r0, r1 = tab[rr], tab[~rr]
            idx, dr = utils.GTable(r0).match_to_catalog_sky(r1)
            # Keep only the single-image sources farther than the tolerance
            new = dr > clip_singles
            # Assign back to `tab`: previously the stacked table was bound
            # to an unused local (`xtab`) and discarded, so clipping never
            # affected the returned catalog
            tab = astropy.table.vstack([r0, r1[new]])
            if verbose:
                msg = ('HSCv3: Remove {0} NumImages == 1 sources ' +
                       'with tolerance {1}')
                print(msg.format((~new).sum(), clip_singles))

    return tab
def get_nsc_catalog(ra=0., dec=0., radius=3, corners=None, max=100000, extra=' AND (rerr < 0.08 OR ierr < 0.08 OR zerr < 0.08) AND raerr < 0.2 AND decerr < 0.2', verbose=True):
"""
Query NOAO Source Catalog, which is aligned to GAIA DR1.
The default `extra` query returns well-detected sources in red bands.
"""
msg = 'Query NOAO Source Catalog ({ra:.5f},{dec:.5f},{radius:.1f}\')'
print(msg.format(ra=ra, dec=dec, radius=radius))
tab = | |
an ISO date formatted string or a Python `datetime` object.
:return: Returns `None` if no alerts were found, otherwise returns a dict with three keys: "n" and "t", the same as supplied in the client
data, the key "a" (for alert) which contains a *description string* of the alert similar to `value < min_threshold` or
`value > max_threshold` and a key "p" (for prohibit insert), a bool on whether or not the data is allowed to be inserted.
"""
data_name, data_value, data_datetime = self._verify_data(data)
datatype_info = self._verify_datatype(data_name)
if data_name in self._cached_alert_specs:
alert_spec = self._cached_alert_specs[data_name]
else:
if datatype_info['alert_spec'] is not None:
alert_spec = AlertSpec.from_dict(datatype_info['alert_spec'])
self._cached_alert_specs[data_name] = alert_spec
else:
alert_spec = None
self._cached_alert_specs[data_name] = None
value_type = StorageType.type_enum(type(data_value))
if value_type.value != datatype_info['storage_type']:
database_logger.info('Received data insert with incorrect value type')
raise InvalidData('Value type is different from the registered data type')
if alert_spec is None or value_type is StorageType.STR:
return None
# If it's an array of values, convert the value to be whatever it needs to be according to the spec
if value_type == StorageType.ARRAY and alert_spec.array_treatment is not ArrayTreatment.INDIVIDUALLY:
data_value = ArrayTreatment.get_func(alert_spec.array_treatment)(data_value)
alert_msg = {'n': data_name, 't': int(data_datetime.timestamp()), 'p': alert_spec.prohibit_insert}
if alert_spec.abs_alert_thresholds is not None:
alert = self._verify_alert_abs_thresholds(data_value, alert_spec.abs_alert_thresholds)
if alert is not None:
alert_msg['a'] = alert
return alert_msg
if alert_spec.alert_intervals is not None:
for interval in alert_spec.alert_intervals:
alert = self._verify_alert_interval(data_value, interval)
if alert is not None:
alert_msg['a'] = alert
return alert_msg
# We can assume that past_avg_count is also not None since an AlertSpec checks for it
if alert_spec.avg_deviation is not None:
client_info = self._verify_client(client)
# Only check avg if the number of documents stored is already higher than the past_avg_count necessary
if self._database[self._Data][client_info['name']][datatype_info['name']].count_documents({}) >= alert_spec.past_avg_count:
past_values = [record['value'] for record in
self._database[self._Data][client_info['name']][datatype_info['name']]
.find(projection={'_id': 0, 'value': 1}).sort('_id', pymongo.DESCENDING).limit(alert_spec.past_avg_count)]
if value_type == StorageType.ARRAY and alert_spec.array_treatment is not ArrayTreatment.INDIVIDUALLY:
past_values = list(map(ArrayTreatment.get_func(alert_spec.array_treatment), past_values))
avg = np.mean(past_values, axis=0)
alert = self._verify_alert_avg_deviation(data_value, alert_spec.avg_deviation, avg)
if alert is not None:
alert_msg['a'] = alert
return alert_msg
return None
def query_data_client(self, client: Union[str, ObjectId], datatype: Union[str, ObjectId] = None,
                      date_range: Tuple[Union[str, int, datetime, None], Union[str, int, datetime, None]] = None) -> dict:
    """
    Queries the data for a specific client.

    :param client: Either the `ObjectId` or the name of the registered client.
    :param datatype: An optional `ObjectId` or name of a registered datatype as a filter.
    :param date_range: An optional (start, end) pair restricting the query window.
    :return: A dict mapping datatype name -> list of matching records.
    :raises InvalidClient: if the client is not registered.
    :raises InvalidData: if a given datatype is not registered.
    """
    client_info = self._verify_client(client)
    client_filter = client_info['name']
    if datatype:
        datatype_info = self._verify_datatype(datatype)
        datatype_filter = str(datatype_info['name'])
    else:
        # No datatype given: match every datatype for this client.
        datatype_filter = '.*'
    date_filter = self._setup_date_filter(date_range)
    all_data = {}
    # Collection names follow "data.[CLIENT].[DATATYPE]". A raw f-string is
    # used so "\." is a regex escape, not an invalid Python string escape.
    name_regex = rf'{self._Data}\.{client_filter}\.{datatype_filter}'
    for coll in self._database.list_collection_names(filter={'name': {'$regex': name_regex}}):
        # Bug fix: the loop previously rebound the `client` parameter, so the
        # log line below reported the last collection's client instead of the
        # requested one.
        _, _, coll_datatype = coll.split('.')
        all_data[coll_datatype] = list(self._database[coll].find(date_filter))
    database_logger.info(f'Received successful client data query for client {client}')
    return all_data
def query_data_type(self, datatype: Union[str, ObjectId], date_range: Tuple[Union[str, int, datetime, None], Union[str, int, datetime, None]] = None) -> dict:
    """
    Queries the data for a specific datatype across all clients.

    :param datatype: Either a `ObjectId` or the name of the registered datatype.
    :param date_range: An optional (start, end) pair restricting the query window.
    :return: A dict mapping client name -> list of matching records.
    :raises InvalidData: if the datatype is not registered.
    """
    datatype_info = self._verify_datatype(datatype)
    datatype_filter = str(datatype_info['name'])
    date_filter = self._setup_date_filter(date_range)
    all_data = {}
    # Collection names follow "data.[CLIENT].[DATATYPE]". A raw f-string is
    # used so "\." is a regex escape, not an invalid Python string escape.
    name_regex = rf'{self._Data}\..*\.{datatype_filter}'
    for coll in self._database.list_collection_names(filter={'name': {'$regex': name_regex}}):
        # Bug fix: the loop previously rebound the `datatype` parameter, so the
        # log line below reported the last collection instead of the argument.
        _, coll_client, _ = coll.split('.')
        all_data[coll_client] = list(self._database[coll].find(date_filter))
    database_logger.info(f'Received successful datatype data query for datatype {datatype}')
    return all_data
def query_all(self, date_range: Tuple[Union[str, int, datetime, None], Union[str, int, datetime, None]] = None) -> dict:
    """
    Returns all actual data in the database, not including the metadata for clients and datatypes.

    :param date_range: An optional (start, end) pair restricting the query window.
    :return: Nested dict: client name -> datatype name -> list of records.
    """
    date_filter = self._setup_date_filter(date_range)
    all_data = {}
    # Collection names follow "data.[CLIENT].[DATATYPE]". A raw f-string is
    # used so "\." is a regex escape, not an invalid Python string escape.
    name_regex = rf'{self._Data}\.'
    for coll in self._database.list_collection_names(filter={'name': {'$regex': name_regex}}):
        _, coll_client, coll_datatype = coll.split('.')
        # setdefault replaces the manual "create dict if missing" dance; the
        # old per-datatype `{}` initialisation was dead code (it was always
        # immediately overwritten by the list assignment).
        all_data.setdefault(coll_client, {})[coll_datatype] = list(self._database[coll].find(date_filter))
    database_logger.info('Received successful generic data query')
    return all_data
def query_datatypes(self) -> list:
    """Return every registered datatype document from the metadata collection."""
    database_logger.info('Received query for datatypes')
    # TODO: Add some filters?
    metadata_cursor = self._type_metadata.find()
    return list(metadata_cursor)
def query_clients(self) -> list:
    """Return every registered client document from the client registry."""
    database_logger.info('Received query for clients')
    # TODO: Extra logging
    registry_cursor = self._client_registry.find()
    return list(registry_cursor)
def close(self) -> None:
    """
    Closes the connection to the database. If the database is used again, it will be automatically re-opened.
    """
    # Delegates to the underlying client's close(); per the contract above the
    # connection is re-established lazily on the next database operation.
    self._client.close()
def _verify_client(self, client: Union[str, ObjectId]) -> dict:
    """Resolve a client name or ObjectId to its registry document, with caching.

    :raises InvalidClient: when no matching registration exists.
    """
    try:
        return self._cached_clients[client]
    except KeyError:
        pass
    if isinstance(client, str):
        record = self._client_registry.find_one({'name': client})
    elif isinstance(client, ObjectId):
        record = self._client_registry.find_one({'_id': client})
    else:
        # Unsupported key type: treated the same as "not found".
        record = None
    if record is None:
        database_logger.info(f'Received data insert request for non registered client {client}')
        raise InvalidClient('Specified client has not been registered')
    # Cache under both lookup keys so later calls hit regardless of form.
    self._cached_clients[record['name']] = record
    self._cached_clients[record['_id']] = record
    return record
def _verify_data(self, data: dict) -> Tuple[str, str, datetime]:
    """Validate an incoming data dict and unpack it.

    Accepts either the short ('n'/'v'/'t') or long ('name'/'value'/'time') keys.

    :param data: Raw payload dict from a client.
    :return: (name, value, datetime) tuple.
    :raises InvalidData: when a field is missing or the timestamp is invalid.
    """
    # Explicit None checks throughout: the old `data.get('x') or
    # data.get('long')` idiom rejected perfectly valid falsy payloads such as
    # a value of 0, False, '' or a unix timestamp of 0.
    data_name = data.get('n')
    if data_name is None:
        data_name = data.get('name')
    if data_name is None:
        database_logger.info('Received data insert with missing data name')
        raise InvalidData('Data name "n" or "name" not specified')
    data_value = data.get('v')
    if data_value is None:
        data_value = data.get('value')
    if data_value is None:
        database_logger.info('Received data insert with missing data value')
        raise InvalidData('Data value "v" or "value" not specified')
    data_timestamp = data.get('t')
    if data_timestamp is None:
        data_timestamp = data.get('time')
    if data_timestamp is None:
        database_logger.info('Received data insert with missing data time')
        raise InvalidData('Data timestamp "t" or "time" not specified')
    try:
        data_datetime = self._parse_timestamp(data_timestamp)
    except InvalidData:
        database_logger.info('Received data insert with invalid time')
        raise
    return data_name, data_value, data_datetime
def _verify_datatype(self, datatype: Union[str, ObjectId]) -> dict:
    """Resolve a datatype name or ObjectId to its metadata document, with caching.

    :raises InvalidData: when no matching registration exists.
    """
    try:
        return self._cached_datatypes[datatype]
    except KeyError:
        pass
    if isinstance(datatype, str):
        record = self._type_metadata.find_one({'name': datatype})
    elif isinstance(datatype, ObjectId):
        record = self._type_metadata.find_one({'_id': datatype})
    else:
        # Unsupported key type: treated the same as "not found".
        record = None
    if record is None:
        database_logger.info(f'Received client data query request for non registered datatype {datatype}')
        raise InvalidData('Specified datatype has not been registered')
    # Cache under both lookup keys so later calls hit regardless of form.
    self._cached_datatypes[record['name']] = record
    self._cached_datatypes[record['_id']] = record
    return record
@staticmethod
def _setup_date_filter(date_range: Tuple[Union[str, int, datetime, None], Union[str, int, datetime, None]]) -> dict:
    """Build a MongoDB datetime range filter from a (start, end) pair.

    :param date_range: (start, end); each may be an ISO string, unix int,
        datetime or None (open-ended). Inverted bounds are swapped.
    :return: A `{'datetime': {...}}` filter dict, or None when no range given.
    :raises TypeError: if `date_range` is not a list or tuple.
    :raises ValueError: on wrong length or when both ends are None.
    """
    if date_range is None:
        return None
    # The annotation advertises a tuple, but the old check rejected anything
    # that was not a list; accept both sequence types.
    if not isinstance(date_range, (list, tuple)):
        raise TypeError('Date range is not a list or tuple')
    if len(date_range) != 2:
        raise ValueError('Expected 2 values in date range')
    start_date = DataManager._parse_timestamp(date_range[0]) if date_range[0] is not None else None
    end_date = DataManager._parse_timestamp(date_range[1]) if date_range[1] is not None else None
    if start_date is None and end_date is None:
        raise ValueError('Either the start date or the end date must not be None')
    # Swap dates if they are inverted.
    # Better to fix the mistake than to raise an exception when possible.
    if (start_date is not None and end_date is not None) and start_date > end_date:
        start_date, end_date = end_date, start_date
    date_filter = {'datetime': {}}
    if start_date:
        date_filter['datetime']['$gte'] = start_date
    if end_date:
        date_filter['datetime']['$lte'] = end_date
    return date_filter
@staticmethod
def _parse_timestamp(t):
    """Normalise *t* (datetime | ISO string | unix int) to a datetime.

    :raises InvalidData: on an unsupported type or an unparseable value.
    """
    bad_format = 'Timestamp format is invalid. Expected datetime object or ISO str or int timestamp'
    if isinstance(t, datetime):
        return t
    if isinstance(t, str):
        try:
            return datetime.fromisoformat(t)
        except ValueError:
            raise InvalidData(bad_format)
    if isinstance(t, int):
        try:
            return datetime.fromtimestamp(t)
        except ValueError:
            raise InvalidData(bad_format)
    raise InvalidData('Timestamp type is invalid, expected datetime object, str or int')
@staticmethod
def _verify_bounds(bounds: tuple, alert_spec: AlertSpec, expected_type: StorageType):
# Check that type isn't strings
if expected_type is not StorageType.NUMBER:
raise TypeError('Bounds and Thresholds can only be numbers')
# Check bounds object, types and values
if bounds is not None:
if len(bounds) != 2:
raise ValueError('Expected 2 values in bounds')
if not (StorageType.is_instance(bounds[0], expected_type) or bounds[0] is None) or \
not (StorageType.is_instance(bounds[1], expected_type) or bounds[1] is None):
raise TypeError(f'Types for bounds don\'t match with expected type {expected_type.name}')
# Swap bounds if they are inverted
# Better to fix the mistake than to raise an exception
if bounds[0] is not None and bounds[1] is not None and bounds[0] >= bounds[1]:
raise ValueError(f'Low bound {bounds[0]} cannot be higher than high bound {bounds[1]}')
# Check if thresholds are valid
if bounds is not None and alert_spec is not None:
if alert_spec.abs_alert_thresholds is not None:
if bounds[0] is not None:
if alert_spec.abs_alert_thresholds[0] is not None and alert_spec.abs_alert_thresholds[0] < bounds[0] or \
alert_spec.abs_alert_thresholds[1] is not None and alert_spec.abs_alert_thresholds[1] < bounds[0]:
raise ValueError(f'Alert thresholds can\'t be lower than low valid bound {bounds[0]}')
if bounds[1] is not None:
if alert_spec.abs_alert_thresholds[0] is not None and alert_spec.abs_alert_thresholds[0] > bounds[1] or \
alert_spec.abs_alert_thresholds[1] is not None and alert_spec.abs_alert_thresholds[1] > bounds[1]:
raise ValueError(f'Alert thresholds can\'t be higher than high valid bound {bounds[1]}')
if alert_spec.alert_intervals is not None:
for group in alert_spec.alert_intervals:
if bounds[0] is not None:
if group[0] is not None and group[0] < bounds[0] or \
group[1] is not None and group[1] < bounds[0]:
raise ValueError(f'Alert intervals can\'t be lower than low | |
# This script is to replace the missing values in a modified paleogeography.
import netCDF4 as nc
import numpy as np
import matplotlib.pyplot as plt
import os
import pyproj as proj4
import scipy.interpolate as si
# The CoordinateSystem and GeographicSystem classes are hereby used courtesy of Dr. <NAME>; his
# repository of excellent cartography tools (among other things) can be found at https://github.com/deeplycloudy.
# Developer notes: This code could be more elegant, and because it involves several loops through data, can be
# somewhat slow. The 'nhn' interpolation method is the slowest (~35 minutes), the 'nn' method follows, and the
# 'nvn' method is the quickest. A global averaging method may be very fast in the future when it is implemented.
# Finally, this code could be rewritten to calculate exactly which locations within the grids need to be modified
# and from where for each nearest neighbor interpolation scheme, and then that matrix could be passed to all of
# the necessary components of the code to speed it up. However, this improvement will have to wait, as I am too
# busy to implement it now.
# Last few IMPORTANT notes about running this script:
#
# 1) To change the input, output, and other model variables go to the very bottom of the script, under the
# 'if __name__ == "__main__"' conditional. These should be the only thing you need to change, unless you wish to
# use a different interpolation scheme, which is determined when the initialize_new_paleobath instance
# is created with the interp_method argument.
#
# 2) This script assumes that there are no 'overhanging' ledges in the bathymetry (i.e. land cells above
# water cells). If there are--and I'm not even sure that would be allowed in CESM--this code will break
# in all kinds of places. You should probably fix those overhangs.
#
# 3) So far, this script has been built to interpolate the restart data to NEW OCEAN CELLS, and is not
# equipped to remove ocean cells (turning them to land cells). This addition will likely come soon, it
# was simply not necessary so far in this research project. It may not even be necessary, so long as
# KMT file of the new paleobathymetry is accurate. Not sure exactly how CESM would handle that.
#
# 4) The nvn interpolation DOES NOT SUPPORT 2D GRID INTERPOLATION AT THIS TIME.
#
# Published Oct. 20, 2017 - <NAME>
class CoordinateSystem(object):
    """The abstract coordinate system handling provided here works as follows.

    Each coordinate system must be able to convert data to a common coordinate system, which is chosen to be ECEF cartesian.
    data -> common system
    common system -> display coordinates
    This is implemented by the fromECEF and toECEF methods in each coordinate system object.
    User code is responsible for taking data in its native coord system,
    transforming it using to/fromECEF using the a coord system appropriate to the data, and then
    transforming that data to the final coordinate system using another coord system.

    This class maintains an attribute WGS84xyz that can be used in
    transformations to/from the WGS84 ECEF cartesian system, e.g.
    >>> WGS84lla = proj4.Proj(proj='latlong', ellps='WGS84', datum='WGS84')
    >>> projectedData = proj4.transform(WGS84lla, coordinateSystem.WGS84xyz, lat, lon, alt )
    The ECEF system has its origin at the center of the earth, with the +Z toward the north pole,
    +X toward (lat=0, lon=0), and +Y right-handed orthogonal to +X, +Z

    Depends on pyproj, http://code.google.com/p/pyproj/ to handle the ugly details of
    various map projections, geodetic transforms, etc.

    "You can think of a coordinate system as being something like character encodings,
    but messier, and without an obvious winner like UTF-8." - Django OSCON tutorial, 2007
    http://toys.jacobian.org/presentations/2007/oscon/tutorial/
    """

    # Shared WGS84 ECEF (geocentric cartesian) projection used by subclasses.
    WGS84xyz = proj4.Proj(proj='geocent', ellps='WGS84', datum='WGS84')

    def coordinates(self):
        """Return a tuple of standardized coordinate names"""
        # Bug fix: `raise NotImplemented` raises a TypeError (NotImplemented is
        # a constant, not an exception class); NotImplementedError is correct.
        # `self` was also missing, so instance calls failed with a TypeError
        # before ever reaching the raise.
        raise NotImplementedError

    def fromECEF(self, x, y, z):
        """Take ECEF x, y, z values and return x, y, z in the coordinate system defined by the object subclass"""
        raise NotImplementedError

    def toECEF(self, x, y, z):
        """Take x, y, z in the coordinate system defined by the object subclass and return ECEF x, y, z"""
        raise NotImplementedError
class GeographicSystem(CoordinateSystem):
    """
    Coordinate system defined on the surface of the earth using latitude, longitide, and altitude, referenced to WGS84 ellipse
    """

    WGS84lla = proj4.Proj(proj='latlong', ellps='WGS84', datum='WGS84')

    def toECEF(self, lon, lat, alt):
        """Convert lon/lat/alt (WGS84) to ECEF x, y, z."""
        xyz = np.array(proj4.transform(GeographicSystem.WGS84lla,
                                       CoordinateSystem.WGS84xyz,
                                       lon, lat, alt))
        return self._unpack(xyz)

    def fromECEF(self, x, y, z):
        """Convert ECEF x, y, z to lon/lat/alt (WGS84)."""
        lla = np.array(proj4.transform(CoordinateSystem.WGS84xyz,
                                       GeographicSystem.WGS84lla,
                                       x, y, z))
        return self._unpack(lla)

    @staticmethod
    def _unpack(projected):
        # Scalar inputs project to a 1-D triple; array inputs to shape (3, N).
        if projected.ndim == 1:
            return projected[0], projected[1], projected[2]
        return projected[0, :], projected[1, :], projected[2, :]
class initialize_new_paleobath(GeographicSystem):
def __init__(self,oldtemp,newkmt,tlat,tlon,tdepth,twodeeint=True,interp_method='nhn',):
    '''Prepare interpolation of a restart-file variable onto a new bathymetry.

    oldtemp       -- 3D variable data from the restart file (preferably temperature).
    newkmt        -- 2D kmt field of the new paleobathymetry (1-based, non-pythonic).
    tlat, tlon    -- 2D T-grid latitude/longitude (where scalar variables are defined).
    tdepth        -- depth levels, indexed by a single vertical index k.
    twodeeint     -- also gather sea-surface (k == 0) cells for 2D interpolation.
    interp_method -- interpolation scheme tag; 'nn', 'nhn' and 'mq' trigger the
                     collection of full interpolation info (see find_new_cells).
    '''
    print('\n\nInterpolation method chosen: %s'%interp_method)
    self.tdi = twodeeint
    self.emptyvalue = 0 # Fill value for land cells.
    self.tlat = tlat # Please make sure this is the T-grid, where scalar variables are defined.
    self.tlon = tlon # See above.
    self.tdepth = tdepth # See above.
    self.im = interp_method
    self.ns = newkmt # 2D new shape (new kmt data), not pythonic (ie unlike self.oldkmt).
    self.ov = oldtemp # 3D variable data from restart file (preferably temperature).
    # getNearestPointAbove() is defined elsewhere in this class; per the note
    # below it yields the old kmt as a zero-based (pythonic) index and assumes
    # the bathymetry has no overhanging ledges.
    self.oldkmt = self.getNearestPointAbove() # 2D kmt from oldvariable. Assumes no overhang.
    # NOTE: This is kmt-1, so that it is a pythonic index.
    self.new = self.find_new_cells()
def find_new_cells(self,):
    '''This function identifies which new ocean cells are being introduced with the new
    kmt. It does not at this time identify where the ocean is being removed in favor
    of land.

    Returns an array of [k, j, i] indices of new ocean cells; also sets
    self.obsolete, self.bad2d and (for 'nn'/'nhn'/'mq') self.interp_info /
    self.tdi_interp_info as side effects.'''
    # Define a list of new cells to be filled for the new rst file.
    # Also define a list that will gather data for the interpolation scheme, if necessary.
    new = []
    interp_info = []
    tdi_interp_info = []
    obsolete = []
    print('\n\nFinding new seafloor...')
    for j in range(self.ov.shape[1]): # y-coord
        for i in range(self.ov.shape[2]): # x-coord
            for k in range(self.ov.shape[0]): # z-coord of new data
                # Check to see if cursor is below old seafloor and above new seafloor...
                if k > self.oldkmt[j,i] and k < self.ns[j,i]:
                    # ...and if it is, append the location to the 'new cell' array as [depth,lon,lat] indices.
                    new.append([k,j,i])
                # ...and if it is above old seafloor but below the new one (i.e. user added land cells)...
                elif k <= self.oldkmt[j,i] and k > self.ns[j,i]:
                    obsolete.append([k,j,i])
                # ...and if it is above old seafloor, create an organized array of d,lon,lat,value for
                # use in the interpolation methods of this code.
                if k <= self.oldkmt[j,i]:
                    if self.im == 'nn' or self.im == 'nhn' or self.im == 'mq':
                        interp_info.append([self.tdepth[k],self.tlon[j,i],self.tlat[j,i],k,j,i])
                    # Collect the locations of all of the valid ocean cells at sea level for 2D interp...
                    # NOTE(review): nesting reconstructed from the source comments
                    # (valid ocean cells => inside the k <= oldkmt branch) — confirm
                    # against the original file's indentation.
                    if self.tdi == True:
                        if k == 0:
                            tdi_interp_info.append([self.tdepth[k],self.tlon[j,i],self.tlat[j,i],k,j,i])
    # Convert new list into an array...size [x,3], where x is the number of cells to be filled,
    # and the other dimension is 0=k, 1=j, and 2=i. Also convert interp_info into an array,as well as the
    # 2D interpolation info.
    new = np.array(new)
    # Arrays to determine what exists in old paleogeo but not in new...these cells will be set to 0.
    self.obsolete = np.array(obsolete)
    self.bad2d = self.ns == 0
    if self.im == 'nn' or self.im == 'nhn' or self.im == 'mq':
        self.interp_info = np.array(interp_info)
    self.tdi_interp_info = np.array(tdi_interp_info)
    return new
def create_new_rst(self,rstin,rstout,):
'''Fill the empty cells in self.new with the determined values which can be created via any of
the following methods:
Nearest vertical neighbor (nvn)
Nearest horizontal neighbor (nhn)
Nearest neighbor (nn)
Reads in rstin, writes out to rstout
'''
if self.im == 'nn': # Nearest-Neighbor (nn)
# Convert lat, lon, depth grid to x, y, z on data grid:
print('\n\nConverting from LLA to XYZ coordinate system...')
self.xyz_grid = self.geodetic2geocentric(self.interp_info[:,1],self.interp_info[:,2],self.interp_info[:,0])
if self.tdi == True:
# Convert the locations of sea-surface (2D) variables (only sea-surface) to xyz...
print('\n\nConverting from LLA to XYZ coordinate system...for 2D variables...')
self.tdi_xyz_grid = | |
request for private IP on its own. Otherwise, false
"""
EnablePrivateIPRequest = None # bool
"""
Amount of emails sent from this Account
"""
TotalEmailsSent = None # long
"""
Percent of Unknown users - users that couldn't be found
"""
UnknownUsersPercent = None # double
"""
Percent of Complaining users - those, who do not want to receive email from you.
"""
AbusePercent = None # double
"""
Percent of Bounced users
"""
FailedSpamPercent = None # double
"""
Numeric reputation
"""
Reputation = None # double
"""
Amount of emails Account can send daily
"""
DailySendLimit = None # long
"""
Account's current status.
"""
Status = None # string
"""
Maximum size of email including attachments in MB's
"""
EmailSizeLimit = None # int
"""
Maximum number of contacts the Account can have
"""
MaxContacts = None # int
"""
Sending permission setting for Account
"""
SendingPermission = None # ApiTypes.SendingPermission
"""
"""
HasModify2FA = None # bool
"""
"""
ContactsCount = None # int
"""
Detailed settings of Sub-Account.
"""
class SubAccountSettings:
    """Detailed settings of a Sub-Account."""

    Email = None                   # string - proper email address
    RequiresEmailCredits = None    # bool - True if the Account needs credits to send emails
    MonthlyRefillCredits = None    # double - credits added to the Account automatically
    EmailSizeLimit = None          # int - max email size incl. attachments, in MB
    DailySendLimit = None          # int - emails the Account can send daily
    MaxContacts = None             # int - max number of contacts the Account can have
    EnablePrivateIPRequest = None  # bool - True if the Account can request a private IP on its own
    EnableContactFeatures = None   # bool - True to use Contact Delivery Tools
    SendingPermission = None       # ApiTypes.SendingPermission - sending permission setting
    PoolName = None                # string - custom IP Pool used in the sending process
    PublicAccountID = None         # string - public key for limited access (e.g. contact/add)
    Allow2FA = None                # bool? - True to allow two-factor authentication
"""
"""
class SubaccountSummary:
    """Sent-email counters for a sub-account."""

    EmailsSentToday = None      # int
    EmailsSentThisMonth = None  # int
"""
Add-on support options for your Account.
"""
class SupportPlan(Enum):
    """Add-on support options for your Account."""

    Free = 0      # free support
    Priority = 1  # in-app support option for $1/day
    Premium = 2   # in-app real-time chat support option for $7/day
"""
"""
class TagType(Enum):
    """Kind of object a tag applies to."""

    Template = 0
    LandingPage = 1
"""
Template
"""
class Template:
    """An email/landing-page template record."""

    TemplateID = None            # int - ID number of template
    TemplateType = None          # ApiTypes.TemplateType - 0 for API connections
    Name = None                  # string - filename
    DateAdded = None             # DateTime - creation date, YYYY-MM-DDThh:ii:ss format
    Css = None                   # string - CSS style
    Subject = None               # string - default subject of email
    FromEmail = None             # string - default From: email address
    FromName = None              # string - default From: name
    BodyHtml = None              # string - HTML code of email (needs escaping)
    BodyAmp = None               # string - AMP code of email (needs escaping)
    BodyText = None              # string - text body of email
    OriginalTemplateID = None    # int - ID number of original template
    OriginalTemplateName = None  # string
    TemplateScope = None         # ApiTypes.TemplateScope - 0 private, 1 public, 2 mockup
    Tags = None                  # List<string> - template's tags
"""
List of templates (including drafts)
"""
class TemplateList:
    """List of templates (including drafts)."""

    Templates = None            # List<ApiTypes.Template> - list of templates
    TemplatesCount = None       # int - total of templates
    DraftTemplate = None        # List<ApiTypes.Template> - list of draft templates
    DraftTemplatesCount = None  # int - total of draft templates
"""
"""
class TemplateOrder(Enum):
    """Sort order for template listings."""

    DateAddedAscending = 0
    DateAddedDescending = 1
    NameAscending = 2
    NameDescending = 3
    DateModifiedAscending = 4
    DateModifiedDescending = 5
"""
"""
class TemplateScope(Enum):
    """Visibility of a template."""

    Private = 0  # available for this account only
    Public = 1   # available for this account and its sub-accounts
    Draft = 2    # temporary draft, not to be used permanently
"""
Tag used for tagging multiple Templates
"""
class TemplateTag:
    """Tag used for tagging multiple Templates."""

    Name = None  # string - tag's value
    Type = None  # ApiTypes.TagType - tag type
"""
A list of your personal and global Template Tags
"""
class TemplateTagList:
    """A list of your personal and global Template Tags."""

    Tags = None        # List<ApiTypes.TemplateTag> - personal tags
    GlobalTags = None  # List<ApiTypes.TemplateTag> - globally available tags
"""
"""
class TemplateType(Enum):
    """How a template was created and may be edited."""

    RawHTML = 0            # supports any valid HTML
    DragDropEditor = 1     # email template, editable only in the drag-and-drop email editor
    LandingPageEditor = 2  # landing page, editable only in the drag-and-drop landing page editor
"""
Information about tracking link and its clicks.
"""
class TrackedLink:
    """Information about tracking link and its clicks."""

    Link = None     # string - URL clicked
    Clicks = None   # string - number of clicks
    Percent = None  # string - percent of clicks
"""
HTTP or HTTPS Protocal used for link tracking.
"""
class TrackingType(Enum):
    """HTTP or HTTPS protocol used for link tracking."""

    Http = 0              # tracking protocol that is not encrypted
    ExternalHttps = 1     # external SSL certificate for encryption
    InternalCertHttps = 2 # internal SSL certificate for encryption
    LetsEncryptCert = 3   # LetsEncrypt certificate for encryption
"""
Status of ValidDomain to determine how often tracking validation should be performed.
"""
class TrackingValidationStatus(Enum):
    """Status of ValidDomain used to decide how often tracking validation runs."""

    Validated = 0
    NotValidated = 1
    Invalid = 2
    Broken = 3
"""
Account usage
"""
class Usage:
    """Account usage."""

    Email = None         # string - proper email address
    IsSubAccount = None  # bool - True if this Account is a Sub-Account
    List = None          # List<ApiTypes.UsageData> (name kept for API compatibility)
"""
Detailed data about daily usage
"""
class UsageData:
    """Detailed data about daily usage."""

    Date = None              # DateTime - date in YYYY-MM-DDThh:ii:ss format
    JobCount = None          # int - number of finished tasks
    RecipientCount = None    # int - overall number of recipients
    InboundCount = None      # int - number of inbound emails
    AttachmentCount = None   # int - number of attachments sent
    AttachmentsSize = None   # long - size of attachments sent
    Cost = None              # decimal - calculated cost of sending
    PrivateIPCount = None    # int? - number of private IPs
    PrivateIPCost = None     # decimal
    SmsCount = None          # int? - number of SMS
    SmsCost = None           # decimal - overall cost of SMS
    EmailCreditsCost = None  # int? - cost of email credits
    ContactCost = None       # decimal - daily cost of Contact Delivery Tools
    ContactCount = None      # long - number of contacts
    SupportCost = None       # decimal
    EmailCost = None         # decimal
    VerificationCost = None  # decimal
    VerificationCount = None # int
    InboundEmailCost = None  # decimal
    InboundEmailCount = None # int
"""
"""
class ValidationError:
    """A single domain-validation failure."""

    TXTRecord = None  # string
    Error = None      # string
"""
"""
class ValidationStatus:
    """Outcome of a validation run."""

    IsValid = None  # bool
    Errors = None   # List<ApiTypes.ValidationError>
    Log = None      # string
"""
"""
class ValidEmail:
    """A validated sender email address."""

    ValidEmailID = None  # int
    Email = None         # string - proper email address
    Validated = None     # bool
"""
Notification webhook setting
"""
class Webhook:
    """Notification webhook setting."""

    WebhookID = None                   # string - public webhook ID
    Name = None                        # string - filename
    DateCreated = None                 # DateTime? - creation date
    DateUpdated = None                 # DateTime? - last change date
    URL = None                         # string - URL of notification
    NotifyOncePerEmail = None          # bool
    NotificationForSent = None         # bool
    NotificationForOpened = None       # bool
    NotificationForClicked = None      # bool
    NotificationForUnsubscribed = None # bool
    NotificationForAbuseReport = None  # bool
    NotificationForError = None        # bool
"""
Lists web notification options of your account.
"""
class WebNotificationOptions:
    """Lists web notification options of your account."""

    WebNotificationUrl = None               # string - URL that receives notifications to parse and process
    WebNotificationForSent = None           # bool - notify for sent email
    WebNotificationForOpened = None         # bool - notify for opened email
    WebNotificationForClicked = None        # bool - notify for clicked email
    WebnotificationForUnsubscribed = None   # bool - notify for unsubscribed email (name kept as-is for API compatibility)
    WebNotificationForAbuse = None          # bool - notify for complaint email
    WebNotificationForError = None          # bool - notify for bounced email
    WebNotificationNotifyOncePerEmail = None  # bool - notify each type only once per email
"""
Manage your AccessTokens (ApiKeys)
"""
class AccessToken:
@staticmethod
def Add(tokenName, accessLevel, restrictAccessToIPRange=(), type=ApiTypes.AccessTokenType.APIKey, expires=None):
    """
    Add new AccessToken with appropriate AccessLevel (permission).
    string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
    string tokenName - Name of the AccessToken for ease of reference. If you want to create a SMTP access, then tokenname must be a valid email address
    ApiTypes.AccessLevel accessLevel - Level of access (permission) to our API.
    IEnumerable<string> restrictAccessToIPRange - Comma separated list of CIDR notated IP ranges that this token can connect from. (default None)
    ApiTypes.AccessTokenType type - (default ApiTypes.AccessTokenType.APIKey)
    DateTime? expires - (default None)
    Returns string
    """
    # Default changed from the misleading mutable `{}` (whose keys would be
    # joined) to an empty tuple; both produce "" when no ranges are given.
    parameters = {
        'tokenName': tokenName,
        'accessLevel': accessLevel.value,
        'restrictAccessToIPRange': ";".join(map(str, restrictAccessToIPRange)),
        'type': type.value,
        'expires': expires}
    return ApiClient.Request('GET', '/accesstoken/add', parameters)
@staticmethod
def Delete(tokenName, type=None):
    """
    Permanently delete AccessToken from your Account.
    string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
    string tokenName - Name of the AccessToken for ease of reference. If you want to create a SMTP access, then tokenname must be a valid email address
    ApiTypes.AccessTokenType? type - (default None)
    """
    parameters = {
        'tokenName': tokenName,
        # BUGFIX: `type` defaults to None (per the docstring), in which case
        # there is no enum `.value` to read — previously this raised
        # AttributeError for every call that omitted `type`.
        'type': type.value if type is not None else None}
    return ApiClient.Request('GET', '/accesstoken/delete', parameters)
@staticmethod
def List(parameters=None):
    """
    List all the AccessToken's in your Account.
    string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
    Returns List<ApiTypes.AccessToken>
    """
    # Forward the (optional) raw parameter dict straight to the endpoint.
    endpoint = '/accesstoken/list'
    return ApiClient.Request('GET', endpoint, parameters)
@staticmethod
def Update(tokenName, accessLevel, expires, newTokenName=None, restrictAccessToIPRange={}, type=None):
"""
Update AccessToken with a new name or AccessLevel.
string apikey - ApiKey that gives you access to our SMTP and HTTP API's.
string tokenName - Name of the AccessToken for ease of reference. If you want to create a SMTP access, then tokenname must be a valid email | |
)
self._ats.registerCallback(self.msg_filter_callback)
rospy.logdebug("Camera sensor message_filter created.")
# Create state listener
self._tf2_buffer = tf2_ros.Buffer()
self._tf2_listener = tf2_ros.TransformListener(self._tf2_buffer)
# Create publisher to publish the place pose
rospy.logdebug("Creating place pose publisher...")
self._place_pose_pub = rospy.Publisher(
"moveit/place_pose", PoseStamped, queue_size=10
)
rospy.logdebug("Place pose publisher created.")
# Create pose subscriber
rospy.logdebug("Creating grasp pose subscriber...")
self._pose_sub = rospy.Subscriber(
"gqcnn_grasp/pose", PoseStamped, self.get_pose_callback
)
rospy.logdebug("Grasp pose subscriber created.")
def get_pose_callback(self, pose_msg):
    """Subscriber callback for the 'gqcnn_grasp/pose' topic: caches the most
    recently received grasp pose on ``self.pose_msg``.

    Parameters
    ----------
    pose_msg : :py:obj:`!geometry_msgs.PoseStamped`
        Grasp pose message.
    """
    rospy.loginfo("Received grasp pose.")
    # Snapshot the pose so the plan/place services can use it later.
    self.pose_msg = pose_msg
def msg_filter_callback(
    self,
    color_image,
    color_image_rect,
    depth_image_rect,
    camera_info_hd,
    camera_info_qhd,
    camera_info_sd,
):
    """Message-filter callback for the synchronized camera topics needed by
    the panda_autograsp solution. Stores a snapshot of every message on the
    instance so the grasp services can read them on demand.

    Parameters
    ----------
    color_image : :py:obj:`!sensor_msgs.msg.Image`
        The color image.
    color_image_rect : :py:obj:`!sensor_msgs.msg.Image`
        The rectified color image.
    depth_image_rect : :py:obj:`!sensor_msgs.msg.Image`
        The depth image.
    camera_info_hd : :py:obj:`!sensor_msgs.msg.CameraInfo`
        The HD camera info topic.
    camera_info_qhd : :py:obj:`!sensor_msgs.msg.CameraInfo`
        The QHD camera topic.
    camera_info_sd : :py:obj:`!sensor_msgs.msg.CameraInfo`
        The SD camera topic.
    """
    # Cache the latest synchronized message set in one tuple assignment.
    (
        self.color_image,
        self.color_image_rect,
        self.depth_image_rect,
        self.camera_info_hd,
        self.camera_info_qhd,
        self.camera_info_sd,
    ) = (
        color_image,
        color_image_rect,
        depth_image_rect,
        camera_info_hd,
        camera_info_qhd,
        camera_info_sd,
    )
def compute_grasp_service(self, req):
    """Compute a valid grasp from the cached sensor data by delegating to the
    GQ-CNN grasp planning service of the :py:obj:`grasp_planner_server.py`
    module.

    Parameters
    ----------
    req : :py:obj:`panda_autograsp.msg.ComputeGrasp`
        Empty service request.

    Returns
    -------
    bool
        True when the grasp computation service returned a result.
    """
    # Select the plain or bounding-box flavor of the GQ-CNN service.
    if self.bounding_box_enabled:
        self.grasp = self._gqcnn_grasp_planning_bounding_box_srv(
            self.color_image_rect,
            self.depth_image_rect,
            self.camera_info_sd,
            self.bounding_box,
        )
    else:
        self.grasp = self._gqcnn_grasp_planning_srv(
            self.color_image_rect, self.depth_image_rect, self.camera_info_sd
        )

    # Log the resulting pose (position + quaternion) for debugging.
    pos = self.grasp.grasp.pose.position
    quat = self.grasp.grasp.pose.orientation
    rospy.logdebug(
        "Grasp pose result in kinect2_rgb_camera_frame: x={0}, y={1}, z={2}, "
        "q1={3}, q2={4}, q3={5} and q4={6}".format(
            pos.x, pos.y, pos.z, quat.x, quat.y, quat.z, quat.w
        )
    )

    # Success iff the service produced a (truthy) grasp response.
    return bool(self.grasp)
def plan_grasp_service(self, req):
    """Plan a motion to the grasp pose previously computed by
    :py:meth:`compute_grasp_service`.

    Parameters
    ----------
    req : :py:obj:`panda_autograsp.msg.PlanGrasp`
        Empty service request.

    Returns
    -------
    bool
        True when the motion plan was created successfully.
    """
    # Express the grasp pose in the panda_link0 frame, since that is the
    # reference frame of the move group.
    try:
        stamped_copy = copy.copy(self.pose_msg)  # Work on a copy
        # Re-stamp with the current time so the default tf buffer accepts it.
        stamped_copy.header.stamp = rospy.Time.now()
        pose_msg = self._tf2_buffer.transform(
            stamped_copy, "panda_link0", rospy.Duration(1)
        )
    except (
        tf2_ros.LookupException,
        tf2_ros.ConnectivityException,
        tf2_ros.ExtrapolationException,
    ):
        return False

    # Log the transformed pose for debugging.
    pos = pose_msg.pose.position
    quat = pose_msg.pose.orientation
    rospy.logdebug(
        "Grasp pose result in panda_link0: x={0}, y={1}, z={2}, q1={3}, q2={4}, "
        "q3={5} and q4={6}".format(
            pos.x, pos.y, pos.z, quat.x, quat.y, quat.z, quat.w
        )
    )

    # Request a motion plan to the transformed pose.
    result = self._plan_to_pose_srv(pose_msg.pose)
    return bool(result.success)
def plan_place_service(self, req):
    """Plan the motion that places the grasped object at the goal position
    configured in ``cfg/main_config.yaml``. The plan goes through an
    intermediate pick-up pose above the grasp pose so the object is lifted
    instead of slid across the table.

    Parameters
    ----------
    req : :py:obj:`panda_autograsp.msg.PlanPlace`
        Empty service request.

    Returns
    -------
    bool
        Returns a bool to specify whether the plan was executed successfully.
    """
    # Retrieve the starting grasp pose expressed in the panda_link0 frame.
    try:
        grasp_pose_msg = copy.copy(self.pose_msg)  # Create copy
        # Re-stamp with the current time so the default tf buffer accepts it.
        grasp_pose_msg.header.stamp = rospy.Time.now()
        # BUGFIX: transform the re-stamped copy. Previously the original
        # self.pose_msg was passed here, so the updated stamp was ignored
        # (compare plan_grasp_service, which transforms the copy).
        grasp_pose_msg = self._tf2_buffer.transform(
            grasp_pose_msg, "panda_link0", rospy.Duration(1)
        )
    except (
        tf2_ros.LookupException,
        tf2_ros.ConnectivityException,
        tf2_ros.ExtrapolationException,
    ):
        return False

    grasp_position = grasp_pose_msg.pose.position
    grasp_orientation = grasp_pose_msg.pose.orientation

    # Place pose: x/y come from the config file; z and the orientation are
    # copied from the grasp pose so the object is set down at grasp height.
    self.place_pose_msg = PoseStamped()
    self.place_pose_msg.pose.position.x = MAIN_CFG["main"]["place"]["pose"]["x_pos"]
    self.place_pose_msg.pose.position.y = MAIN_CFG["main"]["place"]["pose"]["y_pos"]
    self.place_pose_msg.pose.position.z = grasp_position.z
    self.place_pose_msg.pose.orientation.x = grasp_orientation.x
    self.place_pose_msg.pose.orientation.y = grasp_orientation.y
    self.place_pose_msg.pose.orientation.z = grasp_orientation.z
    self.place_pose_msg.pose.orientation.w = grasp_orientation.w
    header = Header()
    header.stamp = rospy.Time.now()
    header.frame_id = "panda_link0"
    self.place_pose_msg.header = header

    # Publish the place pose (e.g. for visualization).
    self._place_pose_pub.publish(self.place_pose_msg)

    # Intermediate pick-up pose: straight above the grasp pose by the
    # configured pickup height, same orientation.
    self.pickup_pose_msg = PoseStamped()
    self.pickup_pose_msg.pose.position.x = grasp_position.x
    self.pickup_pose_msg.pose.position.y = grasp_position.y
    self.pickup_pose_msg.pose.position.z = (
        grasp_position.z + MAIN_CFG["main"]["pickup"]["height"]
    )
    self.pickup_pose_msg.pose.orientation.x = grasp_orientation.x
    self.pickup_pose_msg.pose.orientation.y = grasp_orientation.y
    self.pickup_pose_msg.pose.orientation.z = grasp_orientation.z
    self.pickup_pose_msg.pose.orientation.w = grasp_orientation.w
    header = Header()
    header.stamp = rospy.Time.now()
    header.frame_id = "panda_link0"
    self.pickup_pose_msg.header = header

    # Plan a path through the pick-up pose to the place pose.
    result = self._plan_to_path_srv(
        [self.pickup_pose_msg.pose, self.place_pose_msg.pose]
    )
    return bool(result.success)
def visualize_grasp_service(self, req):
    """Visualize the currently planned grasp trajectory.

    Parameters
    ----------
    req : :py:obj:`panda_autograsp.msg.VizualizeGrasp`
        Empty service request.

    Returns
    -------
    bool
        True when the visualization service reported success.
    """
    # Delegate to the plan-visualization service and report its outcome.
    outcome = self._visualize_plan_srv()
    return bool(outcome.success)
def execute_grasp_service(self, req):
    """Execute the previously planned grasp trajectory.

    Parameters
    ----------
    req : :py:obj:`panda_autograsp.msg.ExecuteGrasp`
        Empty service request.

    Returns
    -------
    bool
        True when the plan-execution service reported success.
    """
    # Delegate to the plan-execution service and report its outcome.
    outcome = self._execute_plan_srv()
    return True if outcome.success else False
def calibrate_sensor_service(self, req):
    """Perform the sensor/world calibration.

    A chessboard/aruco board placed in the bottom-left corner of the robot
    table is used as reference; the pattern type is configured in the
    ``cfg/main_config.yaml`` file. On success the computed camera/world
    transformation is handed to :py:mod:`tf2_broadcaster` (via
    :py:meth:`broadcast_camera_frame`) so the sensor frame gets updated.

    Parameters
    ----------
    req : :py:obj:`panda_autograsp.msg.ExecuteGrasp`
        Empty service request.

    Returns
    -------
    bool
        True on success (always True in simulation, where no calibration
        is needed).
    """
    # Simulation: nothing to calibrate.
    if self.gazebo:
        return True

    # Real sensor: estimate the camera pose from the calibration pattern.
    retval, self.rvec, self.tvec = self._camera_world_calibration(
        calib_type=self.pose_calib_method
    )
    if not retval:
        return False

    # Broadcast the freshly calibrated camera frame.
    self.broadcast_camera_frame(calib_type=self.pose_calib_method)
    return True
def _camera_world_calibration(self, calib_type=POSE_CALIB_METHOD):
    """Compute the camera/world calibration (external camera matrix).

    Parameters
    ----------
    calib_type : :py:obj:`str`
        Calibration pattern type; ``"chessboard"`` selects the chessboard
        routine, anything else falls through to the aruco-board routine.

    Returns
    -------
    retval : :py:obj:`bool`
        Calibration success bool.
    rvec : :py:obj:`list`
        Rotation vector.
    tvec : :py:obj:`list`
        Translation vector.
    """
    # Dispatch to the estimator matching the configured pattern.
    if calib_type == "chessboard":
        return self.chessboard_pose_estimation()
    return self.aruco_board_pose_estimation()
def aruco_board_pose_estimation(self):
"""Function that performs the camera/world calibration by using
a Aruco board as a reference.
Returns
-------
retval : :py:obj:`bool`
Calibration success bool.
rvec : :py:obj:`list`
Rotation vector.
tvec : :py:obj:`list`
Translation vector.
"""
# Get current time
start_time = rospy.get_time()
# Try till chessboard is found or till try time is over
while rospy.get_time() < start_time + CALIB_TRY_DURATION:
# Retrieve color image and convert to opencv format
color_image = self.color_image
camera_info = self.camera_info_hd
color_image_cv = self._cv_bridge.imgmsg_to_cv2(
color_image, desired_encoding="passthrough"
)
# Get camera information
camera_matrix = np.array(camera_info.K).reshape(3, 3)
dist_coeffs = camera_info.D # Default distortion parameters are 0
# Get gray image
gray = cv2.cvtColor(color_image_cv, cv2.COLOR_BGR2GRAY)
# Create screen display image
# Needed since opencv uses BGR instead of RGB
screen_img = cv2.cvtColor(copy.copy(color_image_cv), cv2.COLOR_RGB2BGR)
# | |
<gh_stars>0
'''Wrapper for dbmi.h
Generated with:
./ctypesgen.py --cpp gcc -E -I/Applications/GRASS-7.8.app/Contents/Resources/include -D_Nullable= -I/Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include -I/Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include -D__GLIBC_HAVE_LONG_LONG -lgrass_dbmiclient.7.8 -lgrass_dbmibase.7.8 /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h -o OBJ.x86_64-apple-darwin18.7.0/dbmi.py
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
_libs = {}
_libdirs = []
from .ctypes_preamble import *
from .ctypes_preamble import _variadic_function
from .ctypes_loader import *
add_library_search_dirs([])
# Begin libraries
_libs["grass_dbmiclient.7.8"] = load_library("grass_dbmiclient.7.8")
_libs["grass_dbmibase.7.8"] = load_library("grass_dbmibase.7.8")
# 2 libraries
# End libraries
# No modules
__int64_t = c_longlong # /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/i386/_types.h: 46
__darwin_off_t = __int64_t # /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/sys/_types.h: 71
fpos_t = __darwin_off_t # /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/_stdio.h: 81
# /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/_stdio.h: 92
class struct___sbuf(Structure):
pass
struct___sbuf.__slots__ = [
'_base',
'_size',
]
struct___sbuf._fields_ = [
('_base', POINTER(c_ubyte)),
('_size', c_int),
]
# /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/_stdio.h: 98
class struct___sFILEX(Structure):
pass
# /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/_stdio.h: 157
class struct___sFILE(Structure):
pass
struct___sFILE.__slots__ = [
'_p',
'_r',
'_w',
'_flags',
'_file',
'_bf',
'_lbfsize',
'_cookie',
'_close',
'_read',
'_seek',
'_write',
'_ub',
'_extra',
'_ur',
'_ubuf',
'_nbuf',
'_lb',
'_blksize',
'_offset',
]
struct___sFILE._fields_ = [
('_p', POINTER(c_ubyte)),
('_r', c_int),
('_w', c_int),
('_flags', c_short),
('_file', c_short),
('_bf', struct___sbuf),
('_lbfsize', c_int),
('_cookie', POINTER(None)),
('_close', CFUNCTYPE(UNCHECKED(c_int), POINTER(None))),
('_read', CFUNCTYPE(UNCHECKED(c_int), POINTER(None), String, c_int)),
('_seek', CFUNCTYPE(UNCHECKED(fpos_t), POINTER(None), fpos_t, c_int)),
('_write', CFUNCTYPE(UNCHECKED(c_int), POINTER(None), String, c_int)),
('_ub', struct___sbuf),
('_extra', POINTER(struct___sFILEX)),
('_ur', c_int),
('_ubuf', c_ubyte * 3),
('_nbuf', c_ubyte * 1),
('_lb', struct___sbuf),
('_blksize', c_int),
('_offset', fpos_t),
]
FILE = struct___sFILE # /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/_stdio.h: 157
dbAddress = POINTER(None) # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 144
dbToken = c_int # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 145
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 151
class struct__db_string(Structure):
pass
struct__db_string.__slots__ = [
'string',
'nalloc',
]
struct__db_string._fields_ = [
('string', String),
('nalloc', c_int),
]
dbString = struct__db_string # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 151
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 153
class struct__dbmscap(Structure):
pass
struct__dbmscap.__slots__ = [
'driverName',
'startup',
'comment',
'next',
]
struct__dbmscap._fields_ = [
('driverName', c_char * 256),
('startup', c_char * 256),
('comment', c_char * 256),
('next', POINTER(struct__dbmscap)),
]
dbDbmscap = struct__dbmscap # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 159
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 166
class struct__db_dirent(Structure):
pass
struct__db_dirent.__slots__ = [
'name',
'isdir',
'perm',
]
struct__db_dirent._fields_ = [
('name', dbString),
('isdir', c_int),
('perm', c_int),
]
dbDirent = struct__db_dirent # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 166
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 173
class struct__db_driver(Structure):
pass
struct__db_driver.__slots__ = [
'dbmscap',
'send',
'recv',
'pid',
]
struct__db_driver._fields_ = [
('dbmscap', dbDbmscap),
('send', POINTER(FILE)),
('recv', POINTER(FILE)),
('pid', c_int),
]
dbDriver = struct__db_driver # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 173
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 180
class struct__db_handle(Structure):
pass
struct__db_handle.__slots__ = [
'dbName',
'dbSchema',
]
struct__db_handle._fields_ = [
('dbName', dbString),
('dbSchema', dbString),
]
dbHandle = struct__db_handle # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 180
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 191
class struct__db_date_time(Structure):
pass
struct__db_date_time.__slots__ = [
'current',
'year',
'month',
'day',
'hour',
'minute',
'seconds',
]
struct__db_date_time._fields_ = [
('current', c_char),
('year', c_int),
('month', c_int),
('day', c_int),
('hour', c_int),
('minute', c_int),
('seconds', c_double),
]
dbDateTime = struct__db_date_time # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 191
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 200
class struct__db_value(Structure):
pass
struct__db_value.__slots__ = [
'isNull',
'i',
'd',
's',
't',
]
struct__db_value._fields_ = [
('isNull', c_char),
('i', c_int),
('d', c_double),
('s', dbString),
('t', dbDateTime),
]
dbValue = struct__db_value # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 200
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 218
class struct__db_column(Structure):
pass
struct__db_column.__slots__ = [
'columnName',
'description',
'sqlDataType',
'hostDataType',
'value',
'dataLen',
'precision',
'scale',
'nullAllowed',
'hasDefaultValue',
'useDefaultValue',
'defaultValue',
'select',
'update',
]
struct__db_column._fields_ = [
('columnName', dbString),
('description', dbString),
('sqlDataType', c_int),
('hostDataType', c_int),
('value', dbValue),
('dataLen', c_int),
('precision', c_int),
('scale', c_int),
('nullAllowed', c_char),
('hasDefaultValue', c_char),
('useDefaultValue', c_char),
('defaultValue', dbValue),
('select', c_int),
('update', c_int),
]
dbColumn = struct__db_column # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 218
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 228
class struct__db_table(Structure):
pass
struct__db_table.__slots__ = [
'tableName',
'description',
'numColumns',
'columns',
'priv_insert',
'priv_delete',
]
struct__db_table._fields_ = [
('tableName', dbString),
('description', dbString),
('numColumns', c_int),
('columns', POINTER(dbColumn)),
('priv_insert', c_int),
('priv_delete', c_int),
]
dbTable = struct__db_table # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 228
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 238
class struct__db_cursor(Structure):
pass
struct__db_cursor.__slots__ = [
'token',
'driver',
'table',
'column_flags',
'type',
'mode',
]
struct__db_cursor._fields_ = [
('token', dbToken),
('driver', POINTER(dbDriver)),
('table', POINTER(dbTable)),
('column_flags', POINTER(c_short)),
('type', c_int),
('mode', c_int),
]
dbCursor = struct__db_cursor # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 238
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 247
class struct__db_index(Structure):
pass
struct__db_index.__slots__ = [
'indexName',
'tableName',
'numColumns',
'columnNames',
'unique',
]
struct__db_index._fields_ = [
('indexName', dbString),
('tableName', dbString),
('numColumns', c_int),
('columnNames', POINTER(dbString)),
('unique', c_char),
]
dbIndex = struct__db_index # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 247
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 256
class struct__db_driver_state(Structure):
pass
struct__db_driver_state.__slots__ = [
'dbname',
'dbschema',
'open',
'ncursors',
'cursor_list',
]
struct__db_driver_state._fields_ = [
('dbname', String),
('dbschema', String),
('open', c_int),
('ncursors', c_int),
('cursor_list', POINTER(POINTER(dbCursor))),
]
dbDriverState = struct__db_driver_state # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 256
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 263
class struct_anon_7(Structure):
pass
struct_anon_7.__slots__ = [
'cat',
'val',
]
struct_anon_7._fields_ = [
('cat', c_int),
('val', c_int),
]
dbCatValI = struct_anon_7 # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 263
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 270
class union_anon_8(Union):
pass
union_anon_8.__slots__ = [
'i',
'd',
's',
't',
]
union_anon_8._fields_ = [
('i', c_int),
('d', c_double),
('s', POINTER(dbString)),
('t', POINTER(dbDateTime)),
]
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 281
class struct_anon_9(Structure):
pass
struct_anon_9.__slots__ = [
'cat',
'isNull',
'val',
]
struct_anon_9._fields_ = [
('cat', c_int),
('isNull', c_int),
('val', union_anon_8),
]
dbCatVal = struct_anon_9 # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 281
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 290
class struct_anon_10(Structure):
pass
struct_anon_10.__slots__ = [
'n_values',
'alloc',
'ctype',
'value',
]
struct_anon_10._fields_ = [
('n_values', c_int),
('alloc', c_int),
('ctype', c_int),
('value', POINTER(dbCatVal)),
]
dbCatValArray = struct_anon_10 # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 290
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 304
class struct__db_connection(Structure):
pass
struct__db_connection.__slots__ = [
'driverName',
'hostName',
'databaseName',
'schemaName',
'port',
'user',
'password',
'keycol',
'group',
]
struct__db_connection._fields_ = [
('driverName', String),
('hostName', String),
('databaseName', String),
('schemaName', String),
('port', String),
('user', String),
('password', String),
('keycol', String),
('group', String),
]
dbConnection = struct__db_connection # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 304
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 316
class struct_anon_11(Structure):
pass
struct_anon_11.__slots__ = [
'count',
'alloc',
'table',
'key',
'cat',
'where',
'label',
]
struct_anon_11._fields_ = [
('count', c_int),
('alloc', c_int),
('table', String),
('key', String),
('cat', POINTER(c_int)),
('where', POINTER(POINTER(c_char))),
('label', POINTER(POINTER(c_char))),
]
dbRclsRule = struct_anon_11 # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 316
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 4
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_Cstring_to_lowercase'):
continue
db_Cstring_to_lowercase = _lib.db_Cstring_to_lowercase
db_Cstring_to_lowercase.argtypes = [String]
db_Cstring_to_lowercase.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 5
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_Cstring_to_uppercase'):
continue
db_Cstring_to_uppercase = _lib.db_Cstring_to_uppercase
db_Cstring_to_uppercase.argtypes = [String]
db_Cstring_to_uppercase.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 6
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_add_column'):
continue
db_add_column = _lib.db_add_column
db_add_column.argtypes = [POINTER(dbDriver), POINTER(dbString), POINTER(dbColumn)]
db_add_column.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 7
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__add_cursor_to_driver_state'):
continue
db__add_cursor_to_driver_state = _lib.db__add_cursor_to_driver_state
db__add_cursor_to_driver_state.argtypes = [POINTER(dbCursor)]
db__add_cursor_to_driver_state.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 8
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_alloc_cursor_column_flags'):
continue
db_alloc_cursor_column_flags = _lib.db_alloc_cursor_column_flags
db_alloc_cursor_column_flags.argtypes = [POINTER(dbCursor)]
db_alloc_cursor_column_flags.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 9
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_alloc_cursor_table'):
continue
db_alloc_cursor_table = _lib.db_alloc_cursor_table
db_alloc_cursor_table.argtypes = [POINTER(dbCursor), c_int]
db_alloc_cursor_table.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 10
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_append_table_column'):
continue
db_append_table_column = _lib.db_append_table_column
db_append_table_column.argtypes = [POINTER(dbTable), POINTER(dbColumn)]
db_append_table_column.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 11
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_alloc_dirent_array'):
continue
db_alloc_dirent_array = _lib.db_alloc_dirent_array
db_alloc_dirent_array.argtypes = [c_int]
db_alloc_dirent_array.restype = POINTER(dbDirent)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 12
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_alloc_handle_array'):
continue
db_alloc_handle_array = _lib.db_alloc_handle_array
db_alloc_handle_array.argtypes = [c_int]
db_alloc_handle_array.restype = POINTER(dbHandle)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 13
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_alloc_index_array'):
continue
db_alloc_index_array = _lib.db_alloc_index_array
db_alloc_index_array.argtypes = [c_int]
db_alloc_index_array.restype = POINTER(dbIndex)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 14
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_alloc_index_columns'):
continue
db_alloc_index_columns = _lib.db_alloc_index_columns
db_alloc_index_columns.argtypes = [POINTER(dbIndex), c_int]
db_alloc_index_columns.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 15
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_alloc_string_array'):
continue
db_alloc_string_array = _lib.db_alloc_string_array
db_alloc_string_array.argtypes = [c_int]
db_alloc_string_array.restype = POINTER(dbString)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 16
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_alloc_table'):
continue
db_alloc_table = _lib.db_alloc_table
db_alloc_table.argtypes = [c_int]
db_alloc_table.restype = POINTER(dbTable)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 17
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_append_string'):
continue
db_append_string = _lib.db_append_string
db_append_string.argtypes = [POINTER(dbString), String]
db_append_string.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 18
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_auto_print_errors'):
continue
db_auto_print_errors = _lib.db_auto_print_errors
db_auto_print_errors.argtypes = [c_int]
db_auto_print_errors.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 19
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_auto_print_protocol_errors'):
continue
db_auto_print_protocol_errors = _lib.db_auto_print_protocol_errors
db_auto_print_protocol_errors.argtypes = [c_int]
db_auto_print_protocol_errors.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 20
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_bind_update'):
continue
db_bind_update = _lib.db_bind_update
db_bind_update.argtypes = [POINTER(dbCursor)]
db_bind_update.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 21
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_calloc'):
continue
db_calloc = _lib.db_calloc
db_calloc.argtypes = [c_int, c_int]
db_calloc.restype = POINTER(c_ubyte)
db_calloc.errcheck = lambda v,*a : cast(v, c_void_p)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 22
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_CatValArray_alloc'):
continue
db_CatValArray_alloc = _lib.db_CatValArray_alloc
db_CatValArray_alloc.argtypes = [POINTER(dbCatValArray), c_int]
db_CatValArray_alloc.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 23
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_CatValArray_realloc'):
continue
db_CatValArray_realloc = _lib.db_CatValArray_realloc
db_CatValArray_realloc.argtypes = [POINTER(dbCatValArray), c_int]
db_CatValArray_realloc.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 24
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_CatValArray_free'):
continue
db_CatValArray_free = _lib.db_CatValArray_free
db_CatValArray_free.argtypes = [POINTER(dbCatValArray)]
db_CatValArray_free.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 25
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_CatValArray_init'):
continue
db_CatValArray_init = _lib.db_CatValArray_init
db_CatValArray_init.argtypes = [POINTER(dbCatValArray)]
db_CatValArray_init.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 26
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_CatValArray_sort'):
continue
db_CatValArray_sort = _lib.db_CatValArray_sort
db_CatValArray_sort.argtypes = [POINTER(dbCatValArray)]
db_CatValArray_sort.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 27
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_CatValArray_sort_by_value'):
continue
db_CatValArray_sort_by_value = _lib.db_CatValArray_sort_by_value
db_CatValArray_sort_by_value.argtypes = [POINTER(dbCatValArray)]
db_CatValArray_sort_by_value.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 28
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_CatValArray_get_value'):
continue
db_CatValArray_get_value = _lib.db_CatValArray_get_value
db_CatValArray_get_value.argtypes = [POINTER(dbCatValArray), c_int, POINTER(POINTER(dbCatVal))]
db_CatValArray_get_value.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 29
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_CatValArray_get_value_int'):
continue
db_CatValArray_get_value_int = _lib.db_CatValArray_get_value_int
db_CatValArray_get_value_int.argtypes = [POINTER(dbCatValArray), c_int, POINTER(c_int)]
db_CatValArray_get_value_int.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 30
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_CatValArray_get_value_double'):
continue
db_CatValArray_get_value_double = _lib.db_CatValArray_get_value_double
db_CatValArray_get_value_double.argtypes = [POINTER(dbCatValArray), c_int, POINTER(c_double)]
db_CatValArray_get_value_double.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 32
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_char_to_lowercase'):
continue
db_char_to_lowercase = _lib.db_char_to_lowercase
db_char_to_lowercase.argtypes = [String]
db_char_to_lowercase.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 33
for _lib in six.itervalues(_libs):
if not hasattr(_lib, | |
<gh_stars>1-10
"""
List of command codes and handler functions for commands sent to and
from the alarm panel, plus code to text mappings.
"""
from concord_helpers import BadMessageException, ascii_hex_to_byte
from concord_tokens import decode_text_tokens
from concord_alarm_codes import ALARM_CODES
# Convenience aliases for the star/hash key codes in KEYPRESS_CODES.
STAR = 0xa
HASH = 0xb

# Key-press byte -> human-readable key name, as reported by the panel.
# NOTE(review): the TP A-F key codes are deliberately non-sequential
# (0x2c, 0x30, 0x2d, 0x33, 0x2e, 0x36) — presumably matching the protocol
# docs; confirm against the Concord automation protocol specification.
KEYPRESS_CODES = {
    0x00: '0',
    0x01: '1',
    0x02: '2',
    0x03: '3',
    0x04: '4',
    0x05: '5',
    0x06: '6',
    0x07: '7',
    0x08: '8',
    0x09: '9',
    0x0a: '*',
    0x0b: '#',
    0x0c: 'Police Panic',
    0x0d: 'Aux. Panic',
    0x0e: 'Fire Panic',
    0x10: 'Lights On',
    0x11: 'Lights Off',
    0x12: 'Lights Toggle',
    0x13: 'Keyswitch On',
    0x14: 'Keyswitch Off',
    0x15: 'Keyswitch Toggle (not implemented)',
    # 0x16 -> 0x1b are undefined
    0x1c: 'Fire TP - Acknowledge',
    0x1d: 'Fire TP - Silence',
    0x1e: 'Fire TP - Fire Test',
    0x1f: 'Fire TP - Smoke Reset',
    0x20: 'Keyfob Disarm',
    0x21: 'Keyfob Arm',
    0x22: 'Keyfob Lights',
    0x23: 'Keyfob Star',
    0x24: 'Keyfob Arm/Disarm',
    0x25: 'Keyfob Lights/Star',
    0x26: 'Keyfob Long Lights',
    0x27: 'Keyfob Direct Arm to Level 3',
    0x28: 'Keyfob Direct Arm to Level 2',
    0x29: 'Keyfob Arm/Star',
    0x2a: 'Keyfob Disarm/Lights',
    # No 0x2b
    0x2c: 'TP A Key',
    0x30: 'TP B Key',
    0x2d: 'TP C Key',
    0x33: 'TP D Key',
    0x2e: 'TP E Key',
    0x36: 'TP F Key',
}
# Protocol docs say: "Bit 6 = held for a few seconds"; not sure how to
# interpret that here; I think it means all the keyfob codes, plus TP
# C & E keys.
# Equipment-list capability byte -> (Description, Optional Data Description).
CAPABILITY_CODES = {
    # Code -> (Description, Optional Data Description)
    0x00: ("Power Supervision", None),
    0x01: ("Access Control", None),
    0x02: ("Analog Smoke", None),  # BUG FIX: was misspelled "Analog Snmoke"
    0x03: ("Audio Listen-In", None),
    0x04: ("SnapCard Supervision", None),
    0x05: ("Microburst", None),
    0x06: ("Dual Phone Line", None),
    0x07: ("Energy Management", None),
    0x08: ("Input Zones", "Number of inputs"),
    0x09: ("Phast/Automation/System Manager", None),
    # BUG FIX: this key was written as 0x00, duplicating the first entry and
    # silently overwriting "Power Supervision"; the 0x00..0x18 sequence shows
    # 0x0a was intended.
    0x0a: ("Phone Interface", None),
    0x0b: ("Relay Outputs", "Number of outputs"),
    0x0c: ("RF Receiver", None),
    0x0d: ("RF Transmitter", None),
    0x0e: ("Parallel Printer", None),
    0x0f: ("Unknown", None),
    0x10: ("LED Touchpad", None),
    0x11: ("1-Line/2-Line/BLT Touchpad", None),
    0x12: ("GUI Touchpad", None),
    0x13: ("Voice Evacuation", None),
    0x14: ("Pager", None),
    0x15: ("Downloadable Code/Data", None),
    0x16: ("JTECH Premise Pager", None),
    0x17: ("Cryptography", None),
    0x18: ("LED Display", None),
}
# Panel-type byte (from the Panel Type command, 0x01) -> product name.
PANEL_TYPES = {
    0x14: "Concord",
    0x0b: "Concord Express",
    0x1e: "Concord Express 4",
    0x0e: "Concord Euro",
    0x0d: "Advent Commercial Fire 250",
    0x0f: "Advent Home Navigator 132",
    0x10: "Advent Commercial Burg 250",
    0x11: "Advent Home Navigator 250",
    0x15: "Advent Commercial Burg 500",
    0x16: "Advent Commercial Fire 500",
    0x17: "Advent Commercial Fire 132",
    0x18: "Advent Commercial Burg 132",
}
# Panel types belonging to the Concord family; these encode hw/sw revisions
# differently from the Advent panels (see cmd_panel_type).
PANEL_TYPES_CONCORD = (0x14, 0x0b, 0x1e, 0x0e)
def ck_msg_len(msg, cmd, desired_len, exact_len=True):
    """
    Validate the length of message *msg* for command *cmd*.

    *desired_len* is the length value that would be in the 'last
    index' byte at the start of the message; actual number of bytes
    will be +1 to account for the length.

    If *exact_len* is True, message must be exactly the desired
    length, otherwise it must be _at least_ the desired length.

    Raises BadMessageException when the check fails; returns None otherwise.
    """
    if exact_len:
        comp = 'exactly'
        length_ok = len(msg) == desired_len + 1
    else:
        comp = 'at least'
        length_ok = len(msg) >= desired_len + 1
    if not length_ok:
        raise BadMessageException(
            "Message too short for command %r, expected %s %d but got %d" %
            (cmd, comp, desired_len, len(msg) - 1))
def bytes_to_num(data):
    """ *data* must be at least 4 bytes long, big-endian order. """
    assert len(data) >= 4
    # Least-significant byte first, then OR in the higher bytes at their
    # shifted positions (masked to 8 bits each, as the original did).
    value = data[3]
    for offset, byte in enumerate((data[2], data[1], data[0]), start=1):
        shift = 8 * offset
        value += (byte << shift) & (0xff << shift)
    return value
def num_to_bytes(num):
    """Split *num* into its four big-endian bytes, most significant first."""
    return [(num >> shift) & 0xff for shift in (24, 16, 8, 0)]
def cmd_panel_type(msg):
    """
    Decode a Panel Type message (command 0x01).

    Returns a dict with 'panel_type', 'is_concord', 'hardware_revision',
    'software_revision' and 'serial_number'.  Concord-family panels encode
    the hardware revision as a letter/digit pair; other panels report
    dotted numeric revisions.
    """
    ck_msg_len(msg, 0x01, 0x0b)
    # BUG FIX: the assert message used "0x02x" (missing '%'), so a failing
    # assert raised TypeError instead of showing the offending byte.
    assert msg[1] == 0x01, "Unexpected command type 0x%02x" % msg[1]
    panel_type = msg[2]
    d = { 'panel_type': PANEL_TYPES.get(panel_type, "Unknown Panel Type 0x%02x" % panel_type) }
    if panel_type in PANEL_TYPES_CONCORD:
        # Interpret Concord hw/sw revision numbers.
        # Really not sure about this. XXX
        d['is_concord'] = True
        # Example (hex):
        #  0b0114040716690003834575 -- Jesse's system -- Panel type command
        #    0b = command len
        #    01 = command code
        #    14 = panel type, (= Concord)
        #    04 = HW rev high (= 'D')
        #    07 = HW rev low (= 7)
        #      --> HW Rev = D7
        #    16 = SW rev high
        #    69 = SW rev low
        #      --> ?
        #    00 03 83 45 = Serial number
        #    75 = Checksum
        # My panel:
        #   HW Rev = G1
        #   SW Rev = 327680 = 0x050000
        #   Serial number = 19419753
        # Hw rev is letter/digit pair, first byte represents 'A' as 1,
        # Second byte represents '0' as 0.
        if 0 < msg[3] < 27:
            letter = chr(ord('A')-1+msg[3])
        else:
            letter = '?'
        if 0 <= msg[4] <= 9:
            digit = chr(ord('0')+msg[4])
        else:
            digit = '?'
        hw_rev = letter + digit
        sw_rev = (msg[5] << 8) + msg[6]
    else:
        d['is_concord'] = False
        hw_rev = "%d.%d" % (msg[3], msg[4])
        sw_rev = "%d.%d" % (msg[5], msg[6])
    d['hardware_revision'] = hw_rev
    d['software_revision'] = sw_rev
    # Serial number occupies the four bytes before the trailing checksum.
    d['serial_number'] = bytes_to_num(msg[7:])
    return d
def cmd_automation_event_lost(msg):
    """
    (From protocol docs) Panel's automation buffer has overflowed.
    Automation modules should respond to this with request for Dynamic
    Data Refresh and Full Equipment List Request.

    The message carries no payload, so an empty dict is returned.
    """
    return dict()
# Symbolic names for the individual zone-state bits.
TRIPPED = 'Tripped'
FAULTED = 'Faulted'
ALARM = 'Alarm'
TROUBLE = 'Trouble'
BYPASSED = 'Bypassed'

# Zone-state bitmask -> name; a status byte may combine several of these
# (decoded by build_state_list).
ZONE_STATES = {
    0x01: TRIPPED,
    0x02: FAULTED,
    0x04: ALARM,
    0x08: TROUBLE,
    0x10: BYPASSED,
}
def build_state_list(state_code, state_dict):
    """
    Decode the bitmask *state_code* into a list of state names using
    *state_dict* (bit value -> name), ordered by ascending bit value.
    """
    # .items() instead of the Python-2-only .iteritems(): identical behavior
    # on Python 2 and keeps this helper portable to Python 3.
    return [state_name
            for bitval, state_name in sorted(state_dict.items())
            if bitval & state_code]
# Concord zone types only
# Zone-type byte (from Zone Data, command 0x03) -> wiring/radio type.
ZONE_TYPES = {
    0: 'Hardwired',
    1: 'RF',
    2: 'RF Touchpad',
}
def cmd_zone_status(msg):
    """
    Decode a Zone Status message (command 0x21): partition, area, 16-bit
    zone number, and the decoded list of zone-state names.
    """
    ck_msg_len(msg, 0x21, 0x07)
    # BUG FIX: the assert message used "0x02x" (missing '%'), so a failing
    # assert raised TypeError instead of showing the offending byte.
    assert msg[1] == 0x21, "Unexpected command type 0x%02x" % msg[1]
    d = { 'partition_number': msg[2],
          'area_number': msg[3],
          'zone_number': (msg[4] << 8) + msg[5],
          'zone_state': build_state_list(msg[6], ZONE_STATES)
          }
    return d
def cmd_zone_data(msg):
    """
    Decode a Zone Data message (command 0x03): partition/area/group, 16-bit
    zone number, zone type and state, plus the optional zone text (token
    bytes and their decoded string) when the message carries extra bytes.
    """
    ck_msg_len(msg, 0x03, 0x09, exact_len=False)
    # BUG FIX: the assert message used "0x02x" (missing '%'), so a failing
    # assert raised TypeError instead of showing the offending byte.
    assert msg[1] == 0x03, "Unexpected command type 0x%02x" % msg[1]
    d = { 'partition_number': msg[2],
          'area_number': msg[3],
          'group_number': msg[4],
          'zone_number': (msg[5] << 8) + msg[6],
          'zone_type': ZONE_TYPES.get(msg[7], 'Unknown'),
          'zone_state': build_state_list(msg[8], ZONE_STATES),
          'zone_text': '',
          'zone_text_tokens': [ ],
          }
    # Any bytes past the fixed header (and before the trailing checksum)
    # are display-text tokens naming the zone.
    if len(msg) > 0x09 + 1:
        d['zone_text'] = decode_text_tokens(msg[9:-1])
        d['zone_text_tokens'] = msg[9:-1]
    return d
# Concord user number values only.
# Special (reserved) user numbers; lower values are regular/partition users
# and are decoded arithmetically in cmd_arming_level.
USER_NUMBERS = {
    246: "System Master Code",
    247: "Installer Code",
    248: "Dealer Code",
    249: "AVM Code",
    250: "Quick Arm",
    251: "Key Switch Arm",
    252: "System",
}

# Arming-level byte -> level name (see cmd_arming_level).
ARMING_LEVELS = {
    0: 'Zone Test',
    1: 'Off',
    2: 'Home/Perimeter',
    3: 'Away/Full',
    4: 'Night',
    5: 'Silent',
}
def cmd_arming_level(msg):
    """
    Decode an Arming Level message (command 0x22, subcommand 0x01).

    Returns partition/area, the raw user-number bytes, a human-readable
    'user_info' string describing who changed the level, and the decoded
    arming level name/code.
    """
    ck_msg_len(msg, (0x22, 0x01), 0x08)
    assert (msg[1], msg[2]) == (0x22, 0x01), "Unexpected command type"
    un = msg[6]
    # Reserved codes first, then the arithmetic ranges for regular users
    # and per-partition master/duress codes.
    if un in USER_NUMBERS:
        user_info = USER_NUMBERS[un]
    elif un <= 229:
        user_info = 'Regular User %d' % un
    elif un <= 237:
        user_info = 'Partition %d Master Code' % (un - 230)
    elif un <= 245:
        user_info = 'Partition %d Duress Code' % (un - 238)
    else:
        user_info = 'Unknown Code'
    return {
        'partition_number': msg[3],
        'area_number': msg[4],
        'is_keyfob': msg[5] > 0,
        'user_number_high': msg[5],
        'user_number_low': msg[6],
        'user_info': user_info,
        'arming_level': ARMING_LEVELS.get(msg[7], 'Unknown Arming Level'),
        'arming_level_code': msg[7],
    }
def decode_alarm_type(gen_code, spec_code):
    """
    Map a (general, specific) alarm code pair to their names via
    ALARM_CODES; unknown codes decode to 'Unknown'.
    """
    try:
        gen_type, spec_type_dict = ALARM_CODES[gen_code]
    except KeyError:
        return 'Unknown', 'Unknown'
    return gen_type, spec_type_dict.get(spec_code, 'Unknown')
def cmd_entry_exit_delay(msg):
    """
    Decode an Entry/Exit Delay message (command 0x22, subcommand 0x03):
    partition/area, delay length in seconds, and a list of flag words
    describing the delay kind (standard/extended, entry/exit, start/end).
    """
    assert (msg[1], msg[2]) == (0x22, 0x03), "Unexpected command type"
    ck_msg_len(msg, (0x22, 0x03), 0x08)
    d = { 'partition_number': msg[3],
          'area_number': msg[4],
          # 16-bit big-endian seconds value, padded to reuse bytes_to_num.
          'delay_seconds': bytes_to_num([0, 0, msg[6], msg[7]]),
          }
    flags = msg[5]
    bits54 = (flags >> 4) & 0x3
    # NOTE(review): the names say bits 6 and 7 but the shifts extract bits 5
    # and 6, so 'bit6' overlaps the bits54 field — confirm the intended bit
    # positions against the panel protocol documentation.
    bit6 = (flags >> 5) & 1
    bit7 = (flags >> 6) & 1
    v = [ ]
    if bits54 == 0:
        v.append('standard')
    elif bits54 == 1:
        v.append('extended')
    elif bits54 == 2:
        v.append('twice extended')
    if bit6 == 1:
        v.append('exit delay')
    else:
        v.append('entry delay')
    if bit7 == 1:
        v.append('end delay')
    else:
        v.append('start delay')
    d['delay_flags'] = v
    return d;
# Concord sources
# Alarm/trouble source-type byte -> source name.
ALARM_SOURCE_TYPE = {
    0: "Bus Device",
    1: "Local Phone",
    2: "Zone",
    3: "System",
    4: "Remote Phone",
}
# Reverse map of alarm source name to type code.
# .items() instead of the Python-2-only .iteritems(): identical behavior on
# Python 2 and keeps the module importable on Python 3.
ALARM_SOURCE_NAME = dict((v, k) for k, v in ALARM_SOURCE_TYPE.items())
def cmd_alarm_trouble(msg):
assert (msg[1], msg[2]) == (0x22, 0x02), "Unexpected command type"
ck_msg_len(msg, (0x22, 0x02), 0x0d)
d = { 'partition_number': msg[3],
'area_number': | |
import sys
import argparse
from .version import VERSION
import string, random
import re
# This function tries different methods to download the web page
def gethtml(url, headers=None, retry=True):
    """
    Download *url* and return the raw body bytes, or None on failure.

    If the first request raises (e.g. the url lacks a scheme), retry once
    with an explicit "https://" prefix and then an "http://" prefix.
    TLS certificate verification is deliberately disabled (verify=False).
    """
    # Delay importing libraries
    import urllib3
    import requests
    urllib3.disable_warnings()
    try:
        if headers is None:
            headers = {}
        html = requests.get(url, headers=headers, verify=False)
        if html.status_code != 200:
            print('could not retrieve url {}'.format(url))
            return None
        html = html.content
    except Exception:
        if retry:
            html = gethtml("https://{}".format(url), headers=headers, retry=False)
            if html is None:
                html = gethtml("http://{}".format(url), headers=headers, retry=False)
        else:
            # BUG FIX: this error message reported sys.argv[1] instead of the
            # url actually being fetched, which is wrong whenever gethtml is
            # called with anything other than the first CLI argument.
            print('could not retrieve url {}'.format(url))
            return None
    return html
# This class simply stores the variable name using our format
class StoreVar(argparse._AppendAction):
    """
    argparse action that appends each option value wrapped in the internal
    variable-placeholder format '%#<name>#' (consumed later by urlf).
    """

    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        # This action always consumes exactly one value; forbid overrides.
        if nargs is not None:
            raise ValueError("nargs not allowed")
        super().__init__(option_strings, dest, nargs=1, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        wrapped = "%#" + values[0] + "#"
        super().__call__(parser, namespace, wrapped, option_string)
# This function explodes the variables and their values from a query string, and returns them as a dictionary
def getvars(q):
    """
    Explode the variables and values of query string *q* (e.g. "a=1&b=2")
    into a dict.  A name without '=' maps to ''.  Returns {} for None.
    """
    vars = {}
    if q is None:
        return vars
    for pair in q.split('&'):
        # BUG FIX: split on the first '=' only, so values that themselves
        # contain '=' (e.g. "next=/a?x=1") are preserved intact; the padding
        # "" covers name-only pairs.
        name_value = pair.split('=', 1) + [""]
        vars[name_value[0]] = name_value[1]
    return vars
# Shorthand to print "" if value is None
def nprint(v):
    """Print *v*, or an empty line when *v* is None."""
    if v is None:
        print("")
    else:
        print(v)
# Shorthand to return "" if value is None
def rprint(v):
    """Return *v* unchanged, or '' when *v* is None."""
    if v is None:
        return ""
    return v
# A simple id generator; returns a string
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    """Return a random angle-bracketed id such as '<AB12CD>'."""
    body = ''.join(random.choice(chars) for _ in range(size))
    return "<" + body + ">"
# The main function for urlf command
def urlf():
    """
    Entry point for the ``urlf`` command.

    Parses the command line, builds a format string from the selected
    component flags (or uses the user-supplied one), then prints the
    formatted components of every input url (urls may come from stdin
    when the single positional argument is '-').
    """
    # Delay importing libraries
    from urllib.parse import urlparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-U", "--url", action="append_const", const="%U", dest="format", help="the input url as is")
    parser.add_argument("-s", "--scheme", action="append_const", const="%s", dest="format", help="shows the scheme of the url (e.g. https)")
    parser.add_argument("-u", "--username", action="append_const", const="%u", dest="format", help="shows the username to accede to the url (i.e. user in https://user@pass:myserver.com)")
    parser.add_argument("-w", "--password", action="append_const", const="%w", dest="format", help="shows the password to accede to the url (i.e. pass in https://user@pass:myserver.com)")
    parser.add_argument("-H", "--hostname", action="append_const", const="%H", dest="format", help="shows the hostname in the url (i.e. myserver.com in https://myserver.com/my/path)")
    parser.add_argument("-p", "--port", action="append_const", const="%p", dest="format", help="shows the port in the url (i.e. 443 in https://myserver.com:443/my/path)")
    parser.add_argument("-P", "--path", action="append_const", const="%P", dest="format", help="shows the path in the url (i.e. my/path in https://myserver.com/my/path)")
    parser.add_argument("-q", "--query", action="append_const", const="%q", dest="format", help="shows the query in the url (i.e. q=1&r=2 in https://myserver.com/my/path?q=1&r=2)")
    parser.add_argument("-m", "--parameters", action="append_const", const="%m", dest="format", help="shows the parameters to accede to the url (i.e. param in https://myserver.com/my/path;param?q=1&r=2)")
    parser.add_argument("-f", "--fragment", action="append_const", const="%f", dest="format", help="shows the fragment in the url (i.e. sec1 in https://myserver.com/my/path#sec1)")
    parser.add_argument("-v", "--var", action=StoreVar, metavar="var name", dest="format", help="show the value of a var in the query string (this parameter may appear multiple times)", type=str)
    parser.add_argument("-j", "--join-string", action="store", dest="separator", default=" ", help="character (or string) used to separate the different fields (default: <blank space>)", type=str)
    parser.add_argument("-F", "--format-string", action="store", metavar="format string", dest="fmtstring", default=None, help="user defined format string", type=str)
    parser.add_argument("-V", "--version", action="version", version="%(prog)s {}".format(VERSION))
    parser.add_argument("urls", nargs="+", help="the urls to be formatted", type=str)
    args = parser.parse_args()
    # This way we will interpret escaped strings like \n
    args.separator = args.separator.encode("utf-8").decode('unicode-escape')
    # If provided a custom format string, let's ignore the rest
    if args.fmtstring is None:
        if args.format is None:
            format_string = "%U"
        else:
            format_string = args.separator.join(args.format)
    else:
        format_string = args.fmtstring
    # Random sentinel used to protect literal '%%' during substitution.
    id1 = "#" + id_generator() + "#"
    linejoin = "\n"
    # If the file name is '-' read from the stdin
    if args.urls[0] == "-":
        urls = map(lambda x: x.decode(), sys.stdin.buffer.readlines())
    else:
        urls = args.urls
    result = []
    for url in urls:
        url = url.strip()
        parsed = urlparse(url)
        # Save the %%
        result_string = format_string.replace("%%", id1)
        # Replace the special values
        result_string = result_string.replace("%U", rprint(url))
        result_string = result_string.replace("%s", rprint(parsed.scheme))
        result_string = result_string.replace("%u", rprint(parsed.username))
        result_string = result_string.replace("%w", rprint(parsed.password))
        result_string = result_string.replace("%H", rprint(parsed.hostname))
        # BUG FIX: parsed.port is an int when the url carries a port, and
        # str.replace(..., int) raises TypeError; stringify it (None still
        # becomes "" via rprint).
        result_string = result_string.replace("%p", str(rprint(parsed.port)))
        result_string = result_string.replace("%P", rprint(parsed.path))
        result_string = result_string.replace("%q", rprint(parsed.query))
        result_string = result_string.replace("%m", rprint(parsed.params))
        result_string = result_string.replace("%f", rprint(parsed.fragment))
        # Find variables and replace their values
        variables = re.finditer(r'%#(?P<varname>[^#]*)#', result_string)
        vars = getvars(parsed.query)
        # Matches were located on the unmodified string, so each in-place
        # substitution shifts the later match offsets by the difference
        # between the '%#name#' placeholder and the substituted value.
        shift = 0
        if variables is not None:
            for v in variables:
                varname = v.group("varname")
                value = rprint(vars[varname] if varname in vars else None)
                # start-2 skips the leading '%#', end+1 the trailing '#'.
                result_string = result_string[:v.start("varname")-2-shift] + value + result_string[v.end("varname")+1-shift:]
                size = v.end("varname") - v.start("varname") + 3 - len(str(value))
                shift += size
        # Restore the %%
        result_string = result_string.replace(id1, "%%")
        result.append(result_string)
    print(linejoin.join(result))
# The main function for htmlq command
def htmlq():
# Delay importing libraries
from bs4 import BeautifulSoup as Soup
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--filename", action="store", dest="filename", default=None, help="file to read (default: stdin)")
parser.add_argument("-u", "--url", action="store", dest="url", default=None, help="url to parse (it will be downloaded and queried); it is incompatible with the use of a file")
parser.add_argument("-U", "--user-agent", action="store", dest="user_agent", default=None, help="use an user agent string to get the web page from the URL")
parser.add_argument("-a", "--attr", action="append", dest="attribute", default=None, help="attributes to get from the html tags (the whole tag if not provided); may appear more than once", type=str)
parser.add_argument("-r", "--rm", action="append", dest="rmquery", default=None, help="query string of elements to remove on each element found; may appear more than once", type=str)
parser.add_argument("-s", "--separator", action="store", dest="separator", default="\0", help="character (or characters) used to separate the different results from the query (not the fields in a result) (default: 0 char)", type=str)
parser.add_argument("-S", "--field-separator", action="store", dest="fieldseparator", default=",", help="character (or characters) used to separate the value of the different attributes of an entry resulting from the query (default: ,)", type=str)
parser.add_argument("-n", "--no-empty-lines", action="store_true", dest="noemptylines", default=False, help="omits the empty resulting entries (i.e. those equal to \"\")")
parser.add_argument("-N", "--no-empty-attr", action="store_true", dest="noemptyattr", default=False, help="omits the empty values of attributes (i.e. those equal to \"\")")
parser.add_argument("-1", "--only-first", action="store_true", dest="onlyfirst", default=False, help="if multiple entries are found in the html document, show only the first one")
parser.add_argument("-V", "--version", action="version", version="%(prog)s {}".format(VERSION))
parser.add_argument("query", help="query string for the html content", type=str)
args = parser.parse_args()
# The default behaviour is to read from stdin
if args.url is None and args.filename is None:
args.filename = "-"
# It cannot be both a file and an url; if so, deactivate the url
if args.url is not None and args.filename is not None:
print("cannot parse both a file and a url. Only the file will be parsed")
args.url = None
# If it is an url, retrieve it
if args.url is not None:
headers = {}
if args.user_agent is not None:
headers["User-Agent"] = args.user_agent
html = gethtml(args.url, headers, True)
if html is None:
print("failed to get url {}".format(args.url))
sys.exit(-1)
# This enables parsing backslashes in python 3 (e.g. \n)
args.separator = args.separator.encode("utf-8").decode('unicode-escape')
args.fieldseparator = args.fieldseparator.encode("utf-8").decode('unicode-escape')
# If it is a file, read it
if args.filename is not None:
if args.filename == '-':
html = b''.join(sys.stdin.buffer.readlines())
else:
try:
html = b''.join(open(args.filename, "rb").readlines())
except:
print("failed to open file {}".format(args.filename))
sys.exit(-1)
# We accept multiple attributes as a list of comma separated
if args.attribute is not None:
args.attribute = [ b.strip() for a in args.attribute for b in a.split(',') ]
# If there is no query, there is no result
if args.query is None:
print("no query introduced")
sys.exit(0)
# If there is no html, there is no job
if html is None:
print("no input found in stdin")
sys.exit(1)
try:
soup = Soup(html, "html5lib")
# We'll select elements using the query
wpmeta = soup.select(args.query)
# If we only want the first result, let's omit the rest
if args.onlyfirst and len(wpmeta) > 0:
wpmeta = [ wpmeta[0] ]
# If wanted to remove parts of the results... let's remove them
if args.rmquery is not None:
for w in wpmeta:
for rm in args.rmquery:
m = w.find(rm)
if m is not None: m.decompose()
# If no args requested, we want the whole html tag
if args.attribute is None:
result = [ str(w) for w in wpmeta ]
else:
# Otherwise process the parameters
result = []
for w in wpmeta:
result_i = []
for a in args.attribute:
r = None
if a == '.':
r = w.string
else:
if w.has_attr(a):
r = w[a]
if isinstance(r, list): r = " ".join(r)
if r is None: r = ""
result_i.append(r)
# Remove emtpy attrs if requested
| |
<gh_stars>0
#!/usr/bin/python3
import os
import shutil
import sys
import math
# import copy
import numpy as np
import Tool_Box
from ArgParsing import parse_clubb_args
import Supporting as sp
def main():
    """
    Pipeline driver: read the combined BB file, cluster bins by RDR/BAF
    across samples (optionally with bootstrapped point clouds and a
    tolerance-based refinement pass), then write the per-bin BBC output
    and the per-cluster segment output.
    """
    sp.log(msg="# Parsing and checking input arguments\n", level="STEP")
    args = parse_clubb_args()
    sp.log(msg="# Reading the combined BB file\n", level="STEP")
    combo, samples = readBB(args["bbfile"])
    sp.log(msg="# Format data to cluster\n", level="STEP")
    points, bintoidx = getPoints(data=combo, samples=samples)
    clouds = None
    if args["cloud"] > 0:
        sp.log(msg="# Bootstrap each bin for clustering\n", level="STEP")
        clouds = \
            generateClouds(points=points, density=args["cloud"], seed=args["seed"], sdeven=args["ratiodeviation"],
                           sdodd=args["bafdeviation"])
    sp.log(msg="# Clustering bins by RD and BAF across tumor samples\n", level="STEP")
    # NOTE(review): cluster() receives args["outsegments"] only to derive its
    # temporary working directory name — confirm that is intentional.
    mus, sigmas, clusterAssignments, numPoints, numClusters = \
        cluster(points=points, output=args["outsegments"], samples=samples, clouds=clouds, K=args["initclusters"],
                sf=args["tuning"], restarts=args['restarts'], bnpydir=args["bnpydir"])
    if args['rdtol'] > 0.0 or args['baftol'] > 0.0:
        sp.log(msg="# Refining clustering using given tolerances\n", level="STEP")
        before = len(set(clusterAssignments))
        clusterAssignments, numClusters = \
            refineClustering(combo=combo, assign=clusterAssignments, assignidx=bintoidx, samples=samples,
                             rdtol=args['rdtol'], baftol=args['baftol'])
        sp.log(msg='The number of clusters have been reduced from {} to {} with given tolerances\n'
               .format(before, numClusters), level='INFO')
    # names = list(samples).sort()
    sp.log(msg="# Writing BBC output with resulting clusters\n", level="STEP")
    # NOTE(review): outbins/outsegments are never explicitly closed, and the
    # two output arguments are tested differently ('is None' vs truthiness).
    if args["outbins"] is None:
        outbins = sys.stdout
    else:
        outbins = open(args["outbins"], 'w')
    outbins.write("#CHR\tSTART\tEND\tSAMPLE\tRD\t#SNPS\tCOV\tALPHA\tBETA\tBAF\tCLUSTER\n")
    for key in sorted(combo, key=(lambda x: (sp.numericOrder(x[0]), int(x[1]), int(x[2])))):
        for sample in combo[key]:
            outbins.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n"
                          .format(key[0], key[1], key[2], sample[0], sample[1], sample[2], sample[3], sample[4],
                                  sample[5], sample[6], clusterAssignments[bintoidx[key]]))
    sp.log(msg="# Segmenting bins\n", level="STEP")
    clusters = {cluster: set(key for key in combo if clusterAssignments[bintoidx[key]] == cluster) for cluster in set(clusterAssignments)}
    segments = segmentBins(bb=combo, clusters=clusters, samples=samples)
    if args["diploidbaf"]:
        sp.log(msg="# Determining the largest cluster as diploid or tetraploid and rescale all the clusters inside "
                   "the threshold accordingly\n", level="STEP")
        segments = scaleBAF(segments=segments, samples=samples, diploidbaf=args["diploidbaf"])
    sp.log(msg="# Writing REF output with resulting segments\n", level="STEP")
    if not args["outsegments"]:
        outsegments = sys.stdout
    else:
        outsegments = open(args["outsegments"], 'w')
    outsegments.write("#ID\tSAMPLE\t#BINS\tRD\t#SNPS\tCOV\tALPHA\tBETA\tBAF\n")
    for key in sorted(segments):
        for sample in segments[key]:
            record = segments[key][sample]
            outsegments.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n"
                              .format(key, sample, record[0], record[1], record[2], record[3], record[4], record[5],
                                      record[6]))
def readBB(bbfile):
    """
    Parse a combined BB file.

    Each non-comment line carries: chromosome, start, end, sample, RD,
    #SNPs, coverage, alpha, beta, BAF.  Returns a pair
    ``(bins, samples)`` where ``bins`` maps ``(chromosome, start, end)``
    to the list of per-sample record tuples in file order, and
    ``samples`` is the set of sample names seen.
    """
    bins = {}
    samples = set()
    with open(bbfile, 'r') as handle:
        for raw in handle:
            if raw.startswith('#'):
                continue
            fields = raw.strip().split()
            key = (fields[0], int(fields[1]), int(fields[2]))
            sample = fields[3]
            record = (sample,
                      float(fields[4]),   # RD
                      int(fields[5]),     # #SNPs
                      float(fields[6]),   # coverage
                      int(fields[7]),     # alpha
                      int(fields[8]),     # beta
                      float(fields[9]))   # BAF
            samples.add(sample)
            bins.setdefault(key, []).append(record)
    return bins, samples
def getPoints(data, samples):
    """
    Flatten per-bin records into clustering points.

    For every bin (in genomic order) the point is the concatenation of
    [RD, BAF] for each sample.  Returns the list of points and a dict
    mapping each bin key to its row index.
    """
    points = []
    bintoidx = {}
    ordering = sorted(data, key=(lambda x: (sp.numericOrder(x[0]), int(x[1]), int(x[2]))))
    for idx, bi in enumerate(ordering):
        per_sample = {}
        for record in data[bi]:
            if record[0] in per_sample:
                raise ValueError(sp.error("Found a bin ({}, {}) in chromosome {} defined multiple times for the same "
                                          "sample!".format(bi[1], bi[2], bi[0])))
            # Keep only RD (record[1]) and BAF (last field) per sample.
            per_sample[record[0]] = [record[1], record[-1]]
        if len(per_sample) != len(samples):
            raise ValueError(sp.error("Found a bin ({}, {}) in chromosome {} that is not covered in all the samples!"
                                      .format(bi[1], bi[2], bi[0])))
        points.append([value for sample in samples for value in per_sample[sample]])
        bintoidx[bi] = idx
    return points, bintoidx
def cluster(points, output, samples, clouds=None, K=15, sf=0.01, restarts=10, bnpydir=None):
    """
    Clusters a set of data points lying in an arbitrary number of clusters
    using BNPY's Dirichlet-process mixture model.

    Arguments:
        points (list of lists of floats): data points to be clustered.
        output (string): path whose stem names the BNPY temporary directory.
        samples (iterable of strings): sample names (used for labeling only).
        clouds (list of lists of floats): optional bootstrapped points that
                    are clustered together with *points* but not assigned.
        K (int): initial number of clusters (capped at the number of points).
        sf (float): tuning parameter for clustering; used to determine initial size of
                    distribution covariances. Small sf indicates a belief that clusters
                    are of small size.
        restarts (int): number of independent BNPY runs (nTask).
        bnpydir (string): directory containing the bnpy package.

    Returns:
        mus (list of lists of floats): list of cluster means.
        sigmas (list of 2D lists of floats): list of cluster covariances.
        clusterAssignments (list of ints): the assignment of each interval to a cluster,
                    where an entry j at index i means the ith interval has been
                    assigned to the jth meta-interval.
        numPoints (list of ints): number of points assigned to each cluster
                    (counted over points AND clouds).
        numClusters (int): the number of clusters.
    """
    sp.log(msg="## Loading BNPY\n", level="INFO")
    # BNPY writes its run artifacts under $BNPYOUTDIR; use a fresh temp dir.
    tmp = os.path.splitext(output)[0] + "_TMPDIR/"
    if os.path.exists(tmp):
        shutil.rmtree(tmp)
    os.makedirs(tmp)
    os.environ["BNPYOUTDIR"] = tmp
    sys.path.append(bnpydir)
    import bnpy
    sp.log(msg="## Clustering...\n", level="INFO")
    total = list(points)
    if clouds:
        total.extend(list(clouds))
    npArray = np.array(total)
    Data = bnpy.data.XData(X=npArray)
    Data.name = "Clustering tumor samples by RD and BAF"
    Data.summary = "Clustering the following samples: {}".format(",".join(samples))
    if Data.X.shape[0] < K:
        K = Data.X.shape[0]
    # Different BNPY releases expose the memoized-VB algorithm under
    # different names; pick whichever is available.
    if hasattr(bnpy.learnalg, "MOVBBirthMergeAlg"):
        hmodel, Info = bnpy.run(Data, 'DPMixtureModel', 'DiagGauss', 'moVB', nLap=100, nTask=restarts, K=K,
                                moves='birth,merge', ECovMat='eye', sF=sf, doWriteStdOut=False)
    elif hasattr(bnpy.learnalg, "MemoVBMovesAlg"):
        hmodel, Info = bnpy.run(Data, 'DPMixtureModel', 'DiagGauss', 'memoVB', nLap=100, nTask=restarts, K=K,
                                moves='birth,merge', ECovMat='eye', sF=sf, doWriteStdOut=False)
    else:
        raise ValueError(sp.error("BNPY learnalg module does not contain either MOVBBirthMergeAlg or MemoVBMovesAlg, "
                                  "please use the right version!"))
    observationModel = hmodel.obsModel
    numClusters = observationModel.K
    mus = [observationModel.get_mean_for_comp(k=i) for i in range(numClusters)]
    sigmas = [observationModel.get_covar_mat_for_comp(k=i) for i in range(numClusters)]
    # Assignments reported to the caller come from the real points only...
    target = bnpy.data.XData(X=(np.array(points)))
    LP = hmodel.calc_local_params(target)
    targetAssignments = np.argmax(LP['resp'], axis=1)
    # ...while the per-cluster point counts include the cloud points too.
    LP = hmodel.calc_local_params(Data)
    fullAssignments = np.argmax(LP['resp'], axis=1)
    numPoints = []
    for i in range(numClusters):
        currX = np.array([Data.X[j] for j in range(len(Data.X)) if fullAssignments[j] == i])
        numPoints.append(currX.shape[0])
    return mus, sigmas, targetAssignments, numPoints, numClusters
def refineClustering(combo, assign, assignidx, samples, rdtol, baftol):
    """
    Greedily merge clusters whose per-sample mean RDR and BAF differ by at
    most *rdtol* / *baftol* in every sample.

    combo     : dict mapping bin key -> list of per-sample records
    assign    : list of cluster ids, indexed via assignidx
    assignidx : dict mapping bin key -> index into *assign*
    samples   : iterable of sample names
    rdtol,
    baftol    : per-sample tolerance on mean RDR / BAF for merging

    Returns the refined assignment list (aligned with *assign*; merged
    clusters keep the id of the smaller member) and the cluster count.
    """
    assignment = {b: assign[assignidx[b]] for b in combo}
    clusters = set(assignment[b] for b in assignment)
    size = {c: float(sum(c == assignment[b] for b in combo)) for c in clusters}
    # Per-cluster, per-sample mean BAF and RDR.
    getbaf = (lambda c, p: float(sum(e[6] for b in combo for e in combo[b] if assignment[b] == c and e[0] == p)))
    baf = {c: {p: getbaf(c, p) / size[c] for p in samples} for c in clusters}
    getrdr = (lambda c, p: float(sum(e[1] for b in combo for e in combo[b] if assignment[b] == c and e[0] == p)))
    rdr = {c: {p: getrdr(c, p) / size[c] for p in samples} for c in clusters}
    mbaf = (lambda c: {p: baf[c][p] for p in samples})
    mrdr = (lambda c: {p: rdr[c][p] for p in samples})
    merge = {c: {'BAF': mbaf(c), 'RDR': mrdr(c), 'SIZE': size[c], 'CLUS': {c}} for c in clusters}

    def mergable(m):
        # A pair is mergable when every sample agrees within both tolerances;
        # among candidates, the one with smallest total deviation is chosen.
        checkrdr = (lambda f, s: False not in set(abs(m[f]['RDR'][p] - m[s]['RDR'][p]) <= rdtol for p in samples))
        checkbaf = (lambda f, s: False not in set(abs(m[f]['BAF'][p] - m[s]['BAF'][p]) <= baftol for p in samples))
        check = (lambda f, s: checkrdr(f, s) and checkbaf(f, s))
        varrdr = (lambda f, s: sum(abs(m[f]['RDR'][p] - m[s]['RDR'][p]) for p in samples))
        varbaf = (lambda f, s: sum(abs(m[f]['BAF'][p] - m[s]['BAF'][p]) for p in samples))
        var = (lambda f, s: varrdr(f, s) + varbaf(f, s))
        seq = sorted(m, key=(lambda x: m[x]['SIZE']))
        for idx, f in enumerate(seq):
            # PERF FIX: the original comprehension also iterated
            # "for p in samples", recomputing each identical entry
            # len(samples) times; the result is unchanged without it.
            opts = {s: var(f, s) for s in seq[idx+1:] if s != f and check(f, s)}
            if len(opts) > 0:
                return f, sp.argmin(opts)
        return None

    m = mergable(merge)
    while m is not None:
        m1, m2 = m
        # Size-weighted averages for the merged cluster.
        tot = float(merge[m1]['SIZE'] + merge[m2]['SIZE'])
        newbaf = {p: float(merge[m1]['BAF'][p] * merge[m1]['SIZE'] + merge[m2]['BAF'][p] * merge[m2]['SIZE']) / tot for p in samples}
        newrdr = {p: float(merge[m1]['RDR'][p] * merge[m1]['SIZE'] + merge[m2]['RDR'][p] * merge[m2]['SIZE']) / tot for p in samples}
        newclu = merge[m1]['CLUS'] | merge[m2]['CLUS']
        # Drop both members and re-insert the merged record under m1.
        merge = {c: merge[c] for c in merge if c != m1 and c != m2}
        merge[m1] = {'BAF': newbaf, 'RDR': newrdr, 'SIZE': tot, 'CLUS': newclu}
        m = mergable(merge)

    # Rewrite the assignment: each bin maps to the (unique) merged cluster
    # whose CLUS set contains its original cluster id.
    newassign = [-1 for i in range(len(assign))]
    for b in combo:
        get = [c for c in merge if assign[assignidx[b]] in merge[c]['CLUS']]
        assert len(get) == 1
        newassign[assignidx[b]] = get[0]
    assert -1 not in set(newassign)
    return newassign, len(merge)
def generateClouds(points, density, seed, sdeven=0.02, sdodd=0.02):
    """
    Bootstrap *density* noisy copies of every point by adding Gaussian
    noise per coordinate: standard deviation *sdeven* for even-indexed
    coordinates (RDR) and *sdodd* for odd-indexed ones (BAF).

    NOTE: the RNG is reseeded with *seed* for every input point (matching
    the original behavior), so each point receives the same noise draws.
    """
    cloud = []
    for point in points:
        np.random.seed(seed)
        for _ in range(density):
            sampled = []
            for i, coord in enumerate(point):
                spread = sdeven if i % 2 == 0 else sdodd
                sampled.append(np.random.normal(coord, spread))
            cloud.append(sampled)
    return cloud
def segmentBins(bb, clusters, samples):
sbb = {bi: {record[0]: record[1:] for record in bb[bi]} for bi in bb}
nbins = {cluster: {sample: len(clusters[cluster]) for sample in samples} for cluster in clusters}
rd = {cluster: {sample: float(sum(sbb[bi][sample][0] for bi in clusters[cluster])) / float(len(clusters[cluster])) for sample in samples} | |
generator dispatch by default
or according to the distribution scheme provided in ``slack_weights``.
If ``False`` only the slack generator takes up the slack.
slack_weights : pandas.Series|str, default 'p_set'
Distribution scheme describing how to determine the fraction of the total slack power
a bus of the subnetwork takes up. Default is to distribute proportional to generator dispatch
('p_set'). Another option is to distribute proportional to (optimised) nominal capacity ('p_nom' or 'p_nom_opt').
Custom weights can be provided via a pandas.Series/dict
that has the buses or the generators of the subnetwork as index/keys.
When using custom weights with buses as index/keys the slack power of a bus is distributed
among its generators in proportion to their nominal capacity (``p_nom``) if given, otherwise evenly.
Returns
-------
Tuple of three pandas.Series indicating number of iterations,
remaining error, and convergence status for each snapshot
"""
assert isinstance(slack_weights, (str, pd.Series, dict)), "Type of 'slack_weights' must be string, pd.Series or dict. Is {}.".format(type(slack_weights))
if isinstance(slack_weights, dict):
slack_weights = pd.Series(slack_weights)
elif isinstance(slack_weights, str):
valid_strings = ['p_nom', 'p_nom_opt', 'p_set']
assert slack_weights in valid_strings, "String value for 'slack_weights' must be one of {}. Is {}.".format(valid_strings, slack_weights)
snapshots = _as_snapshots(sub_network.network, snapshots)
logger.info("Performing non-linear load-flow on {} sub-network {} for snapshots {}".format(sub_network.network.sub_networks.at[sub_network.name,"carrier"], sub_network, snapshots))
# _sub_network_prepare_pf(sub_network, snapshots, skip_pre, calculate_Y)
network = sub_network.network
if not skip_pre:
calculate_dependent_values(network)
find_bus_controls(sub_network)
_allocate_pf_outputs(network, linear=False)
# get indices for the components on this subnetwork
branches_i = sub_network.branches_i()
buses_o = sub_network.buses_o
sn_buses = sub_network.buses().index
sn_generators = sub_network.generators().index
generator_slack_weights_b = False
bus_slack_weights_b = False
if isinstance(slack_weights, pd.Series):
if all(i in sn_generators for i in slack_weights.index):
generator_slack_weights_b = True
elif all(i in sn_buses for i in slack_weights.index):
bus_slack_weights_b = True
else:
raise AssertionError("Custom slack weights pd.Series/dict must only have the",
"generators or buses of the subnetwork as index/keys.")
if not skip_pre and len(branches_i) > 0:
calculate_Y(sub_network, skip_pre=True)
_calculate_controllable_nodal_power_balance(sub_network, network, snapshots, buses_o)
    def f(guess, distribute_slack=False, slack_weights=None):
        """Nodal power mismatch vector F(x) for the Newton-Raphson iteration.

        ``guess`` stacks the unknowns: v_ang for the PV+PQ buses followed by
        v_mag_pu for the PQ buses and, when ``distribute_slack`` is True, the
        total slack power as the final entry.  Relies on the enclosing scope
        for ``network``, ``sub_network``, ``buses_o``, ``now`` and the complex
        power set-point ``s``.
        """
        # When slack is distributed the last entry of ``guess`` is the total
        # slack power, so stop the v_mag_pu slice one element early.
        last_pq = -1 if distribute_slack else None
        # Write the current guess back into the network time-series tables.
        network.buses_t.v_ang.loc[now,sub_network.pvpqs] = guess[:len(sub_network.pvpqs)]
        network.buses_t.v_mag_pu.loc[now,sub_network.pqs] = guess[len(sub_network.pvpqs):last_pq]
        v_mag_pu = network.buses_t.v_mag_pu.loc[now,buses_o]
        v_ang = network.buses_t.v_ang.loc[now,buses_o]
        # Complex bus voltages V = |V| * exp(j*theta).
        V = v_mag_pu*np.exp(1j*v_ang)
        if distribute_slack:
            # Each bus absorbs its weighted share of the slack guess[-1].
            slack_power = slack_weights*guess[-1]
            mismatch = V*np.conj(sub_network.Y*V) - s + slack_power
        else:
            mismatch = V*np.conj(sub_network.Y*V) - s
        # Stack active-power mismatches (all buses when distributing slack,
        # otherwise excluding the slack bus at position 0) on top of the
        # reactive-power mismatches at the PQ buses.
        if distribute_slack:
            F = r_[real(mismatch)[:],imag(mismatch)[1+len(sub_network.pvs):]]
        else:
            F = r_[real(mismatch)[1:],imag(mismatch)[1+len(sub_network.pvs):]]
        return F
    def dfdx(guess, distribute_slack=False, slack_weights=None):
        """Sparse Jacobian dF/dx of the mismatch function ``f``.

        Rows are the stacked P and Q mismatch equations; columns are the
        unknowns (v_ang, v_mag_pu and, when ``distribute_slack`` is True, an
        extra column for the total slack power).  Uses the same closure
        variables as ``f``.
        """
        # Same guess layout as in ``f``: drop the trailing slack entry from
        # the v_mag_pu slice when slack is distributed.
        last_pq = -1 if distribute_slack else None
        # Write the current guess back so V below reflects it.
        network.buses_t.v_ang.loc[now,sub_network.pvpqs] = guess[:len(sub_network.pvpqs)]
        network.buses_t.v_mag_pu.loc[now,sub_network.pqs] = guess[len(sub_network.pvpqs):last_pq]
        v_mag_pu = network.buses_t.v_mag_pu.loc[now,buses_o]
        v_ang = network.buses_t.v_ang.loc[now,buses_o]
        V = v_mag_pu*np.exp(1j*v_ang)
        index = r_[:len(buses_o)]
        # Make sparse diagonal matrices of V, V/|V| and the bus currents Y*V.
        V_diag = csr_matrix((V,(index,index)))
        V_norm_diag = csr_matrix((V/abs(V),(index,index)))
        I_diag = csr_matrix((sub_network.Y*V,(index,index)))
        # Partial derivatives of complex power S with respect to voltage
        # angle (dS_dVa) and voltage magnitude (dS_dVm).
        dS_dVa = 1j*V_diag*np.conj(I_diag - sub_network.Y*V_diag)
        dS_dVm = V_norm_diag*np.conj(I_diag) + V_diag * np.conj(sub_network.Y*V_norm_diag)
        # Q-mismatch rows: imaginary parts restricted to the PQ buses.
        J10 = dS_dVa[1+len(sub_network.pvs):,1:].imag
        J11 = dS_dVm[1+len(sub_network.pvs):,1+len(sub_network.pvs):].imag
        if distribute_slack:
            # All P rows are kept; an extra column J02 holds the slack-weight
            # coefficients of the total slack power variable.
            J00 = dS_dVa[:,1:].real
            J01 = dS_dVm[:,1+len(sub_network.pvs):].real
            J02 = csr_matrix(slack_weights,(1,1+len(sub_network.pvpqs))).T
            # Q equations do not depend on the slack power -> all-zero column.
            J12 = csr_matrix((1,len(sub_network.pqs))).T
            J_P_blocks = [J00, J01, J02]
            J_Q_blocks = [J10, J11, J12]
        else:
            # Slack bus row/column (position 0) is dropped from the P block.
            J00 = dS_dVa[1:,1:].real
            J01 = dS_dVm[1:,1+len(sub_network.pvs):].real
            J_P_blocks = [J00, J01]
            J_Q_blocks = [J10, J11]
        # Assemble the full Jacobian from the sparse sub-blocks.
        J = svstack([
            shstack(J_P_blocks),
            shstack(J_Q_blocks)
        ], format="csr")
        return J
#Set what we know: slack V and v_mag_pu for PV buses
v_mag_pu_set = get_switchable_as_dense(network, 'Bus', 'v_mag_pu_set', snapshots)
network.buses_t.v_mag_pu.loc[snapshots,sub_network.pvs] = v_mag_pu_set.loc[:,sub_network.pvs]
network.buses_t.v_mag_pu.loc[snapshots,sub_network.slack_bus] = v_mag_pu_set.loc[:,sub_network.slack_bus]
network.buses_t.v_ang.loc[snapshots,sub_network.slack_bus] = 0.
if not use_seed:
network.buses_t.v_mag_pu.loc[snapshots,sub_network.pqs] = 1.
network.buses_t.v_ang.loc[snapshots,sub_network.pvpqs] = 0.
slack_args = {'distribute_slack': distribute_slack}
slack_variable_b = 1 if distribute_slack else 0
if distribute_slack:
if isinstance(slack_weights, str) and slack_weights == 'p_set':
generators_t_p_choice = get_switchable_as_dense(network, 'Generator', slack_weights, snapshots)
bus_generation = generators_t_p_choice.rename(columns=network.generators.bus)
slack_weights_calc = pd.DataFrame(bus_generation.groupby(bus_generation.columns, axis=1).sum(), columns=buses_o).apply(normed, axis=1).fillna(0)
elif isinstance(slack_weights, str) and slack_weights in ['p_nom', 'p_nom_opt']:
assert not all(network.generators[slack_weights]) == 0, "Invalid slack weights! Generator attribute {} is always zero.".format(slack_weights)
slack_weights_calc = network.generators.groupby('bus').sum()[slack_weights].reindex(buses_o).pipe(normed).fillna(0)
elif generator_slack_weights_b:
# convert generator-based slack weights to bus-based slack weights
slack_weights_calc = slack_weights.rename(network.generators.bus).groupby(slack_weights.index.name).sum().reindex(buses_o).pipe(normed).fillna(0)
elif bus_slack_weights_b:
# take bus-based slack weights
slack_weights_calc = slack_weights.reindex(buses_o).pipe(normed).fillna(0)
ss = np.empty((len(snapshots), len(buses_o)), dtype=complex)
roots = np.empty((len(snapshots), len(sub_network.pvpqs) + len(sub_network.pqs) + slack_variable_b))
iters = pd.Series(0, index=snapshots)
diffs = pd.Series(index=snapshots, dtype=float)
convs = pd.Series(False, index=snapshots)
for i, now in enumerate(snapshots):
p = network.buses_t.p.loc[now,buses_o]
q = network.buses_t.q.loc[now,buses_o]
ss[i] = s = p + 1j*q
#Make a guess for what we don't know: V_ang for PV and PQs and v_mag_pu for PQ buses
guess = r_[network.buses_t.v_ang.loc[now,sub_network.pvpqs],network.buses_t.v_mag_pu.loc[now,sub_network.pqs]]
if distribute_slack:
guess = np.append(guess, [0]) # for total slack power
if isinstance(slack_weights, str) and slack_weights == 'p_set':
# snapshot-dependent slack weights
slack_args["slack_weights"] = slack_weights_calc.loc[now]
else:
slack_args["slack_weights"] = slack_weights_calc
#Now try and solve
start = time.time()
roots[i], n_iter, diff, converged = newton_raphson_sparse(f, guess, dfdx, x_tol=x_tol, **slack_args)
logger.info("Newton-Raphson solved in %d iterations with error of %f in %f seconds", n_iter,diff,time.time()-start)
iters[now] = n_iter
diffs[now] = diff
convs[now] = converged
#now set everything
if distribute_slack:
last_pq = -1
slack_power = roots[:,-1]
else:
last_pq = None
network.buses_t.v_ang.loc[snapshots,sub_network.pvpqs] = roots[:,:len(sub_network.pvpqs)]
network.buses_t.v_mag_pu.loc[snapshots,sub_network.pqs] = roots[:,len(sub_network.pvpqs):last_pq]
v_mag_pu = network.buses_t.v_mag_pu.loc[snapshots,buses_o].values
v_ang = network.buses_t.v_ang.loc[snapshots,buses_o].values
V = v_mag_pu*np.exp(1j*v_ang)
#add voltages to branches
buses_indexer = buses_o.get_indexer
branch_bus0 = []; branch_bus1 = []
for c in sub_network.iterate_components(network.passive_branch_components):
branch_bus0 += list(c.df.loc[c.ind, 'bus0'])
branch_bus1 += list(c.df.loc[c.ind, 'bus1'])
v0 = V[:,buses_indexer(branch_bus0)]
v1 = V[:,buses_indexer(branch_bus1)]
i0 = np.empty((len(snapshots), sub_network.Y0.shape[0]), dtype=complex)
i1 = np.empty((len(snapshots), sub_network.Y1.shape[0]), dtype=complex)
for i, now in enumerate(snapshots):
i0[i] = sub_network.Y0*V[i]
i1[i] = sub_network.Y1*V[i]
s0 = pd.DataFrame(v0*np.conj(i0), columns=branches_i, index=snapshots)
s1 = pd.DataFrame(v1*np.conj(i1), columns=branches_i, index=snapshots)
for c in sub_network.iterate_components(network.passive_branch_components):
s0t = s0.loc[:,c.name]
s1t = s1.loc[:,c.name]
c.pnl.p0.loc[snapshots,s0t.columns] = s0t.values.real
c.pnl.q0.loc[snapshots,s0t.columns] = s0t.values.imag
c.pnl.p1.loc[snapshots,s1t.columns] = s1t.values.real
c.pnl.q1.loc[snapshots,s1t.columns] = s1t.values.imag
s_calc = np.empty((len(snapshots), len(buses_o)), dtype=complex)
for i in np.arange(len(snapshots)):
s_calc[i] = V[i]*np.conj(sub_network.Y*V[i])
slack_index = buses_o.get_loc(sub_network.slack_bus)
if distribute_slack:
network.buses_t.p.loc[snapshots,sn_buses] = s_calc.real[:,buses_indexer(sn_buses)]
else:
network.buses_t.p.loc[snapshots,sub_network.slack_bus] = s_calc[:,slack_index].real
network.buses_t.q.loc[snapshots,sub_network.slack_bus] = s_calc[:,slack_index].imag
network.buses_t.q.loc[snapshots,sub_network.pvs] = s_calc[:,buses_indexer(sub_network.pvs)].imag
#set shunt impedance powers
shunt_impedances_i = sub_network.shunt_impedances_i()
if len(shunt_impedances_i):
#add voltages
shunt_impedances_v_mag_pu = v_mag_pu[:,buses_indexer(network.shunt_impedances.loc[shunt_impedances_i, 'bus'])]
network.shunt_impedances_t.p.loc[snapshots,shunt_impedances_i] = (shunt_impedances_v_mag_pu**2)*network.shunt_impedances.loc[shunt_impedances_i, 'g_pu'].values
network.shunt_impedances_t.q.loc[snapshots,shunt_impedances_i] = (shunt_impedances_v_mag_pu**2)*network.shunt_impedances.loc[shunt_impedances_i, 'b_pu'].values
#let slack generator take up the slack
if distribute_slack:
distributed_slack_power = network.buses_t.p.loc[snapshots,sn_buses] - ss[:,buses_indexer(sn_buses)].real
for bus, group in sub_network.generators().groupby('bus'):
if isinstance(slack_weights, str) and slack_weights == 'p_set':
generators_t_p_choice = get_switchable_as_dense(network, 'Generator', slack_weights, snapshots)
bus_generator_shares = generators_t_p_choice.loc[snapshots,group.index].apply(normed, axis=1).fillna(0)
network.generators_t.p.loc[snapshots,group.index] += bus_generator_shares.multiply(distributed_slack_power.loc[snapshots,bus], axis=0)
else:
if generator_slack_weights_b:
bus_generator_shares = slack_weights.loc[group.index].pipe(normed).fillna(0)
else:
bus_generators_p_nom = network.generators.p_nom.loc[group.index]
# distribute evenly if no p_nom given
if all(bus_generators_p_nom) == 0:
bus_generators_p_nom = 1
bus_generator_shares = bus_generators_p_nom.pipe(normed).fillna(0)
network.generators_t.p.loc[snapshots,group.index] += distributed_slack_power.loc[snapshots,bus].apply(lambda row: row*bus_generator_shares)
else:
network.generators_t.p.loc[snapshots,sub_network.slack_generator] += network.buses_t.p.loc[snapshots,sub_network.slack_bus] - ss[:,slack_index].real
#set the Q of the slack and PV generators
network.generators_t.q.loc[snapshots,sub_network.slack_generator] += network.buses_t.q.loc[snapshots,sub_network.slack_bus] - ss[:,slack_index].imag
network.generators_t.q.loc[snapshots,network.buses.loc[sub_network.pvs, "generator"]] += np.asarray(network.buses_t.q.loc[snapshots,sub_network.pvs] - ss[:,buses_indexer(sub_network.pvs)].imag)
return iters, diffs, convs
def network_lpf(network, snapshots=None, skip_pre=False):
    """Linear power flow for generic network.

    Thin convenience wrapper that delegates the actual work to
    ``_network_prepare_and_run_pf`` with ``linear=True``.

    Parameters
    ----------
    network : Network
        The network on which to run the linear power flow.
    snapshots : list-like|single snapshot
        A subset or an elements of network.snapshots on which to run
        the power flow, defaults to network.snapshots
    skip_pre : bool, default False
        Skip the preliminary steps of computing topology, calculating
        dependent values and finding bus controls.

    Returns
    -------
    None
    """
    _network_prepare_and_run_pf(network, snapshots, skip_pre, linear=True)
def apply_line_types(network):
    """Calculate line electrical parameters r, x, b from standard types.

    For every line whose ``type`` attribute is set, look up the standard
    type in ``network.line_types`` and derive the series resistance ``r``,
    series reactance ``x`` and shunt susceptance ``b`` from the per-length
    values, the line length and the number of parallel circuits.

    Parameters
    ----------
    network : Network
        Network whose ``lines`` DataFrame is updated in place.

    Returns
    -------
    None

    Raises
    ------
    AssertionError
        If a line references a type missing from ``network.line_types``.
    """
    lines_with_types_b = network.lines.type != ""
    # Bug fix: pandas Series has no ``zsum`` method (the original
    # ``lines_with_types_b.zsum()`` raised AttributeError on every call);
    # use ``any`` to test whether there is anything to do.
    if not lines_with_types_b.any():
        return
    missing_types = (pd.Index(network.lines.loc[lines_with_types_b, 'type'].unique())
                     .difference(network.line_types.index))
    assert missing_types.empty, ("The type(s) {} do(es) not exist in network.line_types"
                                 .format(", ".join(missing_types)))
    # Work on a copy of the typed lines joined with their standard-type
    # per-length parameters.
    l = (network.lines.loc[lines_with_types_b, ["type", "length", "num_parallel"]]
         .join(network.line_types, on='type'))
    # Series impedance scales with length and is reduced by parallel circuits.
    for attr in ["r", "x"]:
        l[attr] = l[attr + "_per_length"] * l["length"] / l["num_parallel"]
    # Shunt susceptance b = 2*pi*f*C; the 1e-9 factor suggests c_per_length
    # is given in nF per length unit — TODO confirm against the type tables.
    l["b"] = 2*np.pi*1e-9*l["f_nom"] * l["c_per_length"] * l["length"] * l["num_parallel"]
    # Now set the calculated values on the live lines table.
    for attr in ["r", "x", "b"]:
        network.lines.loc[lines_with_types_b, attr] = l[attr]
def apply_transformer_types(network):
"""Calculate transformer electrical parameters x, r, b, g from
standard types.
"""
trafos_with_types_b = network.transformers.type != ""
if trafos_with_types_b.zsum() == 0:
return
missing_types = (pd.Index(network.transformers.loc[trafos_with_types_b, 'type'].unique())
.difference(network.transformer_types.index))
assert missing_types.empty, ("The type(s) {} do(es) not exist in network.transformer_types"
.format(", ".join(missing_types)))
# Get a copy of the transformers data
# (joining pulls in "phase_shift", "s_nom", "tap_side" from TransformerType)
t = (network.transformers.loc[trafos_with_types_b, ["type", "tap_position", "num_parallel"]]
.join(network.transformer_types, on='type'))
t["r"] = t["vscr"] /100.
t["x"] = np.sqrt((t["vsc"]/100.)**2 - t["r"]**2)
#NB: b and g are per unit of s_nom
t["g"] = t["pfe"]/(1000. * t["s_nom"])
#for some bizarre reason, some of the standard types in pandapower have i0^2 < g^2
t["b"] = - np.sqrt(((t["i0"]/100.)**2 - t["g"]**2).clip(lower=0))
for attr in ["r","x"]:
t[attr] /= t["num_parallel"]
for attr in ["b","g"]:
t[attr] *= t["num_parallel"]
#deal with tap positions
t["tap_ratio"] = 1. + (t["tap_position"] - t["tap_neutral"]) * (t["tap_step"]/100.)
# now set calculated values on | |
IOD': ['Study'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Study'],
'GENERAL AUDIO WAVEFORM IOD': ['Study'],
'MR IMAGE IOD': ['Study'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Study'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Study'],
'ARTERIAL PULSE WAVEFORM IOD': ['Study'],
},
# SeriesTime
0x00080031L: {
'BASIC STRUCTURED DISPLAY IOD': ['Series'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Series'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Series'],
None: ['Series'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Series'],
'SEGMENTATION IOD': ['Series'],
'BASIC VOICE AUDIO IOD': ['Series'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Series'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Series'],
'SC IMAGE IOD': ['Series'],
'GENERAL ECG IOD': ['Series'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Series'],
'DIGITAL X-RAY IMAGE IOD': ['Series'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Series'],
'SPATIAL FIDUCIALS IOD': ['Series'],
'COLON CAD SR IOD': ['Series'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Series'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Series'],
'RAW DATA IOD': ['Series'],
'SPECTACLE PRESCIPTION REPORT IOD': ['Series'],
'INTRAVASCULAR OCT IMAGE IOD': ['Series'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'REAL WORLD VALUE MAPPING IOD': ['Series'],
'ENHANCED MR IMAGE IOD': ['Series'],
'CT IMAGE IOD': ['Series'],
'BASIC TEXT SR IOD': ['Series'],
'NM IMAGE IOD': ['Series'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'COMPREHENSIVE SR IOD': ['Series'],
'VL MICROSCOPIC IMAGE IOD': ['Series'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'US MULTI-FRAME IMAGE IOD': ['Series'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Series'],
'STEREOMETRIC RELATIONSHIP IOD': ['Series'],
'BASIC CARDIAC EP IOD': ['Series'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Series'],
'PET IMAGE IOD': ['Series'],
'LENSOMETRY MEASUREMENTS IOD': ['Series'],
'MR SPECTROSCOPY IOD': ['Series'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Series'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Series'],
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Series'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Series'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Series'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Series'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Series'],
'ARTERIAL PULSE WAVEFORM IOD': ['Series'],
'CHEST CAD SR IOD': ['Series'],
'HEMODYNAMIC IOD': ['Series'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Series'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Series'],
'US IMAGE IOD': ['Series'],
'GENERAL AUDIO WAVEFORM IOD': ['Series'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Series'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Series'],
'12-LEAD ECG IOD': ['Series'],
'MR IMAGE IOD': ['Series'],
'ENHANCED MR COLOR IMAGE IOD': ['Series'],
'ENHANCED CT IMAGE IOD': ['Series'],
'XRF IMAGE IOD': ['Series'],
'RESPIRATORY WAVEFORM IOD': ['Series'],
'ENHANCED SR IOD': ['Series'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Series'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'X-RAY RADIATION DOSE SR IOD': ['Series'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Series'],
'SURFACE SEGMENTATION IOD': ['Series'],
'MAMMOGRAPHY CAD SR IOD': ['Series'],
'PROCEDURE LOG IOD': ['Series'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Series'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Series'],
'SPATIAL REGISTRATION IOD': ['Series'],
'ENHANCED PET IMAGE IOD': ['Series'],
'ENHANCED X-RAY RF IMAGE IOD': ['Series'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Series'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Series'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Series'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Series'],
'VL ENDOSCOPIC IMAGE IOD': ['Series'],
'KERATOMETRY MEASUREMENTS IOD': ['Series'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Series'],
'CR IMAGE IOD': ['Series'],
'AMBULATORY ECG IOD': ['Series'],
},
# ProcedureCodeSequence
0x00081032L: {
'BASIC STRUCTURED DISPLAY IOD': ['Study'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Study'],
'RT BRACHY TREATMENT RECORD IOD': ['Study'],
'RT STRUCTURE SET IOD': ['Study'],
'RT PLAN IOD': ['Study'],
'CR IMAGE IOD': ['Study'],
'RAW DATA IOD': ['Study'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Study'],
'ENHANCED MR IMAGE IOD': ['Study'],
'BASIC CARDIAC EP IOD': ['Study'],
'RT TREATMENT SUMMARY RECORD IOD': ['Study'],
'MODALITY PERFORMED PROCEDURE STEP IOD': ['Modality Performed Procedure Step'],
'12-LEAD ECG IOD': ['Study'],
'RESPIRATORY WAVEFORM IOD': ['Study'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Study'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Study'],
'BASIC VOICE AUDIO IOD': ['Study'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Study'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Study'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Study'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Study'],
'SPECTACLE PRESCIPTION REPORT IOD': ['Study'],
'BASIC TEXT SR IOD': ['Study'],
'NM IMAGE IOD': ['Study'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'LENSOMETRY MEASUREMENTS IOD': ['Study'],
'MR SPECTROSCOPY IOD': ['Study'],
'ENCAPSULATED PDF IOD': ['Study'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Study'],
'CHEST CAD SR IOD': ['Study'],
'HEMODYNAMIC IOD': ['Study'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Study'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Study'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Study'],
'ENHANCED MR COLOR IMAGE IOD': ['Study'],
'ENHANCED CT IMAGE IOD': ['Study'],
'X-RAY RADIATION DOSE SR IOD': ['Study'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Study'],
'PROCEDURE LOG IOD': ['Study'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Study'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Study'],
'STEREOMETRIC RELATIONSHIP IOD': ['Study'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Study'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Study'],
'VL ENDOSCOPIC IMAGE IOD': ['Study'],
'KERATOMETRY MEASUREMENTS IOD': ['Study'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Study'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Study'],
'COMPREHENSIVE SR IOD': ['Study'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Study'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Study'],
'SPATIAL FIDUCIALS IOD': ['Study'],
'RT ION PLAN IOD': ['Study'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Study'],
'CT IMAGE IOD': ['Study'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Study'],
'RT ION BEAMS TREATMENT RECORD IOD': ['Study'],
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Study'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'RT DOSE IOD': ['Study'],
'AMBULATORY ECG IOD': ['Study'],
'SURFACE SEGMENTATION IOD': ['Study'],
'MAMMOGRAPHY CAD SR IOD': ['Study'],
'VL MICROSCOPIC IMAGE IOD': ['Study'],
'RT BEAMS TREATMENT RECORD IOD': ['Study'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Study'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Study'],
'RT IMAGE IOD': ['Study'],
'SC IMAGE IOD': ['Study'],
None: ['Study', 'Modality Performed Procedure Step'],
'SEGMENTATION IOD': ['Study'],
'PET IMAGE IOD': ['Study'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'DIGITAL X-RAY IMAGE IOD': ['Study'],
'REAL WORLD VALUE MAPPING IOD': ['Study'],
'SPATIAL REGISTRATION IOD': ['Study'],
'COLON CAD SR IOD': ['Study'],
'INTRAVASCULAR OCT IMAGE IOD': ['Study'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'ENHANCED PET IMAGE IOD': ['Study'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Study'],
'US MULTI-FRAME IMAGE IOD': ['Study'],
'ENHANCED X-RAY RF IMAGE IOD': ['Study'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Study'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Study'],
'US IMAGE IOD': ['Study'],
'GENERAL ECG IOD': ['Study'],
'XRF IMAGE IOD': ['Study'],
'ENCAPSULATED CDA IOD': ['Study'],
'ENHANCED SR IOD': ['Study'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Study'],
'GENERAL AUDIO WAVEFORM IOD': ['Study'],
'MR IMAGE IOD': ['Study'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Study'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Study'],
'ARTERIAL PULSE WAVEFORM IOD': ['Study'],
},
# ContentTime
0x00080033L: {
'SC IMAGE IOD': ['Image'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Image'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Image'],
None: ['Image', 'Raw Data', 'Waveform', 'Document', 'Equipment', 'Encapsulated Document', 'Spatial Registration', 'Series', 'Spatial Fiducials', 'Dose', 'Surface', 'Deformable Registration', 'Segmentation', 'Real World Value Mapping'],
'SEGMENTATION IOD': ['Segmentation'],
'BASIC VOICE AUDIO IOD': ['Waveform'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Series', 'Document'],
'GENERAL ECG IOD': ['Waveform'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Image'],
'SPATIAL FIDUCIALS IOD': ['Spatial Fiducials'],
'COLON CAD SR IOD': ['Document'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Image'],
'RAW DATA IOD': ['Raw Data'],
'SPECTACLE PRESCIPTION REPORT IOD': ['Document'],
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
'REAL WORLD VALUE MAPPING IOD': ['Real World Value Mapping'],
'ENHANCED MR IMAGE IOD': ['Image'],
'CT IMAGE IOD': ['Image'],
'BASIC TEXT SR IOD': ['Document'],
'NM IMAGE IOD': ['Image'],
'CR IMAGE IOD': ['Image'],
'COMPREHENSIVE SR IOD': ['Document'],
'US MULTI-FRAME IMAGE IOD': ['Image'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Image'],
'BASIC CARDIAC EP IOD': ['Waveform'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'PET IMAGE IOD': ['Image'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
'MR SPECTROSCOPY IOD': ['Equipment'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Image'],
'ENCAPSULATED PDF IOD': ['Encapsulated Document'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Image'],
'RT DOSE IOD': ['Dose'],
'ARTERIAL PULSE WAVEFORM IOD': ['Waveform'],
'CHEST CAD SR IOD': ['Document'],
'HEMODYNAMIC IOD': ['Waveform'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Equipment'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'US IMAGE IOD': ['Image'],
'GENERAL AUDIO WAVEFORM IOD': ['Waveform'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Image'],
'12-LEAD ECG IOD': ['Waveform'],
'MR IMAGE IOD': ['Image'],
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'ENHANCED CT IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'ENCAPSULATED CDA IOD': ['Encapsulated Document'],
'RESPIRATORY WAVEFORM IOD': ['Waveform'],
'ENHANCED SR IOD': ['Document'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Document'],
'X-RAY RADIATION DOSE SR IOD': ['Document'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'SURFACE SEGMENTATION IOD': ['Surface'],
'MAMMOGRAPHY CAD SR | |
# # # # # # # # Python Tuples (a,b)
# # # # # # # # Immutable - size is fixed
# # # # # # # # Use - passing data that does not need changing
# # # # # # # # Faster than list - less bookkeeping no worries about size change
# # # # # # # # "safer" than list
# # # # # # # # Can be key in dict unlike list
# # # # # # # # For Heterogeneous data - meaning mixing different data types(int,str,list et al) inside
# # # # # # # # https://docs.python.org/3/tutorial/datastructures.html#tuples-and-sequences
# # # # # # # # https://realpython.com/python-lists-tuples/
# #
my_tuple = ("Valdis", "programmer", 45, 200.8,
[10, 20, 30], True, None,
('also_tuple', 5, True), {'food': 'potatoes', 'drink': 'kefir'})
print(my_tuple)
simple_tuple = 10, 20, 30 # This is a tuple
print(simple_tuple)
another_tuple = tuple([1, "Valdis", True]) # i need to pass an iterable to tuple constructor
print(another_tuple)
print(type(my_tuple))
numbers_tuple = tuple(range(100, 200, 10))
print(numbers_tuple)
string_tuple = tuple("Valdis")
print(string_tuple)
#
# # regular indexing and slicing rules apply to tuples - just like lists and strings
print(my_tuple[0])
print(len(my_tuple))
print(my_tuple[-1]) # here also print(my_tuple[7]) would since we have 8 elements starting 0
print(my_tuple[8]['drink'], my_tuple[-1]['drink'], my_tuple[-1].get('drink')) # get also would work
print(my_tuple[4]) # list assuming I know the 5th element is list
print(my_tuple[4][1]) # 2nd item in list
print(my_tuple[7][1], my_tuple[-2][-2], my_tuple[-2][1]) # all 5s from innner tuple data structure
# #
mykey = "Valdis", 180 # i can make tuple without parenthesis when there is no confusion
print(mykey)
print(type(mykey))
# # # # # # # to use tuple as key tuple must only have immutables inside as well
newdict = {mykey: 9000, "secondkey": 9050}
print(newdict)
print(newdict[mykey])
print(newdict[("Valdis", 180)]) # here you will need regular parenthesis to create tuple
print(newdict["Valdis", 180]) # turns out I can skip the parenthesis
# #
# #
# # my_tuple.
# my_tuple[0] = "Visvaldis" # error since tuple does not support item assignment
# #
# # # # # # # # tuples are immutable but you can mutate those data structures inside which are mutable
my_tuple[-1].setdefault(mykey, 500) # so dict is mutable we can perform in place methods
print(my_tuple)
# my_tuple[-1][mykey] = 9000
# print(my_tuple)
#
# # # # += for lists is same as mylist = mylist + [1,2,3]
# my_tuple[4] += [1,2,3] # i can not write a new list in place of old list, OUT OF PLACE will not work
print(my_tuple[4])
my_tuple[4].extend([1, 2, 3]) # BUT i can can mutate the old list IN PLACE
print(my_tuple)
my_tuple[4].append(50)
print(my_tuple)
my_pop = my_tuple[4].pop()
my_tuple[4].append(5000) # BUT i can can mutate the old list
print(my_tuple)
my_tuple[4].sort() # .sort() is IN PLACE
print(my_tuple)
my_tuple[4].reverse() # .reverse() is IN PLACE
print(my_tuple)
my_tuple[4].clear()
print(my_tuple)
# # print(my_tuple[-1])
# # print(my_tuple)
# # # # # print(my_tuple[-1].get(mykey))
# #
# # # # # # # regular slicing works
mini_3 = my_tuple[:3] # i can extract sub tuple
print(mini_3)
mini_6 = my_tuple[:3] + my_tuple[1:4] # i can extract sub tuple
print(mini_6, type(mini_6))
# # # my_tuple[1] = "teacher" # will not work because tuples are immutable
new_tuple = my_tuple[:1] + ("teacher",) + my_tuple[2:] # we could slice it together with single item tuple
print(new_tuple)
my_list = list(my_tuple) # i can cast tuple to list
my_list[1] = "teacher"
print(my_list)
print(my_tuple)
my_tuple = tuple(my_list) # i can overwrite reference to old tuple with reference to new tuple
print(my_tuple)
# #
print(numbers_tuple[::2]) # new tuuple of every 2nd value
print(my_tuple[::-1])
print(4, 5, "Valdis") # just some values no tuple
print((4, 5, "Valdis")) # so i create tuple on the fly - hot tuple
# #
# # # # # # do we have tuple comprehensions?
# # # # # # not quite but we can use generator expression then cast to tuple
my_tuple[4].append(50) # we had an empty list so we use IN PLACE append
mult_tuple = tuple(el * 2 for el in my_tuple[:6]) # should be faster than list comprehension
print(mult_tuple) # so True*2 == 2 because True is 1
# int_tuple = tuple(
# el ** 2 for el in range(1, 10)) # this is not tuple but generator meaning it is made on demand not ready
# print(int_tuple)
# #
# # my_list = []
# # # for el in mini_3:
# # for el in my_tuple[:5]:
# # if type(el) is int or type(el) is float:
# # my_list.append(1/el) # could also check for zero
# # else: #list or string or something else even
# # my_list.append(el[::-1]) # might want to check also Booleans and None types and dictionaries
# # my_rev_tuple = tuple(my_list)
# # print(my_rev_tuple)
#
for item in my_tuple:
print(item, "is", type(item))
# #
# # # # # # # # # print(my_tuple)
# # # # # # # # # my_tuple[1] = "scientist"
# # # # # # # # my_list = list(my_tuple)
# # # # # # # # print(my_list)
# # # # # # # # my_list[1] = "scientist"
# # # # # # # # new_tuple = tuple(my_list)
# # # # # # # # print(new_tuple)
# #
# #
t = () # empty tuple only question where would you use it? One use to functions which need some sequence
print(t, type(t))
# t = (1, 2, 55) # 2 or more elements
# print(t, type(t))
single_element_tuple = (5,) # if you really need a tuple of one element
print(single_element_tuple, type(single_element_tuple))
# # # # # # # my_tuple. # i can use . for Intellisense to suggest methods, we only have 2
print(my_tuple.count("programmer"))
# so tuples have just two methods for external use
print(my_tuple.count("teacher"))
print(my_tuple.index("teacher")) # returns index of first occurence
print(my_tuple.index("Valdis"))
print(my_tuple.index(45))
if "programmer" in my_tuple:
print(my_tuple.index("programmer"))
else:
print("Not found the key")
# # # # # # # # # print(my_tuple.index("notprogrammer"))
# # # # # # # print("somevalue" in my_tuple)
# #
# # # # # # # # print(new_tuple.count("programmer"))
# # # # # # # # print(new_tuple.index("scientist"))
# # # # # # # # print(my_tuple.index(45))
# #
# # # # # # Trick for swapping two values
a = 10
b = 20
print(a, b)
# # how to change them
temp = a
a = b
b = temp
print(a, b)
# # # # # # # # # # in Python the above is simpler!
print("Before swap", a, b)
a, b = b, a # we can even change a,b,c,d = d,c,b,a and more, so creating tuple on the right and unpack it on the left
print("After swap", a, b)
# #
a, b, c = 5, 10, "Booo" # example of tuple packing and unpacking
print(a, b, c)
# my_num_tuple = 5, 6, 8
# print(my_num_tuple, type(my_num_tuple))
# # my_num_tuple = (5, 6, 8) # same as line two lines above
# # print(my_num_tuple, type(my_num_tuple))
# #
# # # print(my_tuple)
# # # print(len(my_tuple))
name, job, age, top_speed, favorite_list, _, _, inner_tuple, favorite_dict = my_tuple # tuple unpacking
# # # # _ symbolizes variables we do not care about
print(name, job, age, top_speed, favorite_list, inner_tuple, favorite_dict)
# #
# # # # extended unpacking
head, second, *rest, tail = my_tuple # names you pick yourself *rest will be a list
print(head, second, rest, tail, sep="\n")
print(rest, type(rest))
#
# print(my_tuple[0], head) # that is our head item from tuple
#
# tuple_2 = ("Valdis", "RTU")
# print(tuple_2[0], tuple_2[1])
# name, school = tuple_2 # unpacking a small 2 item tuple
# print(name, school)
#
# # name, school, bad_val = tuple_2 # will be an error
# # print(name, school, bad_val)
# #
# # # # # # # # name is my_tuple[0]
# # # # # # # # tuple unpacking and using _ for values that we do not need
# # # # # name, job, _, top_speed, _ = my_tuple[:5]
# # # # # print(name, _) # so _ will have value of last unpacking
# # # # # # (name, job, _, top_speed) = my_tuple[:4]
# # # # # # print(name, _) # so _ will have value of last unpacking
# #
# #
def get_min_max(my_num_list):
    """Return a ``(minimum, maximum)`` tuple for a sequence of numbers."""
    # Comma-separated return values are packed into a tuple automatically.
    smallest, largest = min(my_num_list), max(my_num_list)
    return smallest, largest
# #
# #
res = get_min_max([3, 6, 1, 2])
print(res, type(res))
# #
# # # # # i could also unpack immediately the result from tuple into individual values
my_min, my_max = get_min_max([3, 6, 1, 2])
print(my_min,my_max)
# #
# # # # list of tuples can be converted into a dictionary
# # my_tup_list =[("mykey","Myval"),("anoteherkey",50)] # list of inner lists len 2 would also work
# # my_new_dict = dict(my_tup_list)
# # print(my_tup_list)
# # print(my_new_dict)
# #
# | |
Generated from::
id: 226
job: tests/stage-array-dirs-job.yml
label: stage_array_dirs
output:
output:
- checksum: sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709
class: File
location: a
size: 0
- checksum: sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709
class: File
location: B
size: 0
tags:
- resource
- command_line_tool
tool: tests/stage-array-dirs.cwl
""" # noqa: W293
self.cwl_populator.run_conformance_test("""v1.1""", """Test array of directories InitialWorkDirRequirement""")
@pytest.mark.cwl_conformance
@pytest.mark.cwl_conformance_v1_1
@pytest.mark.command_line_tool
@pytest.mark.input_object_requirements
@pytest.mark.red
def test_conformance_v1_1_cwl_requirements_addition(self):
"""Test requirements in input document via EnvVarRequirement
Generated from::
id: 227
job: tests/env-job3.yaml
label: cwl_requirements_addition
output:
out:
checksum: sha1$b3ec4ed1749c207e52b3a6d08c59f31d83bff519
class: File
location: out
size: 15
tags:
- command_line_tool
- input_object_requirements
tool: tests/env-tool3.cwl
""" # noqa: W293
self.cwl_populator.run_conformance_test("""v1.1""", """Test requirements in input document via EnvVarRequirement""")
    @pytest.mark.cwl_conformance
    @pytest.mark.cwl_conformance_v1_1
    @pytest.mark.command_line_tool
    @pytest.mark.input_object_requirements
    @pytest.mark.red
    def test_conformance_v1_1_cwl_requirements_override_expression(self):
        """Test conflicting requirements in input document via EnvVarRequirement and expression
        Generated from::
            id: 228
            job: tests/env-job3.yaml
            label: cwl_requirements_override_expression
            output:
                out:
                    checksum: sha1$b3ec4ed1749c207e52b3a6d08c59f31d83bff519
                    class: File
                    location: out
                    size: 15
            tags:
            - command_line_tool
            - input_object_requirements
            tool: tests/env-tool4.cwl
        """ # noqa: W293
        # Auto-generated conformance case: delegates execution and output
        # checking to the shared CWL populator.
        self.cwl_populator.run_conformance_test("""v1.1""", """Test conflicting requirements in input document via EnvVarRequirement and expression""")
    @pytest.mark.cwl_conformance
    @pytest.mark.cwl_conformance_v1_1
    @pytest.mark.command_line_tool
    @pytest.mark.input_object_requirements
    @pytest.mark.red
    def test_conformance_v1_1_cwl_requirements_override_static(self):
        """Test conflicting requirements in input document via EnvVarRequirement
        Generated from::
            id: 229
            job: tests/env-job4.yaml
            label: cwl_requirements_override_static
            output:
                out:
                    checksum: sha1$715e62184492851512a020c36ab7118eca114a59
                    class: File
                    location: out
                    size: 23
            tags:
            - command_line_tool
            - input_object_requirements
            tool: tests/env-tool3.cwl
        """ # noqa: W293
        # Auto-generated conformance case: delegates execution and output
        # checking to the shared CWL populator.
        self.cwl_populator.run_conformance_test("""v1.1""", """Test conflicting requirements in input document via EnvVarRequirement""")
    @pytest.mark.cwl_conformance
    @pytest.mark.cwl_conformance_v1_1
    @pytest.mark.initial_work_dir
    @pytest.mark.command_line_tool
    @pytest.mark.green
    def test_conformance_v1_1_initial_work_dir_output(self):
        """Test output of InitialWorkDir
        Generated from::
            id: 230
            job: tests/initialworkdirrequirement-docker-out-job.json
            label: initial_work_dir_output
            output:
                OUTPUT:
                    checksum: sha1$aeb3d11bdf536511649129f4077d5cda6a324118
                    class: File
                    location: ref.fasta
                    secondaryFiles:
                    - checksum: sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709
                      class: File
                      location: ref.fasta.fai
                      size: 0
                    size: 12010
            tags:
            - initial_work_dir
            - command_line_tool
            tool: tests/initialworkdir-glob-fullpath.cwl
        """ # noqa: W293
        # Auto-generated conformance case: delegates execution and output
        # checking to the shared CWL populator.
        self.cwl_populator.run_conformance_test("""v1.1""", """Test output of InitialWorkDir""")
    @pytest.mark.cwl_conformance
    @pytest.mark.cwl_conformance_v1_1
    @pytest.mark.initial_work_dir
    @pytest.mark.command_line_tool
    @pytest.mark.green
    def test_conformance_v1_1_glob_full_path(self):
        """Test if full paths are allowed in glob
        Generated from::
            id: 231
            job: tests/initialworkdirrequirement-docker-out-job.json
            label: glob_full_path
            output:
                OUTPUT:
                    checksum: sha1$aeb3d11bdf536511649129f4077d5cda6a324118
                    class: File
                    location: ref.fasta
                    secondaryFiles:
                    - checksum: sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709
                      class: File
                      location: ref.fasta.fai
                      size: 0
                    size: 12010
            tags:
            - initial_work_dir
            - command_line_tool
            tool: tests/initialworkdir-glob-fullpath.cwl
        """ # noqa: W293
        # Auto-generated conformance case: delegates execution and output
        # checking to the shared CWL populator.
        self.cwl_populator.run_conformance_test("""v1.1""", """Test if full paths are allowed in glob""")
    @pytest.mark.cwl_conformance
    @pytest.mark.cwl_conformance_v1_1
    @pytest.mark.required
    @pytest.mark.command_line_tool
    @pytest.mark.red
    def test_conformance_v1_1_fail_glob_outside_output_dir(self):
        """Test fail trying to glob outside output directory
        Generated from::
            id: 232
            job: tests/empty.json
            label: fail_glob_outside_output_dir
            should_fail: true
            tags:
            - required
            - command_line_tool
            tool: tests/glob-path-error.cwl
        """ # noqa: W293
        # Auto-generated negative conformance case (should_fail: true in the
        # metadata above); delegates to the shared CWL populator.
        self.cwl_populator.run_conformance_test("""v1.1""", """Test fail trying to glob outside output directory""")
    @pytest.mark.cwl_conformance
    @pytest.mark.cwl_conformance_v1_1
    @pytest.mark.command_line_tool
    @pytest.mark.red
    def test_conformance_v1_1_symlink_to_file_out_of_workdir_illegal(self):
        """symlink to file outside of working directory should NOT be retrieved
        Generated from::
            id: 233
            job: tests/empty.json
            label: symlink_to_file_out_of_workdir_illegal
            output:
                output_file:
                    baesname: symlink.txt
                    checksum: sha1$cd28ec34f3f9425aca544b6332453708e8aaa82a
                    class: File
                    size: 27
            should_fail: true
            tags:
            - command_line_tool
            tool: tests/symlink-illegal.cwl
        """ # noqa: W293
        # NOTE(review): "baesname" in the generated metadata above looks like a
        # typo for "basename" in the upstream conformance YAML — verify there.
        # Auto-generated negative conformance case (should_fail: true);
        # delegates to the shared CWL populator.
        self.cwl_populator.run_conformance_test("""v1.1""", """symlink to file outside of working directory should NOT be retrieved""")
    @pytest.mark.cwl_conformance
    @pytest.mark.cwl_conformance_v1_1
    @pytest.mark.command_line_tool
    @pytest.mark.red
    def test_conformance_v1_1_symlink_to_file_in_workdir_legal(self):
        """symlink to file inside of working directory should be retrieved
        Generated from::
            id: 234
            job: tests/empty.json
            label: symlink-to-file-in-workdir-legal
            output:
                output_file:
                    basename: symlink.txt
                    checksum: sha1$cd28ec34f3f9425aca544b6332453708e8aaa82a
                    class: File
                    size: 27
            tags:
            - command_line_tool
            tool: tests/symlink-legal.cwl
        """ # noqa: W293
        # Auto-generated conformance case: delegates execution and output
        # checking to the shared CWL populator.
        self.cwl_populator.run_conformance_test("""v1.1""", """symlink to file inside of working directory should be retrieved""")
    @pytest.mark.cwl_conformance
    @pytest.mark.cwl_conformance_v1_1
    @pytest.mark.inplace_update
    @pytest.mark.workflow
    @pytest.mark.red
    def test_conformance_v1_1_inplace_update_on_file_content(self):
        """inplace update has side effect on file content
        Generated from::
            id: 235
            job: tests/empty.json
            label: inplace_update_on_file_content
            output:
                a: 4
                b: 4
            tags:
            - inplace_update
            - workflow
            tool: tests/inp_update_wf.cwl
        """ # noqa: W293
        # Auto-generated workflow conformance case: delegates execution and
        # output checking to the shared CWL populator.
        self.cwl_populator.run_conformance_test("""v1.1""", """inplace update has side effect on file content""")
    @pytest.mark.cwl_conformance
    @pytest.mark.cwl_conformance_v1_1
    @pytest.mark.inplace_update
    @pytest.mark.workflow
    @pytest.mark.red
    def test_conformance_v1_1_inplace_update_on_dir_content(self):
        """inplace update has side effect on directory content
        Generated from::
            id: 236
            job: tests/empty.json
            label: inplace_update_on_dir_content
            output:
                a:
                - basename: blurb
                  class: File
                  location: blurb
                b:
                - basename: blurb
                  class: File
                  location: blurb
            tags:
            - inplace_update
            - workflow
            tool: tests/inpdir_update_wf.cwl
        """ # noqa: W293
        # Auto-generated workflow conformance case: delegates execution and
        # output checking to the shared CWL populator.
        self.cwl_populator.run_conformance_test("""v1.1""", """inplace update has side effect on directory content""")
    @pytest.mark.cwl_conformance
    @pytest.mark.cwl_conformance_v1_1
    @pytest.mark.required
    @pytest.mark.command_line_tool
    @pytest.mark.red
    def test_conformance_v1_1_outputbinding_glob_directory(self):
        """Test that OutputBinding.glob accepts Directories
        Generated from::
            id: 237
            job: tests/empty.json
            label: outputbinding_glob_directory
            output:
                directories:
                - basename: a_dir
                  class: Directory
                  listing: []
                - basename: b_dir
                  class: Directory
                  listing: []
                - basename: c_dir
                  class: Directory
                  listing: []
            tags:
            - required
            - command_line_tool
            tool: tests/glob_directory.cwl
        """ # noqa: W293
        # Auto-generated conformance case: delegates execution and output
        # checking to the shared CWL populator.
        self.cwl_populator.run_conformance_test("""v1.1""", """Test that OutputBinding.glob accepts Directories""")
    @pytest.mark.cwl_conformance
    @pytest.mark.cwl_conformance_v1_1
    @pytest.mark.required
    @pytest.mark.command_line_tool
    @pytest.mark.red
    def test_conformance_v1_1_stage_file_array_to_dir(self):
        """Test that array of input files can be staged to directory with entryname
        Generated from::
            id: 238
            job: tests/stage_file_array.job.json
            label: stage_file_array_to_dir
            output:
                output:
                - basename: sfa-1.txt
                  checksum: sha1$4c1cd0638ab3580310823fd1556d27ecb4816df6
                  class: File
                  size: 49
                - basename: sfa-1.txt.sec
                  checksum: sha1$40f4ee1bcd1a9466fcd2e48cf7fc3798025d2f9a
                  class: File
                  size: 59
                - basename: sfa-2.txt
                  checksum: sha1$4c1cd0638ab3580310823fd1556d27ecb4816df6
                  class: File
                  size: 49
                - basename: sfa-2.txt.sec
                  checksum: sha1$40f4ee1bcd1a9466fcd2e48cf7fc3798025d2f9a
                  class: File
                  size: 59
            tags:
            - required
            - command_line_tool
            tool: tests/stage_file_array.cwl
        """ # noqa: W293
        # Auto-generated conformance case: delegates execution and output
        # checking to the shared CWL populator.
        self.cwl_populator.run_conformance_test("""v1.1""", """Test that array of input files can be staged to directory with entryname""")
    @pytest.mark.cwl_conformance
    @pytest.mark.cwl_conformance_v1_1
    @pytest.mark.required
    @pytest.mark.command_line_tool
    @pytest.mark.red
    def test_conformance_v1_1_stage_file_array_to_dir_basename(self):
        """Test that array of input files can be staged to directory with basename
        Generated from::
            id: 239
            job: tests/stage_file_array.job.json
            label: stage_file_array_to_dir_basename
            output:
                output:
                - basename: sfa-1.txt
                  checksum: sha1$4c1cd0638ab3580310823fd1556d27ecb4816df6
                  class: File
                  size: 49
                - basename: sfa-1.txt.sec
                  checksum: sha1$40f4ee1bcd1a9466fcd2e48cf7fc3798025d2f9a
                  class: File
                  size: 59
                - basename: sfa-2.txt
                  checksum: sha1$4c1cd0638ab3580310823fd1556d27ecb4816df6
                  class: File
                  size: 49
                - basename: sfa-2.txt.sec
                  checksum: sha1$40f4ee1bcd1a9466fcd2e48cf7fc3798025d2f9a
                  class: File
                  size: 59
            tags:
            - required
            - command_line_tool
            tool: tests/stage_file_array_basename.cwl
        """ # noqa: W293
        # Auto-generated conformance case: delegates execution and output
        # checking to the shared CWL populator.
        self.cwl_populator.run_conformance_test("""v1.1""", """Test that array of input files can be staged to directory with basename""")
    @pytest.mark.cwl_conformance
    @pytest.mark.cwl_conformance_v1_1
    @pytest.mark.required
    @pytest.mark.command_line_tool
    @pytest.mark.red
    def test_conformance_v1_1_stage_file_array_to_dir_basename_entryname(self):
        """Test that if array of input files are staged to directory with basename and entryname, entryname overrides
        Generated from::
            id: 240
            job: tests/stage_file_array.job.json
            label: stage_file_array_to_dir_basename_entryname
            output:
                output:
                - basename: sfa-1.txt
                  checksum: sha1$4c1cd0638ab3580310823fd1556d27ecb4816df6
                  class: File
                  size: 49
                - basename: sfa-1.txt.sec
                  checksum: sha1$40f4ee1bcd1a9466fcd2e48cf7fc3798025d2f9a
                  class: File
                  size: 59
                - basename: sfa-2.txt
                  checksum: sha1$4c1cd0638ab3580310823fd1556d27ecb4816df6
                  class: File
                  size: 49
                - basename: sfa-2.txt.sec
                  checksum: sha1$40f4ee1bcd1a9466fcd2e48cf7fc3798025d2f9a
                  class: File
                  size: 59
            tags:
            - required
            - command_line_tool
            tool: tests/stage_file_array_basename_and_entryname.cwl
        """ # noqa: W293
        # Auto-generated conformance case: delegates execution and output
        # checking to the shared CWL populator.
        self.cwl_populator.run_conformance_test("""v1.1""", """Test that if array of input files are staged to directory with basename and entryname, entryname overrides""")
    @pytest.mark.cwl_conformance
    @pytest.mark.cwl_conformance_v1_1
    @pytest.mark.command_line_tool
    @pytest.mark.red
    def test_conformance_v1_1_tmpdir_is_not_outdir(self):
        """Test that runtime.tmpdir is not runtime.outdir
        Generated from::
            id: 241
            job: tests/empty.json
            label: tmpdir_is_not_outdir
            output:
                foo:
                    basename: foo
                    checksum: sha1$fa98d6085770a79e44853d575cd3ab40c0f1f4de
                    class: File
            tags:
            - command_line_tool
            tool: tests/runtime-paths-distinct.cwl
        """ # noqa: W293
        # Auto-generated conformance case: delegates execution and output
        # checking to the shared CWL populator.
        self.cwl_populator.run_conformance_test("""v1.1""", """Test that runtime.tmpdir is not runtime.outdir""")
    @pytest.mark.cwl_conformance
    @pytest.mark.cwl_conformance_v1_1
    @pytest.mark.command_line_tool
    @pytest.mark.green
    def test_conformance_v1_1_listing_default_none(self):
        """Test that default behavior is 'no_listing' if not specified
        Generated from::
            id: 242
            job: tests/listing-job.yml
            label: listing_default_none
            output:
                out: true
            tags:
            - command_line_tool
            tool: tests/listing_none1.cwl
        """ # noqa: W293
        # Auto-generated conformance case: delegates execution and output
        # checking to the shared CWL populator.
        self.cwl_populator.run_conformance_test("""v1.1""", """Test that default behavior is 'no_listing' if not specified""")
    @pytest.mark.cwl_conformance
    @pytest.mark.cwl_conformance_v1_1
    @pytest.mark.command_line_tool
    @pytest.mark.red
    def test_conformance_v1_1_listing_requirement_none(self):
        """Test that 'listing' is not present when LoadListingRequirement is 'no_listing'
        Generated from::
            id: 243
            job: tests/listing-job.yml
            label: listing_requirement_none
            output:
                out: true
            tags:
            - command_line_tool
            tool: tests/listing_none2.cwl
        """ # noqa: W293
        # Auto-generated conformance case: delegates execution and output
        # checking to the shared CWL populator.
        self.cwl_populator.run_conformance_test("""v1.1""", """Test that 'listing' is not present when LoadListingRequirement is 'no_listing'""")
    @pytest.mark.cwl_conformance
    @pytest.mark.cwl_conformance_v1_1
    @pytest.mark.command_line_tool
    @pytest.mark.green
    def test_conformance_v1_1_listing_loadListing_none(self):
        """Test that 'listing' is not present when loadListing on input parameter is 'no_listing'
        Generated from::
            id: 244
            job: tests/listing-job.yml
            label: listing_loadListing_none
            output:
                out: true
            tags:
            - command_line_tool
            tool: tests/listing_none3.cwl
        """ # noqa: W293
        # Auto-generated conformance case: delegates execution and output
        # checking to the shared CWL populator.
        self.cwl_populator.run_conformance_test("""v1.1""", """Test that 'listing' is not present when loadListing on input parameter is 'no_listing'""")
    @pytest.mark.cwl_conformance
    @pytest.mark.cwl_conformance_v1_1
    @pytest.mark.command_line_tool
    @pytest.mark.red
    def test_conformance_v1_1_listing_requirement_shallow(self):
        """Test that 'listing' is present in top directory object but not subdirectory object when LoadListingRequirement is 'shallow_listing'
        Generated from::
            id: 245
            job: tests/listing-job.yml
            label: listing_requirement_shallow
            output:
                out: true
            tags:
            - command_line_tool
            tool: tests/listing_shallow1.cwl
        """ # noqa: W293
        # Auto-generated conformance case: delegates execution and output
        # checking to the shared CWL populator. The doc argument spans two
        # source lines; the embedded trailing newline is part of the string.
        self.cwl_populator.run_conformance_test("""v1.1""", """Test that 'listing' is present in top directory object but not subdirectory object when LoadListingRequirement is 'shallow_listing'
""")
    @pytest.mark.cwl_conformance
    @pytest.mark.cwl_conformance_v1_1
    @pytest.mark.command_line_tool
    @pytest.mark.green
    def test_conformance_v1_1_listing_loadListing_shallow(self):
        """Test that 'listing' is present in top directory object but not subdirectory object when loadListing on input parameter loadListing is 'shallow_listing'
        Generated from::
            id: 246
            job: tests/listing-job.yml
            label: listing_loadListing_shallow
            output:
                out: true
            tags:
            - command_line_tool
            tool: tests/listing_shallow2.cwl
        """ # noqa: W293
        # Auto-generated conformance case: delegates execution and output
        # checking to the shared CWL populator. The doc argument spans two
        # source lines; the embedded trailing newline is part of the string.
        self.cwl_populator.run_conformance_test("""v1.1""", """Test that 'listing' is present in top directory object but not subdirectory object when loadListing on input parameter loadListing is 'shallow_listing'
""")
@pytest.mark.cwl_conformance
@pytest.mark.cwl_conformance_v1_1
@pytest.mark.command_line_tool
@pytest.mark.red
def test_conformance_v1_1_listing_requirement_deep(self):
"""Test that 'listing' is present in top directory object and in subdirectory objects when LoadListingRequirement is 'deep_listing'
Generated from::
id: 247
job: tests/listing-job.yml
label: listing_requirement_deep
| |
= Var(within=Reals,bounds=(0,0.000845292359445023),initialize=0.000845292359445023)
m.x1237 = Var(within=Reals,bounds=(0,0.000473679951457969),initialize=0.000473679951457969)
m.x1238 = Var(within=Reals,bounds=(0,5.80519362905298E-5),initialize=5.80519362905298E-5)
m.x1239 = Var(within=Reals,bounds=(0,None),initialize=0.00191079608847032)
m.x1240 = Var(within=Reals,bounds=(0,0.00146592252044452),initialize=0.00146592252044452)
m.x1241 = Var(within=Reals,bounds=(0,0.00128025396419771),initialize=0.00128025396419771)
m.x1242 = Var(within=Reals,bounds=(0,0.00318804409827166),initialize=0.00318804409827166)
m.x1243 = Var(within=Reals,bounds=(0,0.00117505333232835),initialize=0.00117505333232835)
m.x1244 = Var(within=Reals,bounds=(0,0.00108874725490735),initialize=0.00108874725490735)
m.x1245 = Var(within=Reals,bounds=(0,0.00100588135847852),initialize=0.00100588135847852)
m.x1246 = Var(within=Reals,bounds=(0,0.00107244273250483),initialize=0.00107244273250483)
m.x1247 = Var(within=Reals,bounds=(0,None),initialize=0.0120422607882595)
m.x1248 = Var(within=Reals,bounds=(0,0.00413879380485136),initialize=0.00413879380485136)
m.x1249 = Var(within=Reals,bounds=(0,0.000951844769910024),initialize=0.000951844769910024)
m.x1250 = Var(within=Reals,bounds=(0,0.00013393675573892),initialize=0.00013393675573892)
m.x1251 = Var(within=Reals,bounds=(0,None),initialize=0.0167834683880805)
m.x1252 = Var(within=Reals,bounds=(0,0.000261099429113438),initialize=0.000261099429113438)
m.x1253 = Var(within=Reals,bounds=(0,1.03410352260198E-5),initialize=1.03410352260198E-5)
m.x1254 = Var(within=Reals,bounds=(0,0.00342438814810023),initialize=0.00342438814810023)
m.x1255 = Var(within=Reals,bounds=(0,None),initialize=0.009006599359756)
m.x1256 = Var(within=Reals,bounds=(0,None),initialize=0.0459171493520904)
m.x1257 = Var(within=Reals,bounds=(0,None),initialize=0.00996206555745407)
m.x1258 = Var(within=Reals,bounds=(0,0.00115951901737883),initialize=0.00115951901737883)
m.x1259 = Var(within=Reals,bounds=(0,None),initialize=0.079687739264351)
m.x1260 = Var(within=Reals,bounds=(0,0.000269220724132992),initialize=0.000269220724132992)
m.x1261 = Var(within=Reals,bounds=(0,0.000294601952847613),initialize=0.000294601952847613)
m.x1262 = Var(within=Reals,bounds=(0,0.000294930600727533),initialize=0.000294930600727533)
m.x1263 = Var(within=Reals,bounds=(0,0.000326629792308735),initialize=0.000326629792308735)
m.x1264 = Var(within=Reals,bounds=(0,0.000330905152927294),initialize=0.000330905152927294)
m.x1265 = Var(within=Reals,bounds=(0,0.000342417927286093),initialize=0.000342417927286093)
m.x1266 = Var(within=Reals,bounds=(0,0.000177765030972821),initialize=0.000177765030972821)
m.x1267 = Var(within=Reals,bounds=(0,0.000268244323978542),initialize=0.000268244323978542)
m.x1268 = Var(within=Reals,bounds=(0,0.000326947937426483),initialize=0.000326947937426483)
m.x1269 = Var(within=Reals,bounds=(0,0.000344902858029607),initialize=0.000344902858029607)
m.x1270 = Var(within=Reals,bounds=(0,0.000306451894062758),initialize=0.000306451894062758)
m.x1271 = Var(within=Reals,bounds=(0,None),initialize=0.000356963920882274)
m.x1272 = Var(within=Reals,bounds=(0,None),initialize=0.13958258462053)
m.x1273 = Var(within=Reals,bounds=(0,None),initialize=0.113341104572517)
m.x1274 = Var(within=Reals,bounds=(0,None),initialize=0.126201426550782)
m.x1275 = Var(within=Reals,bounds=(0,None),initialize=0.169523316667137)
m.x1276 = Var(within=Reals,bounds=(0,None),initialize=0.0655416504039948)
m.x1277 = Var(within=Reals,bounds=(0,None),initialize=0.0426874248505515)
m.x1278 = Var(within=Reals,bounds=(0,None),initialize=0.109123407069524)
m.x1279 = Var(within=Reals,bounds=(0,None),initialize=0.0376830006435906)
m.x1280 = Var(within=Reals,bounds=(0,0.00283676150717773),initialize=0.00283676150717773)
m.x1281 = Var(within=Reals,bounds=(0,None),initialize=0.0584797227706253)
m.x1282 = Var(within=Reals,bounds=(0,0.00360568103628853),initialize=0.00360568103628853)
m.x1283 = Var(within=Reals,bounds=(0,0.00387960712898789),initialize=0.00387960712898789)
m.x1284 = Var(within=Reals,bounds=(0,0.00951885438352099),initialize=0.00951885438352099)
m.x1285 = Var(within=Reals,bounds=(0,0.0058681230071236),initialize=0.0058681230071236)
m.x1286 = Var(within=Reals,bounds=(0,None),initialize=0.0270707537663848)
m.x1287 = Var(within=Reals,bounds=(0,None),initialize=0.0356426848604137)
m.x1288 = Var(within=Reals,bounds=(0,None),initialize=0.0261302974812796)
m.x1289 = Var(within=Reals,bounds=(0,None),initialize=0.0355677860334484)
m.x1290 = Var(within=Reals,bounds=(0,0.00816461188734863),initialize=0.00816461188734863)
m.x1291 = Var(within=Reals,bounds=(0,None),initialize=0.045068335751622)
m.x1292 = Var(within=Reals,bounds=(0,None),initialize=0.0152134941454973)
m.x1293 = Var(within=Reals,bounds=(0,None),initialize=0.0520064003765858)
m.x1294 = Var(within=Reals,bounds=(0,0.00534747306850579),initialize=0.00534747306850579)
m.x1295 = Var(within=Reals,bounds=(0,0.00204827123912732),initialize=0.00204827123912732)
m.x1296 = Var(within=Reals,bounds=(0,0.000370720204421088),initialize=0.000370720204421088)
m.x1297 = Var(within=Reals,bounds=(0,None),initialize=0.0824486487654983)
m.x1298 = Var(within=Reals,bounds=(0,0.00739918998503103),initialize=0.00739918998503103)
m.x1299 = Var(within=Reals,bounds=(0,None),initialize=0.010038337553891)
m.x1300 = Var(within=Reals,bounds=(0,None),initialize=0.0103772035447057)
m.x1301 = Var(within=Reals,bounds=(0,None),initialize=0.0641743578708976)
m.x1302 = Var(within=Reals,bounds=(0,0.00749487595772554),initialize=0.00749487595772554)
m.x1303 = Var(within=Reals,bounds=(0,None),initialize=0.111060371156781)
m.x1304 = Var(within=Reals,bounds=(0,0.0064140850143683),initialize=0.0064140850143683)
m.x1305 = Var(within=Reals,bounds=(0,None),initialize=0.123480985690768)
m.x1306 = Var(within=Reals,bounds=(0,None),initialize=0.108842388502275)
m.x1307 = Var(within=Reals,bounds=(0,0.00140409137900354),initialize=0.00140409137900354)
m.x1308 = Var(within=Reals,bounds=(0,0.00434482763363985),initialize=0.00434482763363985)
m.x1309 = Var(within=Reals,bounds=(0,None),initialize=0.0129731512976471)
m.x1310 = Var(within=Reals,bounds=(0,0.00125476325295653),initialize=0.00125476325295653)
m.x1311 = Var(within=Reals,bounds=(0,None),initialize=0.145054707187009)
m.x1312 = Var(within=Reals,bounds=(0,None),initialize=0.0516145454685561)
m.x1313 = Var(within=Reals,bounds=(0,None),initialize=0.0649487459676995)
m.x1314 = Var(within=Reals,bounds=(0,None),initialize=0.0777099444509746)
m.x1315 = Var(within=Reals,bounds=(0,None),initialize=0.0631005004262326)
m.x1316 = Var(within=Reals,bounds=(0,None),initialize=0.0702602396535117)
m.x1317 = Var(within=Reals,bounds=(0,None),initialize=0.0943788765422429)
m.x1318 = Var(within=Reals,bounds=(0,None),initialize=0.0364890650647152)
m.x1319 = Var(within=Reals,bounds=(0,None),initialize=0.023765410440778)
m.x1320 = Var(within=Reals,bounds=(0,None),initialize=0.0607523777033327)
m.x1321 = Var(within=Reals,bounds=(0,None),initialize=0.0209792926153394)
m.x1322 = Var(within=Reals,bounds=(0,0.00378827219371737),initialize=0.00378827219371737)
m.x1323 = Var(within=Reals,bounds=(0,None),initialize=0.0379339101769088)
m.x1324 = Var(within=Reals,bounds=(0,0.00304574397856642),initialize=0.00304574397856642)
m.x1325 = Var(within=Reals,bounds=(0,0.00327713126407908),initialize=0.00327713126407908)
m.x1326 = Var(within=Reals,bounds=(0,0.00804064284380023),initialize=0.00804064284380023)
m.x1327 = Var(within=Reals,bounds=(0,0.0077438830572883),initialize=0.0077438830572883)
m.x1328 = Var(within=Reals,bounds=(0,None),initialize=0.0357239872417547)
m.x1329 = Var(within=Reals,bounds=(0,None),initialize=0.0470359573362315)
m.x1330 = Var(within=Reals,bounds=(0,None),initialize=0.0344829117763112)
m.x1331 = Var(within=Reals,bounds=(0,None),initialize=0.0469371169137584)
m.x1332 = Var(within=Reals,bounds=(0,None),initialize=0.0107744502947571)
m.x1333 = Var(within=Reals,bounds=(0,None),initialize=0.0594745408750792)
m.x1334 = Var(within=Reals,bounds=(0,None),initialize=0.0128509448618061)
m.x1335 = Var(within=Reals,bounds=(0,None),initialize=0.0509083142436066)
m.x1336 = Var(within=Reals,bounds=(0,None),initialize=0.0333155537349904)
m.x1337 = Var(within=Reals,bounds=(0,None),initialize=0.0127610349143935)
m.x1338 = Var(within=Reals,bounds=(0,0.000325853652061895),initialize=0.000325853652061895)
m.x1339 = Var(within=Reals,bounds=(0,None),initialize=0.0507024430682763)
m.x1340 = Var(within=Reals,bounds=(0,None),initialize=0.0341965069390401)
m.x1341 = Var(within=Reals,bounds=(0,None),initialize=0.0463937377621777)
m.x1342 = Var(within=Reals,bounds=(0,None),initialize=0.0332600048920687)
m.x1343 = Var(within=Reals,bounds=(0,None),initialize=0.062429771682457)
m.x1344 = Var(within=Reals,bounds=(0,0.00729112702257881),initialize=0.00729112702257881)
m.x1345 = Var(within=Reals,bounds=(0,None),initialize=0.0454942999988869)
m.x1346 = Var(within=Reals,bounds=(0,None),initialize=0.0162984779258227)
m.x1347 = Var(within=Reals,bounds=(0,None),initialize=0.152586274291039)
m.x1348 = Var(within=Reals,bounds=(0,None),initialize=0.0584703761739015)
m.x1349 = Var(within=Reals,bounds=(0,0.00751593508906119),initialize=0.00751593508906119)
m.x1350 = Var(within=Reals,bounds=(0,None),initialize=0.023257348457457)
m.x1351 = Var(within=Reals,bounds=(0,None),initialize=0.0344155277262696)
m.x1352 = Var(within=Reals,bounds=(0,0.00515593458613595),initialize=0.00515593458613595)
m.x1353 = Var(within=Reals,bounds=(0,None),initialize=0.177012505543565)
m.x1354 = Var(within=Reals,bounds=(0,None),initialize=0.0725383200297681)
m.x1355 = Var(within=Reals,bounds=(0,None),initialize=0.054937009654841)
m.x1356 = Var(within=Reals,bounds=(0,None),initialize=0.0487538700372411)
m.x1357 = Var(within=Reals,bounds=(0,None),initialize=0.0395881584885991)
m.x1358 = Var(within=Reals,bounds=(0,None),initialize=0.0440800545805788)
m.x1359 = Var(within=Reals,bounds=(0,None),initialize=0.0592116686443418)
m.x1360 = Var(within=Reals,bounds=(0,None),initialize=0.0228926059401301)
m.x1361 = Var(within=Reals,bounds=(0,None),initialize=0.0149100059226313)
m.x1362 = Var(within=Reals,bounds=(0,None),initialize=0.0381149870576764)
m.x1363 = Var(within=Reals,bounds=(0,None),initialize=0.0131620439683467)
m.x1364 = Var(within=Reals,bounds=(0,0.0089437373321371),initialize=0.0089437373321371)
m.x1365 = Var(within=Reals,bounds=(0,None),initialize=0.0167647787189423)
m.x1366 = Var(within=Reals,bounds=(0,0.00383791416949944),initialize=0.00383791416949944)
m.x1367 = Var(within=Reals,bounds=(0,0.00412948317462937),initialize=0.00412948317462937)
m.x1368 = Var(within=Reals,bounds=(0,None),initialize=0.0101319406093451)
m.x1369 = Var(within=Reals,bounds=(0,0.00686632531837035),initialize=0.00686632531837035)
m.x1370 = Var(within=Reals,bounds=(0,None),initialize=0.0316756485417659)
m.x1371 = Var(within=Reals,bounds=(0,None),initialize=0.0417057156393381)
m.x1372 = Var(within=Reals,bounds=(0,None),initialize=0.0305752150993518)
m.x1373 = Var(within=Reals,bounds=(0,None),initialize=0.0416180760804391)
m.x1374 = Var(within=Reals,bounds=(0,0.00955346049302552),initialize=0.00955346049302552)
m.x1375 = Var(within=Reals,bounds=(0,None),initialize=0.0527347253035623)
m.x1376 = Var(within=Reals,bounds=(0,None),initialize=0.0161933582479891)
m.x1377 = Var(within=Reals,bounds=(0,None),initialize=0.0510327890182422)
m.x1378 = Var(within=Reals,bounds=(0,None),initialize=0.0258891320651404)
m.x1379 = Var(within=Reals,bounds=(0,0.00991645286206428),initialize=0.00991645286206428)
m.x1380 = Var(within=Reals,bounds=(0,0.00016351602982817),initialize=0.00016351602982817)
m.x1381 = Var(within=Reals,bounds=(0,None),initialize=0.0297431226640182)
m.x1382 = Var(within=Reals,bounds=(0,None),initialize=0.0330812489774132)
m.x1383 = Var(within=Reals,bounds=(0,None),initialize=0.044880688914787)
m.x1384 = Var(within=Reals,bounds=(0,None),initialize=0.0306054827567804)
m.x1385 = Var(within=Reals,bounds=(0,None),initialize=0.0308575409562879)
m.x1386 = Var(within=Reals,bounds=(0,0.00360382946554879),initialize=0.00360382946554879)
m.x1387 = Var(within=Reals,bounds=(0,None),initialize=0.0244693450057249)
m.x1388 = Var(within=Reals,bounds=(0,None),initialize=0.0179775342007283)
m.x1389 = Var(within=Reals,bounds=(0,None),initialize=0.150592060385236)
m.x1390 = Var(within=Reals,bounds=(0,None),initialize=0.0300267554925744)
m.x1391 = Var(within=Reals,bounds=(0,None),initialize=0.017024514152695)
m.x1392 = Var(within=Reals,bounds=(0,None),initialize=0.0526807447478357)
m.x1393 = Var(within=Reals,bounds=(0,None),initialize=0.0824209807733466)
m.x1394 = Var(within=Reals,bounds=(0,None),initialize=0.00874814212511355)
m.x1395 = Var(within=Reals,bounds=(0,None),initialize=0.15039625986602)
m.x1396 = Var(within=Reals,bounds=(0,None),initialize=0.0425113164264099)
m.x1397 = Var(within=Reals,bounds=(0,None),initialize=0.0647213104320693)
m.x1398 = Var(within=Reals,bounds=(0,None),initialize=0.0217926717341698)
m.x1399 = Var(within=Reals,bounds=(0,None),initialize=0.0176956566082512)
m.x1400 = Var(within=Reals,bounds=(0,None),initialize=0.0197035057681588)
m.x1401 = Var(within=Reals,bounds=(0,None),initialize=0.0264672416079566)
m.x1402 = Var(within=Reals,bounds=(0,None),initialize=0.0102328501514213)
m.x1403 = Var(within=Reals,bounds=(0,0.00666467840149368),initialize=0.00666467840149368)
m.x1404 = Var(within=Reals,bounds=(0,None),initialize=0.0170371582905232)
m.x1405 = Var(within=Reals,bounds=(0,0.00588335045676959),initialize=0.00588335045676959)
m.x1406 = Var(within=Reals,bounds=(0,0.00843440042788405),initialize=0.00843440042788405)
m.x1407 = Var(within=Reals,bounds=(0,0.00869456405458045),initialize=0.00869456405458045)
m.x1408 = Var(within=Reals,bounds=(0,0.00296286926826811),initialize=0.00296286926826811)
m.x1409 = Var(within=Reals,bounds=(0,0.00318796050447771),initialize=0.00318796050447771)
m.x1410 = Var(within=Reals,bounds=(0,0.00782185690808752),initialize=0.00782185690808752)
m.x1411 = Var(within=Reals,bounds=(0,0.00763635073837464),initialize=0.00763635073837464)
m.x1412 = Var(within=Reals,bounds=(0,None),initialize=0.0352279204545203)
m.x1413 = Var(within=Reals,bounds=(0,None),initialize=0.0463828114238679)
m.x1414 = Var(within=Reals,bounds=(0,None),initialize=0.034004078684596)
m.x1415 = Var(within=Reals,bounds=(0,None),initialize=0.0462853435091861)
m.x1416 = Var(within=Reals,bounds=(0,None),initialize=0.0106248352222355)
m.x1417 = Var(within=Reals,bounds=(0,None),initialize=0.0586486715729075)
m.x1418 = Var(within=Reals,bounds=(0,None),initialize=0.0125012705819006)
m.x1419 = Var(within=Reals,bounds=(0,None),initialize=0.0446628596401571)
m.x1420 = Var(within=Reals,bounds=(0,None),initialize=0.0625586057754439)
m.x1421 = Var(within=Reals,bounds=(0,None),initialize=0.0239621577010673)
m.x1422 = Var(within=Reals,bounds=(0,0.000147495632473385),initialize=0.000147495632473385)
m.x1423 = Var(within=Reals,bounds=(0,None),initialize=0.0173354702347733)
m.x1424 = Var(within=Reals,bounds=(0,None),initialize=0.0262508974313909)
m.x1425 = Var(within=Reals,bounds=(0,None),initialize=0.0356140834391303)
m.x1426 = Var(within=Reals,bounds=(0,None),initialize=0.0476821270030171)
m.x1427 = Var(within=Reals,bounds=(0,None),initialize=0.0330180077949233)
m.x1428 = Var(within=Reals,bounds=(0,0.00385614879531796),initialize=0.00385614879531796)
m.x1429 = Var(within=Reals,bounds=(0,None),initialize=0.0635298607010435)
m.x1430 = Var(within=Reals,bounds=(0,None),initialize=0.097659167090575)
m.x1431 = Var(within=Reals,bounds=(0,None),initialize=0.140990936733732)
m.x1432 = Var(within=Reals,bounds=(0,None),initialize=0.0297971134288011)
m.x1433 = Var(within=Reals,bounds=(0,None),initialize=0.129369244849129)
m.x1434 = Var(within=Reals,bounds=(0,None),initialize=0.390236365520873)
m.x1435 = Var(within=Reals,bounds=(0,None),initialize=0.36598058763357)
m.x1436 = Var(within=Reals,bounds=(0,None),initialize=0.0983659247155885)
m.x1437 = Var(within=Reals,bounds=(0,None),initialize=0.171010197521755)
m.x1438 = Var(within=Reals,bounds=(0,None),initialize=0.0453006222525726)
m.x1439 = Var(within=Reals,bounds=(0,None),initialize=0.192259591727202)
m.x1440 = Var(within=Reals,bounds=(0,None),initialize=0.0243519455821409)
m.x1441 = Var(within=Reals,bounds=(0,None),initialize=0.0197737878136677)
m.x1442 = Var(within=Reals,bounds=(0,0.00945905619603726),initialize=0.00945905619603726)
m.x1443 = Var(within=Reals,bounds=(0,None),initialize=0.0119195236617396)
m.x1444 = Var(within=Reals,bounds=(0,0.00287074414875242),initialize=0.00287074414875242)
m.x1445 = Var(within=Reals,bounds=(0,None),initialize=0.0243359268813812)
m.x1446 = Var(within=Reals,bounds=(0,None),initialize=0.0699009500076035)
m.x1447 = Var(within=Reals,bounds=(0,None),initialize=0.0967524299745289)
m.x1448 = Var(within=Reals,bounds=(0,None),initialize=0.00380892406650911)
m.x1449 = Var(within=Reals,bounds=(0,0.0001401878907671),initialize=0.0001401878907671)
m.x1450 = Var(within=Reals,bounds=(0,None),initialize=0.00369769962771485)
m.x1451 = Var(within=Reals,bounds=(0,0.000123680931999746),initialize=0.000123680931999746)
m.x1452 = Var(within=Reals,bounds=(0,0.000707229901590938),initialize=0.000707229901590938)
m.x1453 = Var(within=Reals,bounds=(0,0.000130784858738764),initialize=0.000130784858738764)
m.x1454 = Var(within=Reals,bounds=(0,0.000150102256357366),initialize=0.000150102256357366)
m.x1455 = Var(within=Reals,bounds=(0,0.00407753442014462),initialize=0.00407753442014462)
m.x1456 = Var(within=Reals,bounds=(0,0.000555708176145999),initialize=0.000555708176145999)
m.x1457 = Var(within=Reals,bounds=(0,0.00465098352385136),initialize=0.00465098352385136)
m.x1458 = Var(within=Reals,bounds=(0,None),initialize=0.0379583855404499)
m.x1459 = Var(within=Reals,bounds=(0,None),initialize=0.0392958464228881)
m.x1460 = Var(within=Reals,bounds=(0,0.00940362915916169),initialize=0.00940362915916169)
m.x1461 = Var(within=Reals,bounds=(0,None),initialize=0.0186235343316913)
m.x1462 = Var(within=Reals,bounds=(0,0.00309999908668646),initialize=0.00309999908668646)
m.x1463 = Var(within=Reals,bounds=(0,0.000478640853481764),initialize=0.000478640853481764)
m.x1464 = Var(within=Reals,bounds=(0,4.08777802858145E-6),initialize=4.08777802858145E-6)
m.x1465 = Var(within=Reals,bounds=(0,None),initialize=0.0157466006883092)
m.x1466 = Var(within=Reals,bounds=(0,0.000321950863856989),initialize=0.000321950863856989)
m.x1467 = Var(within=Reals,bounds=(0,0.00271708958744996),initialize=0.00271708958744996)
m.x1468 = Var(within=Reals,bounds=(0,0.00761599763067732),initialize=0.00761599763067732)
m.x1469 = Var(within=Reals,bounds=(0,0.000660449762370765),initialize=0.000660449762370765)
m.x1470 = Var(within=Reals,bounds=(0,0.000171099836091178),initialize=0.000171099836091178)
m.x1471 = Var(within=Reals,bounds=(0,None),initialize=0.0128921410887242)
m.x1472 = Var(within=Reals,bounds=(0,0.000360365500276323),initialize=0.000360365500276323)
m.x1473 = Var(within=Reals,bounds=(0,None),initialize=0.0103358357126028)
m.x1474 = Var(within=Reals,bounds=(0,0.000903294068877258),initialize=0.000903294068877258)
m.x1475 = Var(within=Reals,bounds=(0,0.00274031481343043),initialize=0.00274031481343043)
m.x1476 = Var(within=Reals,bounds=(0,0.0038740867149733),initialize=0.0038740867149733)
m.x1477 = Var(within=Reals,bounds=(0,0.00295457082860339),initialize=0.00295457082860339)
m.x1478 = Var(within=Reals,bounds=(0,None),initialize=0.109720284054586)
m.x1479 = Var(within=Reals,bounds=(0,0.00721157528663401),initialize=0.00721157528663401)
m.x1480 = Var(within=Reals,bounds=(0,None),initialize=0.0147650326498675)
m.x1481 = Var(within=Reals,bounds=(0,None),initialize=0.0119892113628273)
m.x1482 = Var(within=Reals,bounds=(0,0.00573519980571272),initialize=0.00573519980571272)
m.x1483 = Var(within=Reals,bounds=(0,0.0072270264994975),initialize=0.0072270264994975)
m.x1484 = Var(within=Reals,bounds=(0,0.00174058499526341),initialize=0.00174058499526341)
m.x1485 = Var(within=Reals,bounds=(0,None),initialize=0.0147553202168741)
m.x1486 = Var(within=Reals,bounds=(0,None),initialize=0.0423822320741357)
m.x1487 = Var(within=Reals,bounds=(0,None),initialize=0.0586627784096066)
m.x1488 = Var(within=Reals,bounds=(0,0.000792934285475461),initialize=0.000792934285475461)
m.x1489 = Var(within=Reals,bounds=(0,0.000313317811412885),initialize=0.000313317811412885)
m.x1490 = Var(within=Reals,bounds=(0,None),initialize=0.00159383565101849)
m.x1491 = Var(within=Reals,bounds=(0,5.33107333259005E-5),initialize=5.33107333259005E-5)
m.x1492 = Var(within=Reals,bounds=(0,0.000304840399196659),initialize=0.000304840399196659)
m.x1493 = Var(within=Reals,bounds=(0,0.000144261624802634),initialize=0.000144261624802634)
m.x1494 = Var(within=Reals,bounds=(0,0.000165569589610582),initialize=0.000165569589610582)
m.x1495 = Var(within=Reals,bounds=(0,0.00449770521076673),initialize=0.00449770521076673)
m.x1496 = Var(within=Reals,bounds=(0,0.000612971296372008),initialize=0.000612971296372008)
m.x1497 = Var(within=Reals,bounds=(0,None),initialize=0.005130245554046)
m.x1498 = Var(within=Reals,bounds=(0,None),initialize=0.0467721177244372)
m.x1499 = Var(within=Reals,bounds=(0,None),initialize=0.0433450990246809)
m.x1500 = Var(within=Reals,bounds=(0,0.00405328742510416),initialize=0.00405328742510416)
m.x1501 = Var(within=Reals,bounds=(0,None),initialize=0.0107160372046033)
m.x1502 = Var(within=Reals,bounds=(0,0.000737708479192616),initialize=0.000737708479192616)
m.x1503 = Var(within=Reals,bounds=(0,0.000113902425848424),initialize=0.000113902425848424)
m.x1504 = Var(within=Reals,bounds=(0,6.22257824090113E-7),initialize=6.22257824090113E-7)
m.x1505 = Var(within=Reals,bounds=(0,0.00348690316193279),initialize=0.00348690316193279)
m.x1506 = Var(within=Reals,bounds=(0,0.000209723405101388),initialize=0.000209723405101388)
m.x1507 = Var(within=Reals,bounds=(0,0.00176995108327665),initialize=0.00176995108327665)
m.x1508 = Var(within=Reals,bounds=(0,0.0049583350119035),initialize=0.0049583350119035)
m.x1509 = Var(within=Reals,bounds=(0,0.00053097307306314),initialize=0.00053097307306314)
m.x1510 = Var(within=Reals,bounds=(0,0.000137556875550712),initialize=0.000137556875550712)
m.x1511 = Var(within=Reals,bounds=(0,None),initialize=0.00234625685547294)
m.x1512 = Var(within=Reals,bounds=(0,None),initialize=0.000693216494278924)
m.x1513 = Var(within=Reals,bounds=(0,0.00124130524057124),initialize=0.00124130524057124)
m.x1514 = Var(within=Reals,bounds=(0,0.00384110136396066),initialize=0.00384110136396066)
m.x1515 = Var(within=Reals,bounds=(0,0.000777690887178421),initialize=0.000777690887178421)
m.x1516 = Var(within=Reals,bounds=(0,0.000228138781323752),initialize=0.000228138781323752)
m.x1517 = Var(within=Reals,bounds=(0,None),initialize=0.0350785571596036)
m.x1518 = Var(within=Reals,bounds=(0,0.000710836716611692),initialize=0.000710836716611692)
m.x1519 = Var(within=Reals,bounds=(0,0.00407166622349274),initialize=0.00407166622349274)
m.x1520 = Var(within=Reals,bounds=(0,None),initialize=0.00684188469546876)
m.x1521 = Var(within=Reals,bounds=(0,None),initialize=0.0055556126206605)
m.x1522 = Var(within=Reals,bounds=(0,0.00265760169358741),initialize=0.00265760169358741)
m.x1523 = Var(within=Reals,bounds=(0,0.00334889079985224),initialize=0.00334889079985224)
m.x1524 = Var(within=Reals,bounds=(0,0.000806559804008439),initialize=0.000806559804008439)
m.x1525 = Var(within=Reals,bounds=(0,None),initialize=0.00683738410625714)
m.x1526 = Var(within=Reals,bounds=(0,None),initialize=0.0196392620229281)
m.x1527 = Var(within=Reals,bounds=(0,None),initialize=0.0271834120053887)
m.x1528 = Var(within=Reals,bounds=(0,0.000614941529585663),initialize=0.000614941529585663)
m.x1529 = Var(within=Reals,bounds=(0,0.000243276534239234),initialize=0.000243276534239234)
m.x1530 = Var(within=Reals,bounds=(0,8.13713160010543E-6),initialize=8.13713160010543E-6)
m.x1531 = Var(within=Reals,bounds=(0,4.65295877685244E-5),initialize=4.65295877685244E-5)
m.x1532 = Var(within=Reals,bounds=(0,7.05267538772982E-5),initialize=7.05267538772982E-5)
m.x1533 = Var(within=Reals,bounds=(0,8.09438110239388E-5),initialize=8.09438110239388E-5)
m.x1534 = Var(within=Reals,bounds=(0,0.00219884219969352),initialize=0.00219884219969352)
m.x1535 = Var(within=Reals,bounds=(0,0.000299669962904006),initialize=0.000299669962904006)
m.x1536 = Var(within=Reals,bounds=(0,None),initialize=0.00250807909598491)
m.x1537 = Var(within=Reals,bounds=(0,None),initialize=0.023435642246524)
m.x1538 = Var(within=Reals,bounds=(0,None),initialize=0.0211905912946917)
m.x1539 = Var(within=Reals,bounds=(0,0.000618677161867156),initialize=0.000618677161867156)
m.x1540 = Var(within=Reals,bounds=(0,None),initialize=0.00506402537643379)
m.x1541 = Var(within=Reals,bounds=(0,0.000595026720422337),initialize=0.000595026720422337)
m.x1542 = Var(within=Reals,bounds=(0,9.18723165211724E-5),initialize=9.18723165211724E-5)
m.x1543 = Var(within=Reals,bounds=(0,1.6972653848378E-7),initialize=1.6972653848378E-7)
m.x1544 = Var(within=Reals,bounds=(0,0.000926523788572199),initialize=0.000926523788572199)
m.x1545 = Var(within=Reals,bounds=(0,0.00140857009204485),initialize=0.00140857009204485)
m.x1546 = Var(within=Reals,bounds=(0,None),initialize=0.001957195832414)
m.x1547 = Var(within=Reals,bounds=(0,0.00267711342921525),initialize=0.00267711342921525)
m.x1548 = Var(within=Reals,bounds=(0,None),initialize=0.00828407365758313)
m.x1549 = Var(within=Reals,bounds=(0,None),initialize=0.00710999953451075)
m.x1550 = Var(within=Reals,bounds=(0,None),initialize=0.00078219007163846)
m.x1551 = Var(within=Reals,bounds=(0,None),initialize=0.021432063188234)
m.x1552 = Var(within=Reals,bounds=(0,0.000788221696571171),initialize=0.000788221696571171)
m.x1553 = Var(within=Reals,bounds=(0,None),initialize=0.0014045763797496)
m.x1554 = Var(within=Reals,bounds=(0,None),initialize=0.00114051648183818)
m.x1555 = Var(within=Reals,bounds=(0,0.000545581332007478),initialize=0.000545581332007478)
m.x1556 = Var(within=Reals,bounds=(0,0.000687496665786903),initialize=0.000687496665786903)
m.x1557 = Var(within=Reals,bounds=(0,0.000165579354226183),initialize=0.000165579354226183)
m.x1558 = Var(within=Reals,bounds=(0,None),initialize=0.00140365244992866)
m.x1559 = Var(within=Reals,bounds=(0,None),initialize=0.00403176095197672)
m.x1560 = Var(within=Reals,bounds=(0,0.00558050597506523),initialize=0.00558050597506523)
m.x1561 = Var(within=Reals,bounds=(0,0.000357413400296902),initialize=0.000357413400296902)
m.x1562 = Var(within=Reals,bounds=(0,0.000169547942498538),initialize=0.000169547942498538)
m.x1563 = Var(within=Reals,bounds=(0,5.67105218327801E-6),initialize=5.67105218327801E-6)
m.x1564 = Var(within=Reals,bounds=(0,3.24281003760952E-5),initialize=3.24281003760952E-5)
m.x1565 = Var(within=Reals,bounds=(0,2.79509644705383E-5),initialize=2.79509644705383E-5)
m.x1566 = Var(within=Reals,bounds=(0,3.20794232210983E-5),initialize=3.20794232210983E-5)
m.x1567 = Var(within=Reals,bounds=(0,0.000871438947932883),initialize=0.000871438947932883)
m.x1568 = Var(within=Reals,bounds=(0,0.000118764355730735),initialize=0.000118764355730735)
m.x1569 = Var(within=Reals,bounds=(0,0.000993994843760131),initialize=0.000993994843760131)
m.x1570 = Var(within=Reals,bounds=(0,None),initialize=0.0160778323126819)
m.x1571 = Var(within=Reals,bounds=(0,0.00839819546236456),initialize=0.00839819546236456)
m.x1572 = Var(within=Reals,bounds=(0,0.000431177796055985),initialize=0.000431177796055985)
m.x1573 = Var(within=Reals,bounds=(0,0.000754884984937),initialize=0.000754884984937)
m.x1574 = Var(within=Reals,bounds=(0,None),initialize=0.0113338422937588)
m.x1575 = Var(within=Reals,bounds=(0,0.00174994888611756),initialize=0.00174994888611756)
m.x1576 = Var(within=Reals,bounds=(0,None),initialize=0.00776726705597066)
m.x1577 = Var(within=Reals,bounds=(0,None),initialize=0.000746544479676867)
m.x1578 = Var(within=Reals,bounds=(0,None),initialize=0.000829106068830786)
m.x1579 = Var(within=Reals,bounds=(0,None),initialize=0.0387811300766529)
m.x1580 = Var(within=Reals,bounds=(0,None),initialize=0.120004529719713)
m.x1581 = Var(within=Reals,bounds=(0,None),initialize=0.0382273708346632)
m.x1582 = Var(within=Reals,bounds=(0,None),initialize=0.00643025346432903)
m.x1583 = Var(within=Reals,bounds=(0,None),initialize=0.0159226535718767)
m.x1584 = Var(within=Reals,bounds=(0,0.000683104375179983),initialize=0.000683104375179983)
m.x1585 = Var(within=Reals,bounds=(0,None),initialize=0.016447378334379)
m.x1586 = Var(within=Reals,bounds=(0,None),initialize=0.247390019417806)
m.x1587 = Var(within=Reals,bounds=(0,None),initialize=0.250387596872746)
m.x1588 = Var(within=Reals,bounds=(0,None),initialize=0.158734775629951)
m.x1589 = Var(within=Reals,bounds=(0,None),initialize=0.167262220565969)
m.x1590 = Var(within=Reals,bounds=(0,None),initialize=0.373388975532826)
m.x1591 = Var(within=Reals,bounds=(0,None),initialize=0.4337520193675)
m.x1592 = Var(within=Reals,bounds=(0,None),initialize=0.16356666558264)
m.x1593 = Var(within=Reals,bounds=(0,None),initialize=0.176904335216011)
m.x1594 = Var(within=Reals,bounds=(0,None),initialize=0.449448712228857)
m.x1595 = Var(within=Reals,bounds=(0,None),initialize=0.315242892757086)
m.x1596 = Var(within=Reals,bounds=(0,None),initialize=0.133428691047625)
m.x1597 = Var(within=Reals,bounds=(0,None),initialize=0.203416953461593)
m.x1598 = Var(within=Reals,bounds=(0,None),initialize=0.214371695534575)
m.x1599 = Var(within=Reals,bounds=(0,None),initialize=0.123038113371779)
m.x1600 = Var(within=Reals,bounds=(0,None),initialize=0.0654861086238599)
m.x1601 = Var(within=Reals,bounds=(0,None),initialize=0.0712135243643551)
m.x1602 = Var(within=Reals,bounds=(0,None),initialize=0.154655814707253)
m.x1603 = Var(within=Reals,bounds=(0,None),initialize=0.0343540761607488)
m.x1604 = Var(within=Reals,bounds=(0,None),initialize=0.0267546861508038)
m.x1605 = Var(within=Reals,bounds=(0,None),initialize=0.0243962462000316)
m.x1606 = Var(within=Reals,bounds=(0,None),initialize=0.430187671765251)
m.x1607 = Var(within=Reals,bounds=(0,None),initialize=0.0570985503449696)
m.x1608 = Var(within=Reals,bounds=(0,None),initialize=0.145200124400831)
m.x1609 = Var(within=Reals,bounds=(0,None),initialize=0.0710558606383934)
m.x1610 = Var(within=Reals,bounds=(0,None),initialize=0.426722240397328)
m.x1611 = Var(within=Reals,bounds=(0,None),initialize=0.168633148634524)
m.x1612 = Var(within=Reals,bounds=(0,None),initialize=0.139791500435808)
m.x1613 = Var(within=Reals,bounds=(0,None),initialize=0.185735904703992)
m.x1614 = Var(within=Reals,bounds=(0,None),initialize=0.335280913386447)
m.x1615 = Var(within=Reals,bounds=(0,None),initialize=0.214516414324126)
m.x1616 = Var(within=Reals,bounds=(0,None),initialize=0.548803609059125)
m.x1617 = Var(within=Reals,bounds=(0,None),initialize=0.0490773111259597)
m.x1618 = Var(within=Reals,bounds=(0,None),initialize=0.566643436284715)
m.x1619 = Var(within=Reals,bounds=(0,None),initialize=0.175469303528183)
m.x1620 = Var(within=Reals,bounds=(0,None),initialize=0.432921201303451)
m.x1621 = Var(within=Reals,bounds=(0,None),initialize=0.753640837235269)
m.x1622 = Var(within=Reals,bounds=(0,None),initialize=0.318754971458273)
m.x1623 = Var(within=Reals,bounds=(0,None),initialize=0.0961100435039043)
m.x1624 = Var(within=Reals,bounds=(0,None),initialize=0.129783637796767)
m.x1625 = Var(within=Reals,bounds=(0,None),initialize=0.467910764251318)
m.x1626 = Var(within=Reals,bounds=(0,None),initialize=0.0646053543045028)
m.x1627 = Var(within=Reals,bounds=(0,None),initialize=0.0885231885229456)
m.x1628 = Var(within=Reals,bounds=(0,None),initialize=0.424870231679797)
m.x1629 = Var(within=Reals,bounds=(0,None),initialize=0.0224606601002363)
m.x1630 = Var(within=Reals,bounds=(0,None),initialize=0.00751660630858467)
m.x1631 = Var(within=Reals,bounds=(0,None),initialize=0.00256537749393031)
m.x1632 = Var(within=Reals,bounds=(0,None),initialize=0.0193906636678539)
m.x1633 = Var(within=Reals,bounds=(0,None),initialize=0.00735994757539304)
m.x1634 = Var(within=Reals,bounds=(0,6.2656745814571E-5),initialize=6.2656745814571E-5)
m.x1635 = Var(within=Reals,bounds=(0,1.65205609893312E-5),initialize=1.65205609893312E-5)
m.x1636 = Var(within=Reals,bounds=(0,8.27688167255289E-6),initialize=8.27688167255289E-6)
m.x1637 = Var(within=Reals,bounds=(0,None),initialize=0.00179164112554169)
m.x1638 = Var(within=Reals,bounds=(0,None),initialize=0.000558046360162964)
m.x1639 = Var(within=Reals,bounds=(0,None),initialize=0.263581115218596)
m.x1640 = Var(within=Reals,bounds=(0,None),initialize=0.123905225712026)
m.x1641 = Var(within=Reals,bounds=(0,None),initialize=0.0492754236556354)
m.x1642 = Var(within=Reals,bounds=(0,None),initialize=0.00649107723423761)
m.x1643 = Var(within=Reals,bounds=(0,None),initialize=0.235474905204999)
m.x1644 = Var(within=Reals,bounds=(0,None),initialize=0.112126888853013)
m.x1645 = Var(within=Reals,bounds=(0,None),initialize=0.045865673447173)
m.x1646 = Var(within=Reals,bounds=(0,None),initialize=0.0149949028470258)
m.x1647 = Var(within=Reals,bounds=(0,1.43180325590822E-5),initialize=1.43180325590822E-5)
m.x1648 = Var(within=Reals,bounds=(0,5.66279829951837E-6),initialize=5.66279829951837E-6)
m.x1649 = Var(within=Reals,bounds=(0,2.83708958133538E-6),initialize=2.83708958133538E-6)
m.x1650 = Var(within=Reals,bounds=(0,0.000204708916534099),initialize=0.000204708916534099)
m.x1651 = Var(within=Reals,bounds=(0,6.37610389430373E-5),initialize=6.37610389430373E-5)
m.x1652 = Var(within=Reals,bounds=(0,None),initialize=0.203610972674456)
m.x1653 = Var(within=Reals,bounds=(0,None),initialize=0.194539822866439)
m.x1654 = Var(within=Reals,bounds=(0,None),initialize=0.142573009688196)
m.x1655 = Var(within=Reals,bounds=(0,None),initialize=0.0574538644943166)
m.x1656 = Var(within=Reals,bounds=(0,None),initialize=0.207208573040247)
m.x1657 = Var(within=Reals,bounds=(0,None),initialize=0.220418366843128)
m.x1658 = Var(within=Reals,bounds=(0,None),initialize=0.154101904413792)
m.x1659 = Var(within=Reals,bounds=(0,None),initialize=0.082259490920705)
m.x1660 = Var(within=Reals,bounds=(0,0.000217747225150977),initialize=0.000217747225150977)
m.x1661 = Var(within=Reals,bounds=(0,5.51854008503446E-5),initialize=5.51854008503446E-5)
m.x1662 = Var(within=Reals,bounds=(0,0.000888293830032156),initialize=0.000888293830032156)
m.x1663 = Var(within=Reals,bounds=(0,0.000310271138544125),initialize=0.000310271138544125)
m.x1664 = Var(within=Reals,bounds=(0,0.000155447354469163),initialize=0.000155447354469163)
m.x1665 = Var(within=Reals,bounds=(0,0.00187077496888643),initialize=0.00187077496888643)
m.x1666 = Var(within=Reals,bounds=(0,0.000581883565082127),initialize=0.000581883565082127)
m.x1667 = Var(within=Reals,bounds=(0,None),initialize=0.0870150642623878)
m.x1668 = Var(within=Reals,bounds=(0,0.00671481224633886),initialize=0.00671481224633886)
m.x1669 = Var(within=Reals,bounds=(0,0.0079437786055848),initialize=0.0079437786055848)
m.x1670 = Var(within=Reals,bounds=(0,None),initialize=0.0821134063545123)
m.x1671 = Var(within=Reals,bounds=(0,None),initialize=0.098630239014832)
m.x1672 = Var(within=Reals,bounds=(0,None),initialize=0.140461621297225)
m.x1673 = Var(within=Reals,bounds=(0,None),initialize=0.0852280119312233)
m.x1674 = Var(within=Reals,bounds=(0,None),initialize=0.092615630665065)
m.x1675 = Var(within=Reals,bounds=(0,None),initialize=0.139212013828868)
m.x1676 = Var(within=Reals,bounds=(0,None),initialize=0.177101066135244)
m.x1677 = Var(within=Reals,bounds=(0,None),initialize=0.0884517204356554)
m.x1678 = Var(within=Reals,bounds=(0,3.40200056118846E-5),initialize=3.40200056118846E-5)
m.x1679 = Var(within=Reals,bounds=(0,1.70441888220984E-5),initialize=1.70441888220984E-5)
m.x1680 = Var(within=Reals,bounds=(0,0.000501441446744868),initialize=0.000501441446744868)
m.x1681 = Var(within=Reals,bounds=(0,0.000259821733934611),initialize=0.000259821733934611)
m.x1682 = Var(within=Reals,bounds=(0,None),initialize=0.165753265253869)
m.x1683 = Var(within=Reals,bounds=(0,None),initialize=0.0117145636407983)
m.x1684 = Var(within=Reals,bounds=(0,None),initialize=0.0138606059227122)
m.x1685 = Var(within=Reals,bounds=(0,None),initialize=0.00418610974561325)
m.x1686 = Var(within=Reals,bounds=(0,None),initialize=0.00359022144059255)
m.x1687 = Var(within=Reals,bounds=(0,None),initialize=0.0019168270577352)
m.x1688 = Var(within=Reals,bounds=(0,None),initialize=0.00112245169331991)
m.x1689 = Var(within=Reals,bounds=(0,None),initialize=0.0449192489285491)
m.x1690 = Var(within=Reals,bounds=(0,None),initialize=0.0135450663385198)
m.x1691 = Var(within=Reals,bounds=(0,0.00950927732264413),initialize=0.00950927732264413)
m.x1692 = Var(within=Reals,bounds=(0,None),initialize=0.0159662325861044)
m.x1693 = Var(within=Reals,bounds=(0,3.96152055668893E-5),initialize=3.96152055668893E-5)
m.x1694 = Var(within=Reals,bounds=(0,1.1455628551119E-5),initialize=1.1455628551119E-5)
m.x1695 = Var(within=Reals,bounds=(0,None),initialize=0.00805631962863736)
m.x1696 = Var(within=Reals,bounds=(0,None),initialize=0.0019316197132546)
m.x1697 = Var(within=Reals,bounds=(0,None),initialize=0.00228534257112175)
m.x1698 = Var(within=Reals,bounds=(0,None),initialize=0.176291376987096)
m.x1699 = Var(within=Reals,bounds=(0,None),initialize=0.17310157661096)
m.x1700 = Var(within=Reals,bounds=(0,None),initialize=0.113590020480419)
m.x1701 = Var(within=Reals,bounds=(0,None),initialize=0.0591679934526422)
m.x1702 = Var(within=Reals,bounds=(0,None),initialize=0.143752622093098)
m.x1703 = Var(within=Reals,bounds=(0,None),initialize=0.140873866935131)
m.x1704 = Var(within=Reals,bounds=(0,None),initialize=0.107034673330334)
m.x1705 = Var(within=Reals,bounds=(0,None),initialize=0.0349071637122992)
m.x1706 = Var(within=Reals,bounds=(0,0.000284386892630815),initialize=0.000284386892630815)
m.x1707 = Var(within=Reals,bounds=(0,8.22368724373977E-5),initialize=8.22368724373977E-5)
m.x1708 = Var(within=Reals,bounds=(0,None),initialize=0.0578341490961175)
m.x1709 = Var(within=Reals,bounds=(0,None),initialize=0.0138665777480159)
m.x1710 = Var(within=Reals,bounds=(0,None),initialize=0.0164058588892301)
m.x1711 = Var(within=Reals,bounds=(0,None),initialize=0.000661964202650907)
m.x1712 = Var(within=Reals,bounds=(0,0.000302656903852927),initialize=0.000302656903852927)
m.x1713 = Var(within=Reals,bounds=(0,None),initialize=0.00154880922625137)
m.x1714 = Var(within=Reals,bounds=(0,None),initialize=0.00384004010585051)
m.x1715 = Var(within=Reals,bounds=(0,0.00272974568619058),initialize=0.00272974568619058)
m.x1716 = Var(within=Reals,bounds=(0,None),initialize=0.00679930817355016)
m.x1717 = Var(within=Reals,bounds=(0,4.4608638697891E-5),initialize=4.4608638697891E-5)
m.x1718 = Var(within=Reals,bounds=(0,1.2899592158654E-5),initialize=1.2899592158654E-5)
m.x1719 = Var(within=Reals,bounds=(0,None),initialize=0.00907180579794807)
m.x1720 = Var(within=Reals,bounds=(0,None),initialize=0.00217509728038158)
m.x1721 = Var(within=Reals,bounds=(0,None),initialize=0.00257340633721932)
m.x1722 = Var(within=Reals,bounds=(0,None),initialize=0.0389392294081174)
m.x1723 = Var(within=Reals,bounds=(0,None),initialize=0.0907954119602354)
m.x1724 = Var(within=Reals,bounds=(0,None),initialize=0.107853956008523)
m.x1725 = Var(within=Reals,bounds=(0,None),initialize=0.147406615557667)
m.x1726 = Var(within=Reals,bounds=(0,None),initialize=0.04838450393287)
m.x1727 = Var(within=Reals,bounds=(0,None),initialize=0.116242904960336)
m.x1728 = Var(within=Reals,bounds=(0,None),initialize=0.169840084204518)
m.x1729 = Var(within=Reals,bounds=(0,None),initialize=0.131554481856836)
m.x1730 = Var(within=Reals,bounds=(0,0.000271566853181457),initialize=0.000271566853181457)
m.x1731 = Var(within=Reals,bounds=(0,7.85296694116664E-5),initialize=7.85296694116664E-5)
m.x1732 = Var(within=Reals,bounds=(0,None),initialize=0.0552270103982914)
m.x1733 = Var(within=Reals,bounds=(0,None),initialize=0.0132414783557315)
m.x1734 = Var(within=Reals,bounds=(0,None),initialize=0.0156662897894912)
m.x1735 = Var(within=Reals,bounds=(0,None),initialize=0.197091681328285)
m.x1736 = Var(within=Reals,bounds=(0,None),initialize=0.046336716958708)
m.x1737 = Var(within=Reals,bounds=(0,None),initialize=0.0294369894802172)
m.x1738 = Var(within=Reals,bounds=(0,None),initialize=0.010763354609587)
m.x1739 = Var(within=Reals,bounds=(0,None),initialize=0.127215136797226)
m.x1740 = Var(within=Reals,bounds=(0,None),initialize=0.0502065897623384)
m.x1741 = Var(within=Reals,bounds=(0,None),initialize=0.0427548924993172)
m.x1742 = Var(within=Reals,bounds=(0,0.00954331468644719),initialize=0.00954331468644719)
m.x1743 = Var(within=Reals,bounds=(0,0.00162111893993746),initialize=0.00162111893993746)
m.x1744 = Var(within=Reals,bounds=(0,0.000235458943284733),initialize=0.000235458943284733)
m.x1745 = Var(within=Reals,bounds=(0,0.000374418600324541),initialize=0.000374418600324541)
m.x1746 = Var(within=Reals,bounds=(0,None),initialize=0.011227410333382)
m.x1747 = Var(within=Reals,bounds=(0,0.00131125649466486),initialize=0.00131125649466486)
m.x1748 = Var(within=Reals,bounds=(0,0.00065694590248578),initialize=0.00065694590248578)
m.x1749 = Var(within=Reals,bounds=(0,None),initialize=0.0199285389560349)
m.x1750 = Var(within=Reals,bounds=(0,None),initialize=0.0111864900152529)
m.x1751 = Var(within=Reals,bounds=(0,None),initialize=0.0179692085743177)
m.x1752 = Var(within=Reals,bounds=(0,None),initialize=0.00648387710459162)
m.x1753 = Var(within=Reals,bounds=(0,None),initialize=0.00783776492227562)
m.x1754 = Var(within=Reals,bounds=(0,None),initialize=0.243884072920747)
m.x1755 = Var(within=Reals,bounds=(0,None),initialize=0.0505581133033652)
m.x1756 = Var(within=Reals,bounds=(0,None),initialize=0.0210742498117144)
m.x1757 = Var(within=Reals,bounds=(0,None),initialize=0.0254718054519076)
m.x1758 = Var(within=Reals,bounds=(0,None),initialize=0.123114992904999)
m.x1759 = Var(within=Reals,bounds=(0,None),initialize=0.0428654647542876)
m.x1760 = Var(within=Reals,bounds=(0,None),initialize=0.0345125597916363)
m.x1761 = Var(within=Reals,bounds=(0,0.00613490393653142),initialize=0.00613490393653142)
m.x1762 = Var(within=Reals,bounds=(0,0.000837841645154914),initialize=0.000837841645154914)
m.x1763 = Var(within=Reals,bounds=(0,0.000574728713741665),initialize=0.000574728713741665)
m.x1764 = Var(within=Reals,bounds=(0,None),initialize=0.0223607587376095)
m.x1765 = Var(within=Reals,bounds=(0,0.00354152472500527),initialize=0.00354152472500527)
m.x1766 = Var(within=Reals,bounds=(0,0.00177432116913093),initialize=0.00177432116913093)
m.x1767 = Var(within=Reals,bounds=(0,None),initialize=0.0217822052233777)
m.x1768 = Var(within=Reals,bounds=(0,None),initialize=0.0570546449178343)
m.x1769 = Var(within=Reals,bounds=(0,None),initialize=0.0132941635288087)
m.x1770 = Var(within=Reals,bounds=(0,None),initialize=0.0338177347216876)
m.x1771 = Var(within=Reals,bounds=(0,0.00842570907150569),initialize=0.00842570907150569)
m.x1772 = Var(within=Reals,bounds=(0,None),initialize=0.00996669946990998)
m.x1773 = Var(within=Reals,bounds=(0,0.00135105800434929),initialize=0.00135105800434929)
m.x1774 = Var(within=Reals,bounds=(0,None),initialize=0.0109678808852569)
m.x1775 = Var(within=Reals,bounds=(0,None),initialize=0.335163679471586)
m.x1776 = Var(within=Reals,bounds=(0,None),initialize=0.0393206107504306)
m.x1777 = Var(within=Reals,bounds=(0,None),initialize=0.0141989304779298)
m.x1778 = Var(within=Reals,bounds=(0,None),initialize=0.0402378371923066)
m.x1779 = Var(within=Reals,bounds=(0,None),initialize=0.14094371208526)
m.x1780 = Var(within=Reals,bounds=(0,None),initialize=0.0453489553719886)
m.x1781 = Var(within=Reals,bounds=(0,0.000254667434938753),initialize=0.000254667434938753)
m.x1782 = Var(within=Reals,bounds=(0,None),initialize=0.148509816067922)
m.x1783 = Var(within=Reals,bounds=(0,None),initialize=0.0917457586407488)
m.x1784 = Var(within=Reals,bounds=(0,None),initialize=0.108554886267181)
m.x1785 = Var(within=Reals,bounds=(0,0.00187466105381142),initialize=0.00187466105381142)
m.x1786 = Var(within=Reals,bounds=(0,0.00673222532161902),initialize=0.00673222532161902)
m.x1787 = Var(within=Reals,bounds=(0,None),initialize=0.0263023251593147)
m.x1788 = Var(within=Reals,bounds=(0,None),initialize=0.57042296123861)
m.x1789 = Var(within=Reals,bounds=(0,None),initialize=0.0375279396344047)
m.x1790 = Var(within=Reals,bounds=(0,None),initialize=0.0339317791197778)
m.x1791 = Var(within=Reals,bounds=(0,None),initialize=0.10998325180743)
m.x1792 = Var(within=Reals,bounds=(0,None),initialize=0.535661869617752)
m.x1793 = Var(within=Reals,bounds=(0,None),initialize=0.364888355211421)
m.x1794 = Var(within=Reals,bounds=(0,None),initialize=0.100822246652588)
m.x1795 = Var(within=Reals,bounds=(0,None),initialize=0.119293205345415)
m.x1796 = Var(within=Reals,bounds=(0,None),initialize=1)
m.x1797 = Var(within=Reals,bounds=(0,0.000759461301306587),initialize=0.000759461301306587)
m.x1798 = Var(within=Reals,bounds=(0,0.00148327227077054),initialize=0.00148327227077054)
m.x1799 = Var(within=Reals,bounds=(0,0.00254100518048818),initialize=0.00254100518048818)
m.x1800 = Var(within=Reals,bounds=(0,None),initialize=0.0350452233167942)
m.x1801 = Var(within=Reals,bounds=(0,None),initialize=0.0125436078074883)
m.x1802 = Var(within=Reals,bounds=(0,None),initialize=0.0170046363439775)
m.x1803 = Var(within=Reals,bounds=(0,None),initialize=0.0331208051313957)
m.x1804 = Var(within=Reals,bounds=(0,None),initialize=0.0247526989855815)
m.x1805 = Var(within=Reals,bounds=(0,0.00165083569921346),initialize=0.00165083569921346)
m.x1806 = Var(within=Reals,bounds=(0,None),initialize=0.0121347894337138)
m.x1807 = Var(within=Reals,bounds=(0,None),initialize=0.403865216425721)
m.x1808 = Var(within=Reals,bounds=(0,None),initialize=0.0240811099342396)
m.x1809 = Var(within=Reals,bounds=(0,None),initialize=0.0733532046841998)
m.x1810 = Var(within=Reals,bounds=(0,0.000110313569750404),initialize=0.000110313569750404)
m.x1811 = Var(within=Reals,bounds=(0,None),initialize=0.0856553528027824)
m.x1812 = Var(within=Reals,bounds=(0,None),initialize=0.0669671024360797)
m.x1813 = Var(within=Reals,bounds=(0,None),initialize=0.0467563638299112)
m.x1814 = Var(within=Reals,bounds=(0,None),initialize=0.0783348217298249)
m.x1815 = Var(within=Reals,bounds=(0,None),initialize=0.0465476594415828)
m.x1816 = Var(within=Reals,bounds=(0,None),initialize=0.139740097316646)
m.x1817 = Var(within=Reals,bounds=(0,None),initialize=0.00389952984081387)
m.x1818 = Var(within=Reals,bounds=(0,5.82388851538996E-5),initialize=5.82388851538996E-5)
m.x1819 = Var(within=Reals,bounds=(0,0.000173163422256161),initialize=0.000173163422256161)
m.x1820 = Var(within=Reals,bounds=(0,0.000452633215343031),initialize=0.000452633215343031)
m.x1821 = Var(within=Reals,bounds=(0,0.00251341850223232),initialize=0.00251341850223232)
m.x1822 = Var(within=Reals,bounds=(0,None),initialize=0.00905890615475099)
m.x1823 = Var(within=Reals,bounds=(0,None),initialize=0.011082019754648)
m.x1824 = Var(within=Reals,bounds=(0,None),initialize=0.0569922940565154)
m.x1825 = Var(within=Reals,bounds=(0,None),initialize=0.0378350139293933)
m.x1826 = Var(within=Reals,bounds=(0,0.0051438360530213),initialize=0.0051438360530213)
m.x1827 = Var(within=Reals,bounds=(0,None),initialize=0.00660900256022783)
m.x1828 = Var(within=Reals,bounds=(0,0.000310416759343607),initialize=0.000310416759343607)
m.x1829 = Var(within=Reals,bounds=(0,0.000204840207290355),initialize=0.000204840207290355)
m.x1830 = Var(within=Reals,bounds=(0,None),initialize=0.0393171678586315)
m.x1831 = Var(within=Reals,bounds=(0,None),initialize=0.0392477529138222)
m.x1832 = Var(within=Reals,bounds=(0,None),initialize=0.0654126233629615)
m.x1833 = Var(within=Reals,bounds=(0,0.000479516572237495),initialize=0.000479516572237495)
m.x1834 = Var(within=Reals,bounds=(0,None),initialize=0.015369359792106)
m.x1835 = Var(within=Reals,bounds=(0,7.79441298658939E-5),initialize=7.79441298658939E-5)
m.x1836 = Var(within=Reals,bounds=(0,None),initialize=0.0303801593720193)
m.x1837 = Var(within=Reals,bounds=(0,None),initialize=0.0706524833856603)
m.x1838 = Var(within=Reals,bounds=(0,0.000133952191839776),initialize=0.000133952191839776)
m.x1839 = Var(within=Reals,bounds=(0,None),initialize=0.0831571619222687)
m.x1840 = Var(within=Reals,bounds=(0,None),initialize=0.0564197392785236)
m.x1841 = Var(within=Reals,bounds=(0,None),initialize=0.0564490746751247)
m.x1842 = Var(within=Reals,bounds=(0,None),initialize=0.10946095276264)
m.x1843 = Var(within=Reals,bounds=(0,None),initialize=0.0966347296450886)
m.x1844 = Var(within=Reals,bounds=(0,None),initialize=0.00911678738067905)
m.x1845 = Var(within=Reals,bounds=(0,None),initialize=0.00871832439162501)
m.x1846 = Var(within=Reals,bounds=(0,None),initialize=0.00885783812235054)
m.x1847 = Var(within=Reals,bounds=(0,None),initialize=0.00701909087484137)
m.x1848 = Var(within=Reals,bounds=(0,None),initialize=0.0238236363415222)
m.x1849 = Var(within=Reals,bounds=(0,None),initialize=1)
m.x1850 = Var(within=Reals,bounds=(0,None),initialize=1)
m.x1851 = Var(within=Reals,bounds=(0,None),initialize=0.309541224622313)
m.x1852 = Var(within=Reals,bounds=(0,None),initialize=0.15855921226156)
m.x1853 = Var(within=Reals,bounds=(0,None),initialize=0.0490082926412113)
m.x1854 = Var(within=Reals,bounds=(0,None),initialize=0.0333826378416693)
m.x1855 = Var(within=Reals,bounds=(0,0.000582290156137133),initialize=0.000582290156137133)
m.x1856 = Var(within=Reals,bounds=(0,0.00252571460439443),initialize=0.00252571460439443)
m.x1857 = Var(within=Reals,bounds=(0,0.00157954559843896),initialize=0.00157954559843896)
m.x1858 = Var(within=Reals,bounds=(0,None),initialize=0.0705572893497556)
m.x1859 = Var(within=Reals,bounds=(0,0.00897076056955651),initialize=0.00897076056955651)
m.x1860 = Var(within=Reals,bounds=(0,None),initialize=0.0130552012980747)
m.x1861 = Var(within=Reals,bounds=(0,None),initialize=0.236758138601416)
m.x1862 = Var(within=Reals,bounds=(0,None),initialize=0.636744018258343)
m.x1863 = Var(within=Reals,bounds=(0,None),initialize=0.0149489430144047)
m.x1864 = Var(within=Reals,bounds=(0,None),initialize=0.409928341250449)
m.x1865 = Var(within=Reals,bounds=(0,0.00421659402779223),initialize=0.00421659402779223)
m.x1866 = Var(within=Reals,bounds=(0,None),initialize=0.106821314474901)
m.x1867 = Var(within=Reals,bounds=(0,None),initialize=0.32785153255267)
m.x1868 = Var(within=Reals,bounds=(0,None),initialize=0.118896595840241)
m.x1869 = Var(within=Reals,bounds=(0,None),initialize=0.390596248549136)
m.x1870 = Var(within=Reals,bounds=(0,None),initialize=0.382066026274561)
m.x1871 = Var(within=Reals,bounds=(0,None),initialize=0.339745535429727)
m.x1872 = Var(within=Reals,bounds=(0,None),initialize=0.630646215249219)
m.x1873 = Var(within=Reals,bounds=(0,None),initialize=0.275690399348828)
m.x1874 = Var(within=Reals,bounds=(0,None),initialize=0.0202141302695291)
m.x1875 = Var(within=Reals,bounds=(0,None),initialize=0.0744034575120527)
m.x1876 | |
e:
print("{exception_type} <--Number--- {q_string_input}".format(
exception_type=type_name(e),
q_string_input=q_in,
))
# NOTE: print THEN a stack trace
raise
match_x = floats_really_same(x_new, x)
# Compare Number(x) and q
try:
q_new = Number(x).qstring()
except Exception as e:
print("{x_input:.17e} ---Number--> {exception_type}".format(
x_input=x,
exception_type=type_name(e),
))
raise
match_q = q_new == q
if not match_x or not match_q:
report = "\n"
if not match_x:
q_shoulda = Number(x, qigits = 7).qstring()
report += "Number({}) ~~ ".format(q_shoulda)
report += \
"{x_out_expected:.17e} {equality} {x_out_computed:.17e} " \
"<--- " \
"Number({q_in}).__float__()".format(
x_out_expected=x,
equality='==' if match_x else '!!!=',
x_out_computed=x_new,
q_in=q_in,
)
report += \
"\nNumber._from_float({x_in:.17e}) " \
"---> " \
"{q_out_computed} {equality} {q_out_expected}".format(
x_in=x,
q_out_computed=q_new,
equality='==' if match_q else '!!!=',
q_out_expected=q,
)
self.fail(report)
if not context.the_first:
x_oos = x > context.x_in_last
qin_oos = Number(q_in).raw > Number(context.q_in_last ).raw
qout_oos = Number(q ).raw > Number(context.q_out_last).raw
if x_oos:
self.fail("Float out of sequence: {x_later:.17e} should be less than {x_early:.17e}".format(
x_later=x,
x_early=context.x_in_last,
))
if qin_oos:
self.fail("Qiki Number input out of sequence: {q_later} should be less than {q_early}".format(
q_later=q_in,
q_early=context.q_in_last,
))
if qout_oos:
self.fail("Qiki Number output out of sequence: {q_later} should be less than {q_early}".format(
q_later=q,
q_early=context.q_out_last,
))
this_zone = Number(q_in).zone
last_zone = Number(context.q_in_last).zone
if not context.after_zone_boundary and this_zone != last_zone:
self.fail("{zone_early} is in a different zone than {zone_later} -- need zone_boundary()?".format(
zone_early=context.q_in_last,
zone_later=q_in,
))
if context.after_zone_boundary and this_zone == last_zone:
self.fail("{zone_early} is in the same zone as {zone_later} -- remove zone_boundary()?".format(
zone_early=context.q_in_last,
zone_later=q_in,
))
context.x_in_last = x
context.q_in_last = q_in
context.q_out_last = q
context.the_first = False
context.after_zone_boundary = False
# noinspection PyPep8Naming
        class context(object):
            """Variables that are local to test_floats_and_qstrings(), but global to f__q()."""
            # True until the first f__q() call finishes; suppresses the
            # out-of-sequence and zone checks, which need a previous value.
            the_first = True
            # Set by zone_boundary(); tells the next f__q() call that a zone
            # change is expected (and cleared again after that call).
            after_zone_boundary = False
            # Inputs/outputs of the previous f__q() call, used by the
            # descending-sequence checks.
            x_in_last = None
            q_in_last = None
            q_out_last = None
        def zone_boundary():
            """Declare that the next f__q() call should land in a different zone than the last."""
            context.after_zone_boundary = True
        def try_out_f__q_errors():
            """Uncomment each set of statements to test f__q() exceptions and error messages.

            Each commented call below is expected to raise or to self.fail()
            with a specific diagnostic, so they are kept commented out to let
            the suite pass; this function is a no-op as committed.
            """
            # f__q(object(), '0q80')
            # f__q(0.0, object())
            # f__q(0.0, '0q80', object())
            # f__q(0.0, 'nonsense')
            # f__q(sys.float_info.max, '0q')
            # f__q(1.0, '0q82____01')   # Both reports:  f == f <--- q   and   f ---> q !!!= q
            # f__q(0.0, '0q80')
            # f__q(1.0, '0q82_01')
            # f__q(1.0, '0q82_01', '0q82_0001')
            # f__q(1.0, '0q82_01')
            # NOTE:  Can't trigger "Qiki Number output out of sequence..." without a bug in Number.
            # f__q(2.0, '0q82_02')
            # f__q(0.0, '0q80')
            # f__q(0.0, '0q80')
            # zone_boundary()
            # f__q(0.0, '0q80')
try_out_f__q_errors()
f__q(float('+inf'), '0qFF_81')
zone_boundary()
if LUDICROUS_NUMBER_SUPPORT:
# noinspection PyUnresolvedReferences
m__s(mpmath.power(2,1024), '0qFF000080_01') # A smidgen too big for floating point
f__q(1.7976931348623157e+308, '0qFF00007F_FFFFFFFFFFFFF8') # Largest IEEE-754 64-bit floating point number -- a little ways into Zone.LUDICROUS_LARGE
f__q(math.pow(2,1000), '0qFF00007D_01') # TODO: Smallest Ludicrously Large number: +2 ** +1000.
else:
f__q(float('+inf'), '0qFF_81', '0qFF00FFFF_5F5E00FF_01') # 2**99999999, a ludicrously large positive number
f__q(float('+inf'), '0qFF_81', '0qFF000080_01') # A smidgen too big for floating point
zone_boundary()
f__q(1.0715086071862672e+301, '0qFE_FFFFFFFFFFFFF8') # Largest reasonable number that floating point can represent, 2**1000 - 2**947
f__q(5.3575430359313366e+300, '0qFE_80')
f__q(math.pow(2,999), '0qFE_80') # Largest reasonable integral power of 2: +2 ** +999.
f__q(math.pow(2,992), '0qFE_01')
f__q(math.pow(2,880), '0qF0_01')
f__q(2.04586912993508844e+149, '0qBF_FFFFFFFFFFFFF8')
f__q( 1e100+1.0, '0qAB_1249AD2594C37D', '0qAB_1249AD2594C37CEB0B2784C4CE0BF38ACE408E211A7CAAB24308A82E8F10000000000000000000000001') # googol+1 (though float can't distinguish)
f__q( 1e100, '0qAB_1249AD2594C37D', '0qAB_1249AD2594C37CEB0B2784C4CE0BF38ACE408E211A7CAAB24308A82E8F10') # googol, or as close to it as float can get
f__q( 1e25, '0q8C_0845951614014880')
f__q( 1e10, '0q86_02540BE4')
f__q(4294967296.0, '0q86_01')
f__q(4294967296.0, '0q86_01', '0q86') # 0q86 is an alias for +256**4, the official code being 0q86_01
f__q( 16777216.0, '0q85_01')
f__q( 65536.0, '0q84_01')
f__q( 32768.0, '0q83_80')
f__q( 16384.0, '0q83_40')
f__q( 8192.0, '0q83_20')
f__q( 4096.0, '0q83_10')
f__q( 2048.0, '0q83_08')
f__q( 1234.567890123456789, '0q83_04D291613F43F8')
f__q( 1234.5678901234, '0q83_04D291613F43B980')
f__q( 1234.56789, '0q83_04D291613D31B9C0')
f__q( 1111.1111112, '0q83_04571C71C89A3840')
f__q( 1111.111111111111313, '0q83_04571C71C71C72') # XXX: use numpy.nextafter(1111.111111111111111, 1) or something -- http://stackoverflow.com/a/6163157/673991
f__q( 1111.111111111111111, '0q83_04571C71C71C71C0') # float has just under 17 significant digits
f__q( 1111.1111111, '0q83_04571C71C6ECB9')
f__q( 1024.0, '0q83_04')
f__q( 1000.0, '0q83_03E8')
f__q( 512.0, '0q83_02')
f__q( 258.0, '0q83_0102')
f__q( 257.0, '0q83_0101')
f__q( 256.0, '0q83_01')
f__q( 256.0, '0q83_01', '0q83') # alias for +256
f__q( 256.0, '0q83_01', '0q82_FFFFFFFFFFFFFC')
f__q( 255.9999999999999801, '0q82_FFFFFFFFFFFFF8') # 53 bits in the float mantissa
f__q( 255.5, '0q82_FF80')
f__q( 255.0, '0q82_FF')
f__q( 254.0, '0q82_FE')
f__q( 216.0, '0q82_D8')
f__q( 128.0, '0q82_80')
f__q( 100.0, '0q82_64')
f__q(math.pi*2, '0q82_06487ED5110B46')
f__q(math.pi, '0q82_03243F6A8885A3') # 50-bit pi mantissa? Next qigit: '08'.
f__q( 3.0, '0q82_03')
f__q(math.exp(1), '0q82_02B7E151628AED20') # 53-bit mantissa for e.
f__q( 2.5, '0q82_0280')
f__q( 2.4, '0q82_0266666666666660')
f__q( 2.3, '0q82_024CCCCCCCCCCCC0')
f__q( 2.2, '0q82_0233333333333340')
f__q( 2.1, '0q82_02199999999999A0')
f__q( 2.0, '0q82_02')
f__q( 1.875, '0q82_01E0')
f__q( 1.75, '0q82_01C0')
f__q(math.sqrt(3), '0q82_01BB67AE8584CAA0')
f__q( 1.6666666666666666, '0q82_01AAAAAAAAAAAAA0')
f__q((1+math.sqrt(5))/2, '0q82_019E3779B97F4A80') # golden ratio
f__q( 1.6, '0q82_01999999999999A0')
f__q( 1.5333333333333333, '0q82_0188888888888880')
f__q( 1.5, '0q82_0180')
f__q( 1.4666666666666666, '0q82_0177777777777770')
f__q(math.sqrt(2), '0q82_016A09E667F3BCD0')
f__q( 1.4, '0q82_0166666666666660')
f__q( 1.3333333333333333, '0q82_0155555555555550')
f__q( 1.3, '0q82_014CCCCCCCCCCCD0')
f__q( 1.2666666666666666, '0q82_0144444444444440')
f__q( 1.25, '0q82_0140')
f__q( 1.2, '0q82_0133333333333330')
f__q( 1.1333333333333333, '0q82_0122222222222220')
f__q( 1.125, '0q82_0120')
f__q( 1.1, '0q82_01199999999999A0')
f__q( 1.0666666666666666, '0q82_0111111111111110')
f__q( 1.0625, '0q82_0110')
f__q(math.pow(2, 1/12.0), '0q82_010F38F92D979630') # semitone (twelfth of an octave)
f__q( 1.03125, '0q82_0108')
f__q( 1.015625, '0q82_0104')
f__q( 1.01, '0q82_01028F5C28F5C290')
f__q( 1.0078125, '0q82_0102')
f__q( 1.00390625, '0q82_0101')
f__q( 1.001953125, '0q82_010080')
f__q( 1.001, '0q82_01004189374BC6A0')
f__q( 1.0009765625, '0q82_010040')
f__q( 1.00048828125, '0q82_010020')
f__q( 1.000244140625, '0q82_010010')
f__q( 1.0001, '0q82_0100068DB8BAC710')
f__q( 1.00001, '0q82_010000A7C5AC4720')
f__q( 1.000001, '0q82_01000010C6F7A0B0')
f__q( 1.0000001, '0q82_01000001AD7F29B0')
f__q( 1.00000001, '0q82_010000002AF31DC0')
f__q( 1.000000001, '0q82_01000000044B83')
f__q( 1.0000000001, '0q82_01000000006DF380')
f__q( 1.00000000001, '0q82_01000000000AFEC0')
f__q( 1.000000000001, '0q82_0100000000011980')
f__q( 1.0000000000001, '0q82_0100000000001C20')
f__q( 1.00000000000001, '0q82_01000000000002D0')
f__q( 1.000000000000001, '0q82_0100000000000050')
f__q( 1.00000000000000067,'0q82_0100000000000030')
f__q( 1.00000000000000067,'0q82_0100000000000030', '0q82_01000000000000280001')
f__q( 1.00000000000000044,'0q82_0100000000000020', '0q82_0100000000000028')
f__q( 1.00000000000000044,'0q82_0100000000000020')
f__q( 1.00000000000000044,'0q82_0100000000000020', '0q82_0100000000000018')
f__q( 1.00000000000000022,'0q82_0100000000000010', '0q82_0100000000000017FFFF') # alternated rounding?
f__q( 1.00000000000000022,'0q82_0100000000000010')
f__q( 1.00000000000000022,'0q82_0100000000000010', '0q82_01000000000000080001')
f__q( 1.0 ,'0q82_01', '0q82_0100000000000008') # so float granularity [1.0,2.0) is 2**-52 ~~ 22e-17
f__q( 1.0, '0q82_01')
f__q( 1.0, '0q82_01', '0q82') # alias for +1
zone_boundary()
f__q( 0.99999237060546875,'0q81FF_FFFF80')
f__q( 0.9998779296875, '0q81FF_FFF8')
f__q( 0.999, '0q81FF_FFBE76C8B43958') # 999/1000
f__q( 0.998046875, '0q81FF_FF80')
f__q( 0.998, '0q81FF_FF7CED916872B0') # 998/1000
f__q( 0.9972222222222222, '0q81FF_FF49F49F49F4A0') # 359/360
f__q( 0.9944444444444445, '0q81FF_FE93E93E93E940') # 358/360
f__q( 0.99, '0q81FF_FD70A3D70A3D70') # 99/100
f__q( 0.98, '0q81FF_FAE147AE147AE0') # 98/100
f__q( 0.96875, '0q81FF_F8')
f__q( 0.9375, '0q81FF_F0')
f__q( 0.875, '0q81FF_E0')
f__q( 0.75, '0q81FF_C0')
f__q(math.sqrt(0.5), '0q81FF_B504F333F9DE68')
f__q( 0.5, '0q81FF_80')
f__q( 0.25, '0q81FF_40')
f__q( 0.125, '0q81FF_20')
f__q( 0.0625, '0q81FF_10')
f__q( 0.03125, '0q81FF_08')
f__q( 0.02, '0q81FF_051EB851EB851EC0') # 2/200
f__q( 0.015625, '0q81FF_04')
f__q( 0.01171875, '0q81FF_03')
f__q( 0.01, '0q81FF_028F5C28F5C28F60') # 1/100
f__q( 0.0078125, '0q81FF_02')
f__q( 0.005555555555555556,'0q81FF_016C16C16C16C170') # 2/360
f__q( 0.0039520263671875, '0q81FF_0103') # 259/65536
f__q( 0.003936767578125, '0q81FF_0102') # 258/65536
f__q( 0.0039215087890625, '0q81FF_0101') # 257/65536
f__q( 0.00390625, '0q81FF_01') # 256/65536 aka 1/256
f__q( 0.00390625, '0q81FF_01', '0q81FF') # 1/256 alias
f__q( 0.0038909912109375, '0q81FE_FF') # 255/65536
f__q( 0.003875732421875, '0q81FE_FE') # 254/65536
f__q( 0.0038604736328125, '0q81FE_FD') # 253/65536
f__q( 0.002777777777777778,'0q81FE_B60B60B60B60B8') # 1/360
f__q( 0.002, '0q81FE_83126E978D4FE0') # 2/1000
f__q( 0.001953125, '0q81FE_80')
f__q( 0.001, '0q81FE_4189374BC6A7F0') # 1/1000 = 0x0.004189374BC6A7EF9DB22D0E560 4189374BC6A7EF9DB22D0E560 ...
f__q( 0.0009765625, '0q81FE_40')
f__q( 0.00048828125, '0q81FE_20')
f__q( 0.000244140625, '0q81FE_10')
f__q( 0.0001220703125, '0q81FE_08')
f__q( 0.00006103515625, '0q81FE_04')
f__q( 0.000030517578125, '0q81FE_02')
f__q(math.pow(256, -2), '0q81FE_01')
f__q( 0.0000152587890625, '0q81FE_01') # 1/65536
f__q( 0.0000152587890625, '0q81FE_01', '0q81FE') # 1/65536 alias
f__q( 0.00000762939453125,'0q81FD_80')
f__q(math.pow(256, -3), '0q81FD_01')
f__q(math.pow(256, -4), '0q81FC_01')
f__q(math.pow(256, -10), '0q81F6_01')
f__q(math.pow(256, -100), '0q819C_01')
f__q(math.pow(256, -100), '0q819C_01', '0q819C') # alias for 256**-100
f__q(math.pow( 2, -991), '0q8184_02')
f__q(math.pow( 2, -992), '0q8184_01')
f__q(math.pow(256, -124), '0q8184_01')
f__q(math.pow( 2, -993), '0q8183_80')
f__q(math.pow( 2, -994), '0q8183_40')
f__q(math.pow( 2, -998), '0q8183_04')
f__q(math.pow( 2, -999), '0q8183_02')
f__q(math.pow( 2, -1000) + math.pow(2,-1052),
'0q8183_0100000000000010') # boldest reasonable float, near positive ludicrously small boundary
if LUDICROUS_NUMBER_SUPPORT:
f__q(math.pow( 2, -1000), 'something') # gentlest positive ludicrously small number
f__q(math.pow(256, -125), 'something')
else:
f__q(math.pow( 2, -1000), '0q8183_01') # gentlest positive ludicrously small number
f__q(math.pow(256, -125), '0q8183_01')
zone_boundary()
f__q( 0.0, '0q80', '0q80FF0000_FF4143E0_01') # +2**-99999999, a ludicrously small positive number
zone_boundary()
f__q( 0.0, '0q80', '0q807F') # +infinitesimal
zone_boundary()
f__q( 0.0, '0q80')
zone_boundary()
f__q( -0.0, '0q80', '0q7F81') # -infinitesimal
zone_boundary()
f__q( -0.0, '0q80', '0q7F00FFFF_00BEBC1F_80') # -2**-99999999, a ludicrously small negative number
zone_boundary()
if LUDICROUS_NUMBER_SUPPORT:
f__q(-math.pow(256, -125), 'something')
f__q(-math.pow( 2, -1000), 'something') # gentlest negative ludicrously small number
else:
f__q(-math.pow(256, -125), '0q7E7C_FF')
f__q(-math.pow( 2, -1000), '0q7E7C_FF') # gentlest negative ludicrously small number
f__q(-math.pow( 2, -1000) - math.pow(2,-1052),
'0q7E7C_FEFFFFFFFFFFFFF0') # boldest reasonable float, near negative ludicrously small boundary
f__q(-math.pow( 2, -999), '0q7E7C_FE')
f__q(-math.pow( 2, -998), '0q7E7C_FC')
f__q(-math.pow( 2, -994), '0q7E7C_C0')
f__q(-math.pow( 2, -993), '0q7E7C_80')
f__q(-math.pow(256, -124), '0q7E7B_FF')
f__q(-math.pow( 2, -992), '0q7E7B_FF')
f__q(-math.pow( 2, -991), '0q7E7B_FE')
f__q(-math.pow(256, -100), '0q7E63_FF', '0q7E64') # alias for -256**-100
f__q(-math.pow(256, -100), '0q7E63_FF')
f__q(-math.pow(256, -10), '0q7E09_FF')
f__q(-math.pow(256, -4), '0q7E03_FF')
f__q(-math.pow(256, -3), '0q7E02_FF')
f__q( | |
"""ILI9341 LCD/Touch module."""
from time import sleep
from math import cos, sin, pi, radians
from sys import implementation
import ustruct
from uio import BytesIO
def color565(r, g, b):
    """Encode 8-bit red, green and blue components as an RGB565 value.

    Args:
        r (int): Red value.
        g (int): Green value.
        b (int): Blue value.
    Returns:
        int: 16-bit 5-6-5 packed color.
    """
    red = (r & 0xF8) << 8    # top 5 bits of red  -> bits 15-11
    green = (g & 0xFC) << 3  # top 6 bits of green -> bits 10-5
    blue = b >> 3            # top 5 bits of blue  -> bits 4-0
    return red | green | blue
class Display(object):
"""Serial interface for 16-bit color (5-6-5 RGB) IL9341 display.
Note: All coordinates are zero based.
"""
# Command constants from ILI9341 datasheet
NOP = const(0x00) # No-op
SWRESET = const(0x01) # Software reset
RDDID = const(0x04) # Read display ID info
RDDST = const(0x09) # Read display status
SLPIN = const(0x10) # Enter sleep mode
SLPOUT = const(0x11) # Exit sleep mode
PTLON = const(0x12) # Partial mode on
NORON = const(0x13) # Normal display mode on
RDMODE = const(0x0A) # Read display power mode
RDMADCTL = const(0x0B) # Read display MADCTL
RDPIXFMT = const(0x0C) # Read display pixel format
RDIMGFMT = const(0x0D) # Read display image format
RDSELFDIAG = const(0x0F) # Read display self-diagnostic
INVOFF = const(0x20) # Display inversion off
INVON = const(0x21) # Display inversion on
GAMMASET = const(0x26) # Gamma set
DISPLAY_OFF = const(0x28) # Display off
DISPLAY_ON = const(0x29) # Display on
SET_COLUMN = const(0x2A) # Column address set
SET_PAGE = const(0x2B) # Page address set
WRITE_RAM = const(0x2C) # Memory write
READ_RAM = const(0x2E) # Memory read
PTLAR = const(0x30) # Partial area
VSCRDEF = const(0x33) # Vertical scrolling definition
MADCTL = const(0x36) # Memory access control
VSCRSADD = const(0x37) # Vertical scrolling start address
PIXFMT = const(0x3A) # COLMOD: Pixel format set
FRMCTR1 = const(0xB1) # Frame rate control (In normal mode/full colors)
FRMCTR2 = const(0xB2) # Frame rate control (In idle mode/8 colors)
FRMCTR3 = const(0xB3) # Frame rate control (In partial mode/full colors)
INVCTR = const(0xB4) # Display inversion control
DFUNCTR = const(0xB6) # Display function control
PWCTR1 = const(0xC0) # Power control 1
PWCTR2 = const(0xC1) # Power control 2
PWCTRA = const(0xCB) # Power control A
PWCTRB = const(0xCF) # Power control B
VMCTR1 = const(0xC5) # VCOM control 1
VMCTR2 = const(0xC7) # VCOM control 2
RDID1 = const(0xDA) # Read ID 1
RDID2 = const(0xDB) # Read ID 2
RDID3 = const(0xDC) # Read ID 3
RDID4 = const(0xDD) # Read ID 4
GMCTRP1 = const(0xE0) # Positive gamma correction
GMCTRN1 = const(0xE1) # Negative gamma correction
DTCA = const(0xE8) # Driver timing control A
DTCB = const(0xEA) # Driver timing control B
POSC = const(0xED) # Power on sequence control
ENABLE3G = const(0xF2) # Enable 3 gamma control
PUMPRC = const(0xF7) # Pump ratio control
ROTATE = {
0: 0x88,
90: 0xE8,
180: 0x48,
270: 0x28
}
    def __init__(self, spi, cs, dc, rst,
                 width=240, height=320, rotation=0):
        """Initialize the ILI9341 display.

        Args:
            spi (Class Spi): SPI interface for the display
            cs (Class Pin): Chip select pin
            dc (Class Pin): Data/Command pin
            rst (Class Pin): Reset pin
            width (Optional int): Screen width (default 240)
            height (Optional int): Screen height (default 320)
            rotation (Optional int): Rotation must be 0 (default), 90, 180 or 270
        Raises:
            RuntimeError: If rotation is not one of 0, 90, 180, 270.
        """
        self.spi = spi
        self.cs = cs
        self.dc = dc
        self.rst = rst
        self.width = width
        self.height = height
        if rotation not in self.ROTATE.keys():
            raise RuntimeError('Rotation must be 0, 90, 180 or 270.')
        else:
            # Store the MADCTL byte for the requested orientation.
            self.rotation = self.ROTATE[rotation]
        # Initialize GPIO pins and set implementation specific methods.
        # CircuitPython and MicroPython expose different Pin/SPI APIs, so the
        # low-level reset/write methods are bound here once instead of being
        # branched on at every call.
        if implementation.name == 'circuitpython':
            self.cs.switch_to_output(value=True)
            self.dc.switch_to_output(value=False)
            self.rst.switch_to_output(value=True)
            self.reset = self.reset_cpy
            self.write_cmd = self.write_cmd_cpy
            self.write_data = self.write_data_cpy
        else:
            self.cs.init(self.cs.OUT, value=1)
            self.dc.init(self.dc.OUT, value=0)
            self.rst.init(self.rst.OUT, value=1)
            self.reset = self.reset_mpy
            self.write_cmd = self.write_cmd_mpy
            self.write_data = self.write_data_mpy
        self.reset()
        # Send initialization commands (panel power, timing, gamma and pixel
        # format setup; values follow the usual ILI9341 init sequence).
        self.write_cmd(self.SWRESET)  # Software reset
        sleep(.1)
        self.write_cmd(self.PWCTRB, 0x00, 0xC1, 0x30)  # Pwr ctrl B
        self.write_cmd(self.POSC, 0x64, 0x03, 0x12, 0x81)  # Pwr on seq. ctrl
        self.write_cmd(self.DTCA, 0x85, 0x00, 0x78)  # Driver timing ctrl A
        self.write_cmd(self.PWCTRA, 0x39, 0x2C, 0x00, 0x34, 0x02)  # Pwr ctrl A
        self.write_cmd(self.PUMPRC, 0x20)  # Pump ratio control
        self.write_cmd(self.DTCB, 0x00, 0x00)  # Driver timing ctrl B
        self.write_cmd(self.PWCTR1, 0x23)  # Pwr ctrl 1
        self.write_cmd(self.PWCTR2, 0x10)  # Pwr ctrl 2
        self.write_cmd(self.VMCTR1, 0x3E, 0x28)  # VCOM ctrl 1
        self.write_cmd(self.VMCTR2, 0x86)  # VCOM ctrl 2
        self.write_cmd(self.MADCTL, self.rotation)  # Memory access ctrl
        self.write_cmd(self.VSCRSADD, 0x00)  # Vertical scrolling start address
        self.write_cmd(self.PIXFMT, 0x55)  # COLMOD: Pixel format (16-bit color)
        self.write_cmd(self.FRMCTR1, 0x00, 0x18)  # Frame rate ctrl
        self.write_cmd(self.DFUNCTR, 0x08, 0x82, 0x27)
        self.write_cmd(self.ENABLE3G, 0x00)  # Enable 3 gamma ctrl
        self.write_cmd(self.GAMMASET, 0x01)  # Gamma curve selected
        self.write_cmd(self.GMCTRP1, 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E,
                       0xF1, 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00)
        self.write_cmd(self.GMCTRN1, 0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31,
                       0xC1, 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F)
        self.write_cmd(self.SLPOUT)  # Exit sleep
        sleep(.1)
        self.write_cmd(self.DISPLAY_ON)  # Display on
        sleep(.1)
        self.clear()
def block(self, x0, y0, x1, y1, data):
"""Write a block of data to display.
Args:
x0 (int): Starting X position.
y0 (int): Starting Y position.
x1 (int): Ending X position.
y1 (int): Ending Y position.
data (bytes): Data buffer to write.
"""
self.write_cmd(self.SET_COLUMN, *ustruct.pack(">HH", x0, x1))
self.write_cmd(self.SET_PAGE, *ustruct.pack(">HH", y0, y1))
self.write_cmd(self.WRITE_RAM)
self.write_data(data)
    def cleanup(self):
        """Clean up resources: blank the screen, turn the panel off and release the SPI bus."""
        self.clear()
        self.display_off()
        self.spi.deinit()
        print('display off')
def clear(self, color=0):
"""Clear display.
Args:
color (Optional int): RGB565 color value (Default: 0 = Black).
"""
w = self.width
h = self.height
# Clear display in 1024 byte blocks
if color:
line = color.to_bytes(2, 'big') * (w * 8)
else:
line = bytearray(w * 16)
for y in range(0, h, 8):
self.block(0, y, w - 1, y + 7, line)
    def contrast(self, level):
        """Set display contrast to specified level.

        Args:
            level (int): Contrast level (0 - 15).
        NOTE(review): ``CONTRAST_MASTER`` is not defined anywhere in this
        class (the command constants above have no such entry), so calling
        this method raises AttributeError.  It looks carried over from an
        OLED driver -- confirm whether this method should be removed or
        mapped to an actual ILI9341 command.
        """
        assert(0 <= level < 16)
        self.write_cmd(self.CONTRAST_MASTER, level)
    def display_off(self):
        """Turn display off (sends the DISPLAY_OFF command, 0x28)."""
        self.write_cmd(self.DISPLAY_OFF)
    def display_on(self):
        """Turn display on (sends the DISPLAY_ON command, 0x29)."""
        self.write_cmd(self.DISPLAY_ON)
def draw_circle(self, x0, y0, r, color):
"""Draw a circle.
Args:
x0 (int): X coordinate of center point.
y0 (int): Y coordinate of center point.
r (int): Radius.
color (int): RGB565 color value.
"""
f = 1 - r
dx = 1
dy = -r - r
x = 0
y = r
self.draw_pixel(x0, y0 + r, color)
self.draw_pixel(x0, y0 - r, color)
self.draw_pixel(x0 + r, y0, color)
self.draw_pixel(x0 - r, y0, color)
while x < y:
if f >= 0:
y -= 1
dy += 2
f += dy
x += 1
dx += 2
f += dx
self.draw_pixel(x0 + x, y0 + y, color)
self.draw_pixel(x0 - x, y0 + y, color)
self.draw_pixel(x0 + x, y0 - y, color)
self.draw_pixel(x0 - x, y0 - y, color)
self.draw_pixel(x0 + y, y0 + x, color)
self.draw_pixel(x0 - y, y0 + x, color)
self.draw_pixel(x0 + y, y0 - x, color)
self.draw_pixel(x0 - y, y0 - x, color)
def draw_ellipse(self, x0, y0, a, b, color):
"""Draw an ellipse.
Args:
x0, y0 (int): Coordinates of center point.
a (int): Semi axis horizontal.
b (int): Semi axis vertical.
color (int): RGB565 color value.
Note:
The center point is the center of the x0,y0 pixel.
Since pixels are not divisible, the axes are integer rounded
up to complete on a full pixel. Therefore the major and
minor axes are increased by 1.
"""
a2 = a * a
b2 = b * b
twoa2 = a2 + a2
twob2 = b2 + b2
x = 0
y = b
px = 0
py = twoa2 * y
# Plot initial points
self.draw_pixel(x0 + x, y0 + y, color)
self.draw_pixel(x0 - x, y0 + y, color)
self.draw_pixel(x0 + x, y0 - y, color)
self.draw_pixel(x0 - x, y0 - y, color)
# Region 1
p = round(b2 - (a2 * b) + (0.25 * a2))
while px < py:
x += 1
px += twob2
if p < 0:
p += b2 + px
else:
y -= 1
py -= twoa2
p += b2 + px - py
self.draw_pixel(x0 + x, y0 + y, color)
self.draw_pixel(x0 - x, y0 + y, color)
self.draw_pixel(x0 + | |
from flask import session,request,url_for,redirect, flash, current_app
from functools import wraps
import os,time
from lightserv import db_lightsheet, db_admin
import datajoint as dj
import paramiko
import subprocess
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s:%(name)s:%(message)s')
''' Make the file handler to deal with logging to file '''
file_handler = logging.FileHandler('logs/main_utils.log')
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler() # level already set at debug from logger.setLevel() above
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.addHandler(file_handler)
def table_sorter(dic, sort_key):
	"""Return a sort-key tuple for row dict *dic* that orders None values last.

	String values compare case-insensitively; all other values compare as-is.
	"""
	value = dic[sort_key]
	if type(value) == str:
		return (value is None, value.lower())
	return (value is None, value)
def log_http_requests(f):
	"""Decorator that records every HTTP request to the UserActionLog table.

	Logs the session user (or 'logged out user'), the HTTP method, the
	decorated route function and its module, plus the client's browser
	name/version and platform, then runs the wrapped view unchanged.
	"""
	@wraps(f)
	def decorated_function(*args, **kwargs):
		try:
			current_user = session['user']
		# BUG FIX: was a bare ``except:`` which also swallowed SystemExit /
		# KeyboardInterrupt; only a missing session key is expected here.
		except KeyError:
			current_user = 'logged out user'
		logstr = '{0} {1} request to route: "{2}()" in {3}'.\
			format(current_user,request.method,f.__name__,f.__module__)
		user_agent = request.user_agent
		browser_name = user_agent.browser # e.g. chrome
		browser_version = user_agent.version # e.g. '78.0.3904.108'
		platform = user_agent.platform # e.g. linux
		insert_dict = {'browser_name':browser_name,'browser_version':browser_version,
			'event':logstr,'platform':platform}
		db_admin.UserActionLog().insert1(insert_dict)
		return f(*args, **kwargs)
	return decorated_function
def logged_in(f):
	"""Decorator that redirects anonymous visitors to the login page.

	Logged-in users run the wrapped view; anyone else is sent to the login
	route with a ``next`` parameter pointing back at the requested URL.
	"""
	@wraps(f)
	def decorated_function(*args, **kwargs):
		if 'user' not in session:
			# Not authenticated: bounce to login, preserving the destination.
			login_url = '%s?next=%s' % (url_for('main.login'), request.url)
			return redirect(login_url)
		return f(*args, **kwargs)
	return decorated_function
def logged_in_as_admin(f):
	"""Decorator restricting a route to the single admin user ('ahoag').

	Anonymous visitors are redirected to login; logged-in non-admins are
	flashed a warning and sent to the welcome page.
	"""
	@wraps(f)
	def decorated_function(*args, **kwargs):
		if 'user' not in session:
			# Not authenticated: bounce to login, preserving the destination.
			login_url = '%s?next=%s' % (url_for('main.login'), request.url)
			return redirect(login_url)
		current_user = session['user']
		if current_user != 'ahoag':
			logger.info(f"Current user: {current_user} is not an admin and tried to access the admin page. "
				"Denying them access")
			flash(f'''That page is restricted''','danger')
			return redirect(url_for('main.welcome'))
		logger.info(f"{current_user} is an admin and can access the admin page")
		return f(*args, **kwargs)
	return decorated_function
def logged_in_as_dash_admin(f):
	"""Decorator restricting a route to users in config['DASHBOARD_ADMINS'].

	Anonymous visitors are redirected to login; logged-in non-admins are
	flashed a warning and sent to the welcome page.
	"""
	@wraps(f)
	def decorated_function(*args, **kwargs):
		if 'user' not in session:
			# Not authenticated: bounce to login, preserving the destination.
			login_url = '%s?next=%s' % (url_for('main.login'), request.url)
			return redirect(login_url)
		current_user = session['user']
		if current_user not in current_app.config['DASHBOARD_ADMINS']:
			logger.info(f"Current user: {current_user} is not an admin and tried to access the admin page. "
				"Denying them access")
			flash(f'''That page is restricted''','danger')
			return redirect(url_for('main.welcome'))
		logger.info(f"{current_user} is a dash admin and can access the admin page")
		return f(*args, **kwargs)
	return decorated_function
def request_exists(f):
	"""Decorator verifying that the username/request_name pair from the URL
	exists in the Request table; otherwise flash and show all requests.
	"""
	@wraps(f)
	def decorated_function(*args, **kwargs):
		username = kwargs['username']
		request_name = kwargs['request_name']
		matching_requests = db_lightsheet.Request() & \
			f'request_name="{request_name}"' & f'username="{username}"'
		if len(matching_requests) > 0:
			return f(*args, **kwargs)
		flash("That request does not exist","danger")
		return redirect(url_for('requests.all_requests'))
	return decorated_function
def logged_in_as_clearer(f):
	"""Decorator gating the clearing entry form for a clearing batch.

	Access rules (in order):
	  - anonymous users are redirected to login with a ``next`` parameter;
	  - if the clearing batch does not exist, flash and show all requests;
	  - if no clearer is assigned yet, a CLEARING_ADMINS member claims the
	    batch (the ClearingBatch row is updated) and proceeds;
	  - if a clearer is assigned, only that user or a MASTER_ADMINS member
	    may proceed; everyone else is sent to the welcome page.
	"""
	@wraps(f)
	def decorated_function(*args, **kwargs):
		if 'user' in session: # user is logged in
			current_user = session['user']
			request_name = kwargs['request_name']
			username = kwargs['username']
			clearing_batch_number = kwargs['clearing_batch_number']
			clearing_batch_contents = db_lightsheet.Request.ClearingBatch() & \
				f'request_name="{request_name}"' & f'username="{username}"' & \
				f'clearing_batch_number={clearing_batch_number}'
			if len(clearing_batch_contents) == 0:
				flash("No clearing batch exists with those parameters. Please try again.","danger")
				logger.debug("No clearing batch exists with those parameters. Redirecting to all requests page")
				return redirect(url_for('requests.all_requests'))
			clearer = clearing_batch_contents.fetch1('clearer')
			''' check to see if user assigned themself as clearer '''
			if clearer == None:
				logger.info("Clearing entry form accessed with clearer not yet assigned. ")
				''' now check to see if user is a designated clearer '''
				if current_user in current_app.config['CLEARING_ADMINS']: #
					# logger.info(f"Current user: {current_user} is a designated clearer and is now assigned as the clearer")
					# Claim the batch: persist current_user as the clearer.
					clearing_batch_update_dict = clearing_batch_contents.fetch1()
					clearing_batch_update_dict['clearer'] = current_user
					db_lightsheet.Request.ClearingBatch().update1(clearing_batch_update_dict)
					logger.info(f"Current user: {current_user} is a designated clearer and is now assigned as the clearer")
					return f(*args, **kwargs)
				else: # user is not a designated clearer and did not self assign
					logger.info(f"""Current user: {current_user} is not a designated clearer and did not specify themselves
						as the clearer when submitting request. Denying them access""")
					flash('''You do not have permission to access the clearing form for this experiment.
						Please email us at <EMAIL> if you think there has been a mistake.''','warning')
					return redirect(url_for('main.welcome'))
			else: # clearer is assigned - only allow access to clearing entry to them
				if current_user == clearer:
					logger.info(f"{current_user} is the rightful clearer and accessed the clearing entry form")
					return f(*args, **kwargs)
				elif current_user in current_app.config['MASTER_ADMINS']:
					logger.info(f"{current_user} is not the original clearer but is a master admin and accessed the clearing entry form")
					return f(*args, **kwargs)
				else:
					logger.info(f"Current user: {current_user} is not the clearer, who has already been assigned."
						"Denying them access")
					flash(f'''The clearer has already been assigned for this entry and you, {current_user}, are not them.
						Please email us at <EMAIL> if you think there has been a mistake.''','warning')
					return redirect(url_for('main.welcome'))
		else:
			# Not authenticated: bounce to login, preserving the destination.
			next_url = request.url
			login_url = '%s?next=%s' % (url_for('main.login'), next_url)
			return redirect(login_url)
	return decorated_function
def logged_in_as_processor(f):
	"""Decorator restricting a processing route to the assigned processor.

	Anonymous visitors are redirected to login; a logged-in user who is not
	the assigned processor is flashed a warning and sent to the processing
	manager; the assigned processor runs the wrapped view.

	NOTE(review): a second ``logged_in_as_processor`` defined later in this
	module shadows this definition at import time -- confirm which version
	is intended and delete the other.
	"""
	@wraps(f)
	def decorated_function(*args, **kwargs):
		if 'user' not in session:
			# Not authenticated: bounce to login, preserving the destination.
			next_url = request.url
			login_url = '%s?next=%s' % (url_for('main.login'), next_url)
			return redirect(login_url)
		current_user = session['user']
		request_name = kwargs['request_name']
		sample_name = kwargs['sample_name']
		username = kwargs['username']
		imaging_request_number = kwargs['imaging_request_number']
		processing_request_number = kwargs['processing_request_number']
		processing_request_contents = db_lightsheet.Request.ProcessingRequest() & f'request_name="{request_name}"' & \
			f'username="{username}"' & f'sample_name="{sample_name}"' & \
			f'imaging_request_number="{imaging_request_number}"' & \
			f'processing_request_number="{processing_request_number}"'
		processor = processing_request_contents.fetch1('processor')
		if current_user != processor:
			flash(("The processor has already been assigned for this entry "
				"and you are not them. Please email us at <EMAIL> "
				"if you think there has been a mistake."),'warning')
			return redirect(url_for('processing.processing_manager'))
		# BUG FIX: the original redirected the rightful processor back to the
		# login page here and never called the wrapped view.
		return f(*args, **kwargs)
	return decorated_function
def logged_in_as_clearing_manager(f):
	"""Decorator restricting a route to users in config['CLEARING_ADMINS'].

	Anonymous visitors are redirected to login; logged-in non-managers are
	flashed a warning and sent to the welcome page.
	"""
	@wraps(f)
	def decorated_function(*args, **kwargs):
		if 'user' not in session:
			# Not authenticated: bounce to login, preserving the destination.
			login_url = '%s?next=%s' % (url_for('main.login'), request.url)
			return redirect(login_url)
		current_user = session['user']
		if current_user not in current_app.config['CLEARING_ADMINS']:
			logger.info(f"""Current user: {current_user} is not a clearing manager. Denying them access""")
			flash('''You do not have access to this page.
				Please email us at <EMAIL> if you think there has been a mistake.''',
				'warning')
			return redirect(url_for('main.welcome'))
		logger.info(f"Current user: {current_user} is a clearing manager. Allowing them access.")
		return f(*args, **kwargs)
	return decorated_function
def logged_in_as_imager(f):
	"""Decorator gating the imaging entry form for an imaging batch.

	Access rules (in order):
	  - anonymous users are redirected to login with a ``next`` parameter;
	  - if the imaging batch does not exist, flash and show all requests;
	  - if no imager is assigned yet, an IMAGING_ADMINS member claims the
	    batch (the ImagingBatch row is updated) and proceeds;
	  - if the assigned imager is an admin, any other admin may proceed
	    (with a caution flash); non-admins are sent to the request overview;
	  - otherwise only the assigned imager may proceed.
	"""
	@wraps(f)
	def decorated_function(*args, **kwargs):
		if 'user' in session: # user is logged in
			current_user = session['user']
			logger.debug(f"User is logged in as: {current_user}")
			# These kwargs also form the restriction applied via ``& kwargs``.
			username = kwargs['username']
			request_name = kwargs['request_name']
			clearing_batch_number = kwargs['clearing_batch_number']
			imaging_request_number = kwargs['imaging_request_number']
			imaging_batch_number = kwargs['imaging_batch_number']
			imaging_batch_contents = db_lightsheet.Request.ImagingBatch() & kwargs
			if len(imaging_batch_contents) == 0:
				flash("No imaging batch exists with those parameters. Please try again.","danger")
				logger.debug("No imaging request exists with those parameters. Redirecting to all requests page")
				return redirect(url_for('requests.all_requests'))
			imager = imaging_batch_contents.fetch1('imager')
			logger.debug(f"Imager is: {imager}")
			''' check to see if user assigned themself as imager '''
			if imager == None: # not yet assigned
				logger.info("Imaging entry form accessed with imager not yet assigned. ")
				''' now check to see if user is a designated imager '''
				if current_user in current_app.config['IMAGING_ADMINS']:
					# Claim the batch: persist current_user as the imager.
					imaging_batch_update_dict = imaging_batch_contents.fetch1()
					imaging_batch_update_dict['imager'] = current_user
					db_lightsheet.Request.ImagingBatch().update1(imaging_batch_update_dict)
					logger.info(f"{current_user} is a designated imager and is now assigned as the imager")
					return f(*args, **kwargs)
				else: # user is not a designated imager and did not self assign
					logger.info(f"""Current user: {current_user} is not a designated imager and did not specify themselves
						as the imager when submitting request. Denying them access""")
					flash('''You do not have permission to access the imaging form for this experiment.
						Please email us at <EMAIL> if you think there has been a mistake.''','warning')
					return redirect(url_for('main.welcome'))
			else: # imager is assigned
				if imager in current_app.config['IMAGING_ADMINS']: # one of the admins started the form
					logger.debug("Imager is an admin")
					if current_user in current_app.config['IMAGING_ADMINS']: # one of the admins is accessing the form
						if current_user != imager:
							# Admins may stand in for each other, but warn them.
							logger.debug(f"""Current user: {current_user} accessed the form of which {imager} is the imager""")
							flash("While you have access to this page, "
								"you are not the primary imager "
								"so please proceed with caution.",'warning')
							return f(*args, **kwargs)
						else:
							logger.info(f"""Current user: {current_user} is the rightful imager and so is allowed access""")
							return f(*args, **kwargs)
					else:
						flash(("The imager has already been assigned for this entry "
							"and you are not them. Please email us at <EMAIL> "
							"if you think there has been a mistake."),'warning')
						return redirect(url_for("requests.request_overview",
							username=username,request_name=request_name))
				elif imager == current_user:
					logger.info(f"Current user: {current_user} is the rightful imager and so is allowed access")
					return f(*args, **kwargs)
				else:
					logger.info(f"""Current user: {current_user} is not the imager. Denying them access""")
					flash(("The imager has already been assigned for this entry "
						"and you are not them. Please email us at <EMAIL> "
						"if you think there has been a mistake."),'warning')
					return redirect(url_for('main.welcome'))
		else:
			# Not authenticated: bounce to login, preserving the destination.
			next_url = request.url
			login_url = '%s?next=%s' % (url_for('main.login'), next_url)
			return redirect(login_url)
	return decorated_function
def image_manager(f):
	"""Decorator restricting a route to users in config['IMAGING_ADMINS'].

	Anonymous visitors are redirected to login; logged-in non-admins are
	flashed a warning and sent to the welcome page.
	"""
	@wraps(f)
	def decorated_function(*args, **kwargs):
		if 'user' not in session:
			# Not authenticated: bounce to login, preserving the destination.
			login_url = '%s?next=%s' % (url_for('main.login'), request.url)
			return redirect(login_url)
		current_user = session['user']
		if current_user not in current_app.config['IMAGING_ADMINS']:
			logger.info(f"Current user: {current_user} is not an imaging admin. Denying them access.")
			flash("You do not have access to this page. \
			Please email us at <EMAIL> if you think there has been a mistake.")
			return redirect(url_for('main.welcome'))
		logger.info(f"Current user: {current_user} is an imaging admin. Allowing them access")
		return f(*args, **kwargs)
	return decorated_function
def logged_in_as_processor(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if 'user' in session: # user is logged in
current_user = session['user']
username = kwargs['username']
request_name = kwargs['request_name']
sample_name = kwargs['sample_name']
imaging_request_number = kwargs['imaging_request_number']
processing_request_number = kwargs['processing_request_number']
processing_request_contents = db_lightsheet.Request.ProcessingRequest() & f'request_name="{request_name}"' & \
f'username="{username}"' & f'sample_name="{sample_name}"' & \
f'imaging_request_number="{imaging_request_number}"' & \
f'processing_request_number="{processing_request_number}"'
if len(processing_request_contents) == 0:
flash("No processing request exists with those parameters. Please try again.","danger")
logger.debug("No processing request exists with those parameters. Redirecting to all requests page")
return redirect(url_for('requests.all_requests'))
processor = processing_request_contents.fetch1('processor')
''' check to see if user assigned themself as processor '''
if processor == None: # not yet assigned
logger.info("processing entry form accessed with processor not yet assigned. ")
''' now check to see if user is a designated processor '''
if current_user in current_app.config['PROCESSING_ADMINS']: #
processing_request_update_dict = processing_request_contents.fetch1()
processing_request_update_dict['processor'] = current_user
db_lightsheet.Request.ProcessingRequest().update1(processing_request_update_dict)
logger.info(f"{current_user} is a designated processor and is now assigned as the processor")
return f(*args, **kwargs)
elif current_user == username:
logger.info(f"Current user: {current_user} = username for this request, so assigning them as the processor")
return f(*args, **kwargs)
else: # | |
region corners list file %s does not exist" % region_corners_path
)
# read in the region corners data
with open(region_corners_path, 'r', encoding='utf-8') as flf:
tiled_region_corners = json.load(flf)
tiled_region_corners = json_utils.rename_missing_fovs(tiled_region_corners)
# define the parameter dict to return
tiling_params = {}
# copy over the metadata values from tiled_region_corners to tiling_params
tiling_params = assign_metadata_vals(tiled_region_corners, tiling_params, ['fovs'])
# define the region_params dict
region_params = {rpf: [] for rpf in settings.REGION_PARAM_FIELDS}
# prompt the user for params associated with each tiled region
read_tiled_region_inputs(tiled_region_corners, region_params)
# need to copy fov metadata over, needed for generate_fov_list
tiling_params['fovs'] = copy.deepcopy(tiled_region_corners['fovs'])
# store the read in parameters in the region_params key
tiling_params['region_params'] = generate_region_info(region_params)
# whether to insert moly points between regions
moly_region_insert = read_tiling_param(
"Insert a moly point between each tiled region? \
If yes, you must provide a path to the example moly_FOV json file. Y/N: ",
"Error: moly point region parameter must be either Y or N",
lambda mri: mri in ['Y', 'N', 'y', 'n'],
dtype=str
)
# convert to uppercase to standardize
moly_region_insert = moly_region_insert.upper()
tiling_params['moly_region'] = moly_region_insert
# whether to insert moly points between fovs
moly_interval = read_tiling_param(
"Enter the FOV interval size to insert Moly points. If yes, you must provide \
a path to the example moly_FOV json file and enter the number of FOVs "
"between each Moly point. If no, enter 0: ",
"Error: moly interval must be 0 or a positive integer",
lambda mi: mi >= 0,
dtype=int
)
if moly_interval > 0:
tiling_params['moly_interval'] = moly_interval
return tiling_params
def generate_x_y_fov_pairs(x_range, y_range):
    """Given all x and y coordinates (in microns) a FOV can take,
    generate all possible `(x, y)` pairings

    Args:
        x_range (list):
            Range of x values a FOV can take
        y_range (list):
            Range of y values a FOV can take

    Returns:
        list:
            Every possible `(x, y)` pair for a FOV, x varying slowest
    """
    # The original wrapped this in `combinations((x_range, y_range), 2)`,
    # which yields exactly one element -- the (x_range, y_range) pair itself --
    # so the whole computation is just the Cartesian product of the two ranges.
    return list(product(x_range, y_range))
def generate_x_y_fov_pairs_rhombus(top_left, top_right, bottom_left, bottom_right,
                                   num_row, num_col):
    """Generates coordinates (in microns) of FOVs defined by the corners of a rhombus

    Args:
        top_left (XYCoord): coordinate of top left corner
        top_right (XYCoord): coordinate of top right corner
        bottom_left (XYCoord): coordinate of bottom left corner
        bottom_right (XYCoord): coordinate of bottom right corner
        num_row (int): number of fovs on row dimension
        num_col (int): number of fovs on column dimension

    Returns:
        list: coordinates for all FOVs defined by region
    """
    # per-FOV vertical skew: y-shift averaged between the top and bottom edges,
    # spread evenly across the columns
    top_row_shift = top_right.y - top_left.y
    bottom_row_shift = bottom_right.y - bottom_left.y
    row_increment = (top_row_shift + bottom_row_shift) / 2 / (num_col - 1)
    # per-FOV horizontal skew: x-shift averaged between the left and right edges,
    # spread evenly across the rows
    left_col_shift = bottom_left.x - top_left.x
    right_col_shift = bottom_right.x - top_right.x
    col_increment = (left_col_shift + right_col_shift) / 2 / (num_row - 1)
    # baseline spacing of an axis-aligned rectangle spanning the same corners
    row_baseline = (bottom_left.y - top_left.y) / (num_row - 1)
    col_baseline = (top_right.x - top_left.x) / (num_col - 1)
    # walk column-major from the top-left corner, applying baseline + skew
    coords = []
    for col_idx in range(num_col):
        for row_idx in range(num_row):
            x_val = top_left.x + col_baseline * col_idx + col_increment * row_idx
            y_val = top_left.y + row_baseline * row_idx + row_increment * col_idx
            coords.append((int(x_val), int(y_val)))
    return coords
def generate_tiled_region_fov_list(tiling_params, moly_path: Optional[str] = None):
"""Generate the list of FOVs on the image from the `tiling_params` set for tiled regions
Moly point insertion: happens once every number of FOVs you specified in
`tiled_region_set_params`. There are a couple caveats to keep in mind:
- The interval specified will not reset between regions. In other words, if the interval is 3
and the next set of FOVs contains 2 in region 1 and 1 in region 2, the next Moly point will
be placed after the 1st FOV in region 2 (not after the 3rd FOV in region 2). Moly points
inserted between regions are ignored in this calculation.
- If the interval specified cleanly divides the number of FOVs in a region, a Moly point will
not be placed at the end of the region. Suppose 3 FOVs are defined along both the x- and
y-axis for region 1 (for a total of 9 FOVs) and a Moly point FOV interval of 3 is specified.
Without also setting Moly point insertion between different regions, a Moly point will NOT be
placed after the last FOV of region 1 (the next Moly point will appear after the 3rd
FOV in in region 2).
Args:
tiling_params (dict):
The tiling parameters created by `set_tiled_region_params`
moly_path (Optional[str]):
The path to the Moly point to insert between FOV intervals and/or regions.
If these insertion parameters are not specified in `tiling_params`, this won't be used.
Defaults to None.
Returns:
dict:
Data containing information about each FOV
"""
# file path validation
if (tiling_params.get("moly_region", "N") == "Y") or \
(tiling_params.get("moly_interval", 0) > 0):
if not os.path.exists(moly_path):
raise FileNotFoundError("The provided Moly FOV file %s does not exist. If you want\
to include Moly FOVs you must provide a valid path. Otherwise\
, select 'No' for the options relating to Moly FOVs"
% moly_path)
# read in the moly point data
with open(moly_path, 'r', encoding='utf-8') as mpf:
moly_point = json.load(mpf)
# define the fov_regions dict
fov_regions = {}
# copy over the metadata values from tiling_params to fov_regions
fov_regions = assign_metadata_vals(
tiling_params, fov_regions, ['region_params', 'moly_region', 'moly_interval']
)
# define a specific FOVs field in fov_regions, this will contain the actual FOVs
fov_regions['fovs'] = []
# define a counter to determine where to insert a moly point
# only used if moly_interval is set in tiling_params
# NOTE: total_fovs is used to prevent moly_counter from initiating the addition of
# a Moly point at the end of a region
moly_counter = 0
total_fovs = 0
# iterate through each region and append created fovs to fov_regions['fovs']
for region_index, region_info in enumerate(tiling_params['region_params']):
# extract start coordinates
start_row = region_info['region_start_row']
start_col = region_info['region_start_col']
# define the range of x- and y-coordinates to use
row_range = list(range(region_info['fov_num_row']))
col_range = list(range(region_info['fov_num_col']))
# create all pairs between two lists
row_col_pairs = generate_x_y_fov_pairs(row_range, col_range)
# name the FOVs according to MIBI conventions
fov_names = ['%s_R%dC%d' % (region_info['region_name'], y + 1, x + 1)
for x in range(region_info['fov_num_row'])
for y in range(region_info['fov_num_col'])]
# randomize pairs list if specified
if region_info['region_rand'] == 'Y':
# make sure the fov_names are set in the same shuffled indices for renaming
row_col_pairs, fov_names = shuffle(row_col_pairs, fov_names)
# update total_fovs, we'll prevent moly_counter from triggering the appending of
# a Moly point at the end of a region this way
total_fovs += len(row_col_pairs)
for index, (col_i, row_i) in enumerate(row_col_pairs):
# use the fov size to scale to the current x- and y-coordinate
cur_row = start_row - row_i * region_info['row_fov_size']
cur_col = start_col + col_i * region_info['col_fov_size']
# copy the fov metadata over and add cur_x, cur_y, and name
fov = copy.deepcopy(tiling_params['fovs'][region_index])
fov['centerPointMicrons']['x'] = cur_col
fov['centerPointMicrons']['y'] = cur_row
fov['name'] = fov_names[index]
# append value to fov_regions
fov_regions['fovs'].append(fov)
# increment moly_counter as we've added another fov
moly_counter += 1
# append a Moly point if moly_interval is set and we've reached the interval threshold
# the exception: don't insert a Moly point at the end of a region
if 'moly_interval' in tiling_params and \
moly_counter % tiling_params['moly_interval'] == 0 and \
moly_counter < total_fovs:
fov_regions['fovs'].append(moly_point)
# append Moly point to seperate regions if not last and if | |
<reponame>vmirage/YouTubeTV.bundle<filename>Contents/Code/__init__.py
# -*- coding: utf-8 -*-
# Copyright (c) 2014, KOL
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Python 2 / Plex plugin sandbox: names such as L(), R(), Dict, Prefs, Plugin,
# Locale, HTTP, SharedCodeService are injected by the Plex framework.
from urllib import urlencode
from time import time
from updater import Updater
# shared URL-service helpers for resolving playable YouTube video URLs
Video = SharedCodeService.video
PREFIX = '/video/youtubetv'
ART = 'art-default.jpg'
ICON = 'icon-default.png'
TITLE = u'%s' % L('Title')
# OAuth2 client credentials / scope for the YouTube Data API
YT_CLIENT_ID = (
    '383749313750-e0fj400djq4lukahnfjfqg6ckdbets63'
    '.apps.googleusercontent.com'
)
YT_SECRET = '<KEY>'
YT_SCOPE = 'https://www.googleapis.com/auth/youtube'
YT_VERSION = 'v3'
# menu icons bundled under Resources/, keyed by logical entry name.
# NOTE: 'whatToWhatch' is misspelled, but the same key is used when the icon is
# looked up below -- do not rename one side without the other.
ICONS = {
    'likes': R('heart-full.png'),
    'favorites': R('star-2.png'),
    'uploads': R('outbox-2.png'),
    'watchHistory': R('revert.png'),
    'watchLater': R('clock.png'),
    'subscriptions': R('podcast-2.png'),
    'browseChannels': R('grid-2.png'),
    'playlists': R('list.png'),
    'whatToWhatch': R('home.png'),
    'account': R('user-2.png'),
    'categories': R('store.png'),
    'options': R('settings.png'),
    'suggestions': R('tag.png'),
    'remove': R('bin.png'),
    'next': R('arrow-right.png'),
    'offline': R('power.png'),
    'search': R('search.png'),
}
# special playlists a video can be added to from the video-info menu
YT_EDITABLE = {
    'watchLater': L('watchLater'),
    'likes': L('I like this'),
    'favorites': L('Add to favorites'),
}
# special feed ids understood by Feed(): path on m.youtube.com plus menu title
YT_FEEDS = {
    '_SB': {'u': 'feed/subscriptions', 'title': L('My Subscriptions')},
    'HL': {'u': 'feed/history', 'title': L('watchHistory')},
    'WL': {'u': 'playlist', 'title': L('watchLater')},
}
###############################################################################
# Init
###############################################################################
# register the detailed list view used by the directory listings
Plugin.AddViewGroup(
    'details',
    viewMode='InfoList',
    type=ViewType.List,
    summary=SummaryTextType.Long
)
def Start():
    # Plugin entry point: cache HTTP responses for an hour, then apply
    # the current preferences (locale selection).
    HTTP.CacheTime = CACHE_1HOUR
    ValidatePrefs()
def ValidatePrefs():
    """Select the plugin locale.

    Use the user's language when a matching strings file is bundled under
    Contents/Strings/<lang>.json; otherwise fall back to 'en-us'.
    """
    language = GetLanguage()
    strings_path = Core.storage.abs_path(Core.storage.join_path(
        Core.bundle_path,
        'Contents',
        'Strings',
        '%s.json' % language
    ))
    if Core.storage.file_exists(strings_path):
        Locale.DefaultLocale = language
    else:
        Locale.DefaultLocale = 'en-us'
###############################################################################
# Video
###############################################################################
@handler(PREFIX, TITLE, thumb=ICON)
def MainMenu(complete=False, offline=False):
    """Top-level plugin menu.

    complete -- truthy when returning from the authorization flow; shows a
                header/message prompting for the device code.
    offline  -- truthy to sign out (drop the stored token) before rendering.
    """
    oc = ObjectContainer(title2=TITLE, no_cache=True, replace_parent=False)
    if offline:
        ResetToken()
    # no valid token: only offer the Authorize entry
    if not CheckToken():
        oc.add(DirectoryObject(
            key=Callback(Authorization),
            title=u'%s' % L('Authorize'),
            thumb=ICONS['options'],
        ))
        if complete:
            oc.header = L('Authorize')
            oc.message = L('You must enter code for continue')
        return oc
    # offer a plugin self-update entry when a new version is available
    Updater(PREFIX+'/update', oc)
    oc.add(DirectoryObject(
        key=Callback(Feed, oid='_SB'),
        title=u'%s' % L('My Subscriptions'),
        thumb=ICONS['subscriptions'],
    ))
    oc.add(DirectoryObject(
        key=Callback(Category, title=L('What to Watch')),
        title=u'%s' % L('What to Watch'),
        thumb=ICONS['whatToWhatch'],
    ))
    oc.add(DirectoryObject(
        key=Callback(Playlists, uid='me', title=L('Playlists')),
        title=u'%s' % L('Playlists'),
        thumb=ICONS['playlists'],
    ))
    oc.add(DirectoryObject(
        key=Callback(Categories, title=L('Categories'), c_type='video'),
        title=u'%s' % L('Categories'),
        thumb=ICONS['categories'],
    ))
    oc.add(DirectoryObject(
        key=Callback(Categories, title=L('Browse channels'), c_type='guide'),
        title=u'%s' % L('Browse channels'),
        thumb=ICONS['browseChannels'],
    ))
    oc.add(DirectoryObject(
        key=Callback(Channel, oid='me', title=L('My channel')),
        title=u'%s' % L('My channel'),
        thumb=ICONS['account'],
    ))
    # watch later / history / likes shortcuts for the signed-in user
    FillChannelInfo(oc, 'me', ('watchLater', 'watchHistory', 'likes'))
    oc.add(InputDirectoryObject(
        key=Callback(
            Search,
            s_type='video',
            title=u'%s' % L('Search Video')
        ),
        title=u'%s' % L('Search'), prompt=u'%s' % L('Search Video'),
        thumb=ICONS['search']
    ))
    # append the user's channel subscriptions at the bottom of the menu
    return AddSubscriptions(oc, uid='me')
@route(PREFIX + '/feed')
def Feed(oid, offset=None):
    """Render one of the special feeds in YT_FEEDS (subscriptions, history,
    watch later) by scraping the m.youtube.com ajax endpoints.

    oid    -- key into YT_FEEDS ('_SB', 'HL', 'WL').
    offset -- JSON-encoded continuation parameters from a previous page.
    """
    if not CheckToken():
        return NoContents()
    params = {
        'access_token': Dict['access_token'],
        'ajax': 1,
    }
    if offset:
        # continuation request: always hits the generic 'feed' path with the
        # itct/ctoken pair stored by the previous page
        path = 'feed'
        params['action_continuation'] = 1
        params.update(JSON.ObjectFromString(offset))
    else:
        path = YT_FEEDS[oid]['u']
        # watch-later is served as a playlist and needs the list id
        if YT_FEEDS[oid]['u'] == 'playlist':
            params['list'] = oid
            path = YT_FEEDS[oid]['u']
    try:
        # the ajax response is prefixed with a 4-byte anti-hijacking guard
        # (hence content[4:]) before the JSON payload
        res = JSON.ObjectFromString(HTTP.Request(
            'https://m.youtube.com/%s?%s' % (path, urlencode(params)),
            headers={
                'User-Agent': Video.USER_AGENT
            }
        ).content[4:])['content']
    except:
        return NoContents()
    # Unwrap the payload: the video list may live in any of several containers
    # depending on feed type and whether this is a continuation request.
    if 'single_column_browse_results' in res:
        for item in res['single_column_browse_results']['tabs']:
            if 'selected' in item and item['selected'] is True:
                res = item['content']
                break
    elif 'section_list' in res and len(res['section_list']['contents']):
        for item in res['section_list']['contents']:
            if item['item_type'] == 'playlist_video_list':
                res = item
                break
            elif 'contents' in item:
                for subitem in item['contents']:
                    if subitem['item_type'] == 'playlist_video_list':
                        res = subitem
                        break
                else:
                    # inner loop found nothing -- keep scanning outer list
                    continue
                break
    elif 'continuation_contents' in res:
        res = res['continuation_contents']
    else:
        return NoContents()
    if not 'contents' in res or not len(res['contents']):
        return NoContents()
    # Collect video ids (and pick up any pagination continuation tokens).
    ids = []
    if 'continuations' in res and len(res['continuations']):
        continuations = res['continuations']
    else:
        continuations = None
    for item in res['contents']:
        if 'continuations' in item and len(item['continuations']):
            continuations = item['continuations']
        vid = Video.GetFeedVid(item)
        if vid is not None:
            ids.append(vid)
            continue
        # item may be a shelf wrapping several videos
        for subitem in item['contents']:
            vid = Video.GetFeedVid(subitem)
            if vid is not None:
                ids.append(vid)
    if not len(ids):
        return NoContents()
    oc = ObjectContainer(title2=u'%s' % YT_FEEDS[oid]['title'])
    # the Data API caps videos.list at 50 ids per request
    chunk_size = 50
    extended = Prefs['my_subscriptions_extened'] if oid == '_SB' else Prefs['playlists_extened']
    [AddVideos(
        oc,
        ApiGetVideos(ids=ids[i:i + chunk_size]),
        extended=extended
    ) for i in xrange(0, len(ids), chunk_size)]
    if continuations is None:
        return oc
    # Add offset
    for item in continuations:
        if item['item_type'] == 'next_continuation_data':
            oc.add(NextPageObject(
                key=Callback(
                    Feed,
                    oid=oid,
                    offset=JSON.StringFromObject({
                        'itct': item['click_tracking_params'],
                        'ctoken': item['continuation'],
                    }),
                ),
                title=u'%s' % L('Next page'),
                thumb=ICONS['next']
            ))
            break
    return oc
@route(PREFIX + '/video/view')
def VideoView(vid, **kwargs):
    # Hand the video id to the URL service, which resolves the playable
    # stream URL (authenticated via the stored access token).
    return URLService.MetadataObjectForURL(
        url=Video.GetServiceURL(vid, Dict['access_token'], GetLanguage()),
        in_container=True
    )
@route(PREFIX + '/video/info')
def VideoInfo(vid, pl_item_id=None):
    """Detail menu for one video: play entry, channel link, related videos,
    add-to-playlist actions and, when opened from an editable playlist
    (pl_item_id given), a remove-from-playlist action.
    """
    oc = ObjectContainer()
    res = ApiGetVideos(ids=[vid])
    AddVideos(oc, res, title=L('Play video'))
    if not len(oc):
        return NoContents()
    item = res['items'][0]
    oc.title2 = u'%s' % item['snippet']['localized']['title']
    # link to the uploader's channel
    oc.add(DirectoryObject(
        key=Callback(
            Channel,
            oid=item['snippet']['channelId'],
            title=item['snippet']['channelTitle']
        ),
        title=u'%s' % item['snippet']['channelTitle'],
        thumb=ICONS['account'],
    ))
    oc.add(DirectoryObject(
        key=Callback(
            Search,
            title=L('Related videos'),
            query=None,
            relatedToVideoId=item['id']
        ),
        title=u'%s' % L('Related videos'),
        thumb=ICONS['suggestions'],
    ))
    # one entry per user playlist the video can be added to (watch later, ...)
    for key, title in YT_EDITABLE.items():
        oc.add(DirectoryObject(
            key=Callback(PlaylistAdd, aid=item['id'], key=key),
            title=u'%s' % title,
            thumb=ICONS[key],
        ))
    if pl_item_id:
        oc.add(DirectoryObject(
            key=Callback(PlaylistRemove, pl_item_id=pl_item_id),
            title=u'%s' % L('Remove from playlist'),
            thumb=ICONS['remove'],
        ))
    # append any videos/links mentioned in the description text
    return AddItemsFromDescription(
        oc,
        item['snippet']['localized']['description']
    )
@route(PREFIX + '/channels')
def Channels(oid, title, offset=None):
    """List the channels in a guide category, with pagination.

    oid    -- guide category id.
    offset -- API page token from a previous page, if any.
    """
    res = ApiRequest('channels', ApiGetParams(
        categoryId=oid,
        hl=GetLanguage(),
        limit=Prefs['items_per_page'],
        offset=offset
    ))
    if not res or not len(res['items']):
        return NoContents()
    oc = ObjectContainer(
        title2=u'%s' % title,
        replace_parent=bool(offset)
    )
    for item in res['items']:
        cid = item['id']
        item = item['snippet']
        oc.add(DirectoryObject(
            key=Callback(
                Channel,
                oid=cid,
                title=item['title']
            ),
            title=u'%s' % item['title'],
            summary=u'%s' % item['description'],
            thumb=GetThumbFromSnippet(item),
        ))
    # pagination entry, when the API reports more results
    if 'nextPageToken' in res:
        oc.add(NextPageObject(
            key=Callback(
                Channels,
                oid=oid,
                title=title,
                offset=res['nextPageToken'],
            ),
            title=u'%s' % L('Next page'),
            thumb=ICONS['next']
        ))
    return oc
@route(PREFIX + '/channel')
def Channel(oid, title):
    """Menu for a single channel: standard sections, subscriptions, in-channel
    search, and the channel's playlists. oid='me' means the signed-in user and
    additionally gets a sign-out entry (but no subscriptions/search block).
    """
    oc = ObjectContainer(
        title2=u'%s' % title
    )
    # Add standart menu
    FillChannelInfo(oc, oid)
    if oid == 'me':
        oc.add(DirectoryObject(
            key=Callback(MainMenu, offline=True),
            title=u'%s' % L('Sign out'),
            thumb=ICONS['offline'],
        ))
        return oc
    oc.add(DirectoryObject(
        key=Callback(
            Subscriptions,
            title=u'%s - %s' % (title, L('Subscriptions')),
            uid=oid
        ),
        title=u'%s' % L('Subscriptions'),
        thumb=ICONS['subscriptions'],
    ))
    oc.add(InputDirectoryObject(
        key=Callback(
            Search,
            s_type='video',
            channelId=oid,
            title=u'%s' % L('Search Channel')
        ),
        title=u'%s' % L('Search'), prompt=u'%s' % L('Search Channel'),
        thumb=ICONS['search']
    ))
    # append the channel's playlists below the fixed entries
    AddPlaylists(oc, uid=oid)
    return oc
@route(PREFIX + '/user')
def User(username):
    """Resolve a legacy YouTube username to its channel and open that channel."""
    params = ApiGetParams(
        forUsername=username,
        hl=GetLanguage()
    )
    res = ApiRequest('channels', params)
    if not res or not res['items']:
        return NoContents()
    first = res['items'][0]
    return Channel(first['id'], first['snippet']['localized']['title'])
@route(PREFIX + '/categories')
def Categories(title, c_type):
    """List video categories or channel-guide categories.

    c_type -- 'video' (categories drill into popular videos via Category) or
              'guide' (categories drill into channels via Channels, and a
              channel-search entry is prepended).
    """
    res = ApiRequest('%sCategories' % c_type, ApiGetParams(
        regionCode=GetRegion(),
        hl=GetLanguage()
    ))
    if not res or not len(res['items']):
        return NoContents()
    oc = ObjectContainer(
        title2=u'%s' % title
    )
    if c_type == 'guide':
        c_callback = Channels
        oc.add(InputDirectoryObject(
            key=Callback(
                Search,
                s_type='channel',
                title=u'%s' % L('Search channels')
            ),
            title=u'%s' % L('Search'), prompt=u'%s' % L('Search channels'),
            thumb=ICONS['search']
        ))
    else:
        c_callback = Category
    for item in res['items']:
        oc.add(DirectoryObject(
            key=Callback(
                c_callback,
                title=item['snippet']['title'],
                oid=item['id']
            ),
            title=u'%s' % item['snippet']['title']
        ))
    return oc
@route(PREFIX + '/category')
def Category(title, oid=0, offset=None):
    """Most-popular videos for one video category (oid=0: all categories),
    paginated via the API's nextPageToken.
    """
    oc = ObjectContainer(
        title2=u'%s' % title,
        replace_parent=bool(offset)
    )
    res = ApiGetVideos(
        chart='mostPopular',
        limit=Prefs['items_per_page'],
        offset=offset,
        regionCode=GetRegion(),
        videoCategoryId=oid
    )
    AddVideos(oc, res, extended=Prefs['category_extened'])
    if not len(oc):
        return NoContents()
    if 'nextPageToken' in res:
        oc.add(NextPageObject(
            key=Callback(
                Category,
                title=oc.title2,
                oid=oid,
                offset=res['nextPageToken'],
            ),
            title=u'%s' % L('Next page'),
            thumb=ICONS['next']
        ))
    return oc
@route(PREFIX + '/playlists')
def Playlists(uid, title, offset=None):
    """Playlists of a channel. For the signed-in user ('me') on the first
    page, the special playlists and a playlist-search entry are prepended.
    """
    oc = ObjectContainer(
        title2=u'%s' % title,
        replace_parent=bool(offset)
    )
    if not offset and uid == 'me':
        FillChannelInfo(oc, uid, ('watchLater', 'likes', 'favorites'))
        oc.add(InputDirectoryObject(
            key=Callback(
                Search,
                s_type='playlist',
                title=u'%s' % L('Search playlists')
            ),
            title=u'%s' % L('Search'), prompt=u'%s' % L('Search playlists'),
            thumb=ICONS['search']
        ))
    # append the actual playlist entries (handles its own pagination)
    return AddPlaylists(oc, uid=uid, offset=offset)
@route(PREFIX + '/playlist')
def Playlist(oid, title, can_edit=False, offset=None):
    """Videos of one playlist. Special feed ids (YT_FEEDS) are delegated to
    Feed(). When can_edit is truthy, each video carries its playlist-item id
    so it can later be removed from the playlist.
    """
    if oid in YT_FEEDS:
        return Feed(oid)
    res = ApiRequest('playlistItems', ApiGetParams(
        part='contentDetails',
        playlistId=oid,
        offset=offset,
        limit=Prefs['items_per_page']
    ))
    if not res or not len(res['items']):
        return NoContents()
    oc = ObjectContainer(
        title2=u'%s' % title,
        replace_parent=bool(offset)
    )
    ids = []
    pl_map = {}
    # Callback round-trips arguments as strings, so 'False' must be handled
    can_edit = can_edit and can_edit != 'False'
    for item in res['items']:
        ids.append(item['contentDetails']['videoId'])
        if can_edit:
            # remember the playlist-item id for remove-from-playlist actions
            pl_map[item['contentDetails']['videoId']] = item['id']
    AddVideos(
        oc,
        ApiGetVideos(ids=ids),
        extended=Prefs['playlists_extened'],
        pl_map=pl_map
    )
    if 'nextPageToken' in res:
        oc.add(NextPageObject(
            key=Callback(
                Playlist,
                title=oc.title2,
                oid=oid,
                can_edit=can_edit,
                offset=res['nextPageToken'],
            ),
            title=u'%s' % L('Next page'),
            thumb=ICONS['next']
        ))
    return oc
@route(PREFIX + '/playlist/add')
def PlaylistAdd(aid, key=None, oid=None, a_type='video'):
if key is not None:
items = ApiGetChannelInfo('me')['playlists']
if key in items:
oid = items[key]
if not oid:
return ErrorMessage()
| |
import itertools
import numpy as np
from . import ffi, lib, backend, binary, monoid, semiring
from .base import BaseExpression, BaseType, call
from .dtypes import lookup_dtype, unify, _INDEX
from .exceptions import check_status, NoValue
from .expr import AmbiguousAssignOrExtract, IndexerResolver, Updater
from .mask import StructuralMask, ValueMask
from .operator import get_typed_op
from .vector import Vector, VectorExpression
from .scalar import Scalar, ScalarExpression, _CScalar
from .utils import (
ints_to_numpy_buffer,
values_to_numpy_buffer,
wrapdoc,
_CArray,
_Pointer,
class_property,
)
from . import expr
from ._ss.matrix import ss
ffi_new = ffi.new
class Matrix(BaseType):
    """
    GraphBLAS Sparse Matrix
    High-level wrapper around GrB_Matrix type
    """

    __slots__ = "_nrows", "_ncols", "ss"
    # plain Matrix is never transposed; TransposedMatrix presents the flipped view
    _is_transposed = False
    # monotonically increasing suffix for auto-generated object names
    _name_counter = itertools.count()

    def __init__(self, gb_obj, dtype, *, name=None):
        # gb_obj: cffi pointer to an existing GrB_Matrix handle
        if name is None:
            name = f"M_{next(Matrix._name_counter)}"
        # cached dimensions; populated by new()/dup()/resize()
        self._nrows = None
        self._ncols = None
        super().__init__(gb_obj, dtype, name)
        # Add ss extension methods
        self.ss = ss(self)
    def __del__(self):
        # Free the underlying GrB_Matrix handle, if one was ever attached.
        gb_obj = getattr(self, "gb_obj", None)
        if gb_obj is not None:
            # it's difficult/dangerous to record the call, b/c `self.name` may not exist
            check_status(lib.GrB_Matrix_free(gb_obj), self)
    def __repr__(self, mask=None):
        # Formatting triggers GraphBLAS calls; suppress them from the recorder.
        from .formatting import format_matrix
        from .recorder import skip_record

        with skip_record:
            return format_matrix(self, mask=mask)

    def _repr_html_(self, mask=None):
        # Rich (Jupyter) repr; same recorder suppression as __repr__.
        from .formatting import format_matrix_html
        from .recorder import skip_record

        with skip_record:
            return format_matrix_html(self, mask=mask)

    def __reduce__(self):
        # SS, SuiteSparse-specific: export
        pieces = self.ss.export(raw=True)
        return self._deserialize, (pieces, self.name)

    @staticmethod
    def _deserialize(pieces, name):
        # SS, SuiteSparse-specific: import
        # NOTE(review): with "ss" listed in __slots__, `Matrix.ss` resolves to
        # the slot descriptor, not the ss helper class -- confirm import_any is
        # actually reachable through this attribute.
        return Matrix.ss.import_any(name=name, **pieces)
    @property
    def S(self):
        # Structural mask: selects positions where a value exists.
        return StructuralMask(self)

    @property
    def V(self):
        # Value mask: selects positions whose stored value is truthy.
        return ValueMask(self)

    def __delitem__(self, keys):
        # del M[i, j] -- route through the Updater machinery.
        del Updater(self)[keys]

    def __getitem__(self, keys):
        # M[i, j] returns a delayed extract object, not an eager value.
        resolved_indexes = IndexerResolver(self, keys)
        return AmbiguousAssignOrExtract(self, resolved_indexes)

    def __setitem__(self, keys, delayed):
        # M[i, j] = expr -- route through the Updater machinery.
        Updater(self)[keys] = delayed
    def __contains__(self, index):
        # `(i, j) in M` -- True when a value is stored at that position.
        extractor = self[index]
        if not extractor.resolved_indexes.is_single_element:
            raise TypeError(
                f"Invalid index to Matrix contains: {index!r}.  A 2-tuple of ints is expected.  "
                "Doing `(i, j) in my_matrix` checks whether a value is present at that index."
            )
        scalar = extractor.new(name="s_contains")
        return not scalar.is_empty

    def __iter__(self):
        # Yields (row, col) index pairs only; the extracted values are unused.
        rows, columns, values = self.to_values()
        return zip(rows.flat, columns.flat)
    def isequal(self, other, *, check_dtype=False):
        """
        Check for exact equality (same size, same empty values)
        If `check_dtype` is True, also checks that dtypes match
        For equality of floating point Vectors, consider using `isclose`
        """
        self._expect_type(other, (Matrix, TransposedMatrix), within="isequal", argname="other")
        # cheap structural checks first: dtype, shape, value count
        if check_dtype and self.dtype != other.dtype:
            return False
        if self._nrows != other._nrows:
            return False
        if self._ncols != other._ncols:
            return False
        if self._nvals != other._nvals:
            return False
        if check_dtype:
            common_dtype = self.dtype
        else:
            common_dtype = unify(self.dtype, other.dtype)

        matches = Matrix.new(bool, self._nrows, self._ncols, name="M_isequal")
        matches << self.ewise_mult(other, binary.eq[common_dtype])
        # ewise_mult performs intersection, so nvals will indicate mismatched empty values
        if matches._nvals != self._nvals:
            return False

        # Check if all results are True
        return matches.reduce_scalar(monoid.land).value
    def isclose(self, other, *, rel_tol=1e-7, abs_tol=0.0, check_dtype=False):
        """
        Check for approximate equality (including same size and empty values)
        If `check_dtype` is True, also checks that dtypes match
        Closeness check is equivalent to `abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)`
        """
        self._expect_type(other, (Matrix, TransposedMatrix), within="isclose", argname="other")
        # cheap structural checks first: dtype, shape, value count
        if check_dtype and self.dtype != other.dtype:
            return False
        if self._nrows != other._nrows:
            return False
        if self._ncols != other._ncols:
            return False
        if self._nvals != other._nvals:
            return False

        matches = self.ewise_mult(other, binary.isclose(rel_tol, abs_tol)).new(
            dtype=bool, name="M_isclose"
        )
        # ewise_mult performs intersection, so nvals will indicate mismatched empty values
        if matches._nvals != self._nvals:
            return False

        # Check if all results are True
        return matches.reduce_scalar(monoid.land).value
    @property
    def nrows(self):
        # Query the library (recorded call); returns a Python int.
        n = ffi_new("GrB_Index*")
        scalar = Scalar(n, _INDEX, name="s_nrows", empty=True)
        call("GrB_Matrix_nrows", [_Pointer(scalar), self])
        return n[0]

    @property
    def ncols(self):
        # Query the library (recorded call); returns a Python int.
        n = ffi_new("GrB_Index*")
        scalar = Scalar(n, _INDEX, name="s_ncols", empty=True)
        call("GrB_Matrix_ncols", [_Pointer(scalar), self])
        return n[0]

    @property
    def shape(self):
        # Uses the cached dimensions, not a library call.
        return (self._nrows, self._ncols)

    @property
    def nvals(self):
        # Number of stored values (recorded library call).
        n = ffi_new("GrB_Index*")
        scalar = Scalar(n, _INDEX, name="s_nvals", empty=True)
        call("GrB_Matrix_nvals", [_Pointer(scalar), self])
        return n[0]

    @property
    def _nvals(self):
        """Like nvals, but doesn't record calls"""
        n = ffi_new("GrB_Index*")
        check_status(lib.GrB_Matrix_nvals(n, self.gb_obj[0]), self)
        return n[0]
    @property
    def T(self):
        # Lazy transpose view; no data is moved.
        return TransposedMatrix(self)

    def clear(self):
        # Remove all stored values; dimensions are unchanged.
        call("GrB_Matrix_clear", [self])

    def resize(self, nrows, ncols):
        # Resize in place and refresh the cached dimensions.
        nrows = _CScalar(nrows)
        ncols = _CScalar(ncols)
        call("GrB_Matrix_resize", [self, nrows, ncols])
        self._nrows = nrows.scalar.value
        self._ncols = ncols.scalar.value
    def to_values(self, *, dtype=None):
        """
        GrB_Matrix_extractTuples
        Extract the rows, columns and values as a 3-tuple of numpy arrays.
        If `dtype` is given and differs from the matrix dtype, the values
        array is cast (copied) to it.
        """
        nvals = self._nvals
        # pre-allocate output buffers sized to the number of stored values
        rows = _CArray(size=nvals, name="&rows_array")
        columns = _CArray(size=nvals, name="&columns_array")
        values = _CArray(size=nvals, dtype=self.dtype, name="&values_array")
        n = ffi_new("GrB_Index*")
        scalar = Scalar(n, _INDEX, name="s_nvals", empty=True)
        scalar.value = nvals
        call(
            f"GrB_Matrix_extractTuples_{self.dtype.name}",
            [rows, columns, values, _Pointer(scalar), self],
        )
        values = values.array
        if dtype is not None:
            dtype = lookup_dtype(dtype)
            if dtype != self.dtype:
                values = values.astype(dtype.np_type)  # copies
        return (
            rows.array,
            columns.array,
            values,
        )
    def build(self, rows, columns, values, *, dup_op=None, clear=False, nrows=None, ncols=None):
        """GrB_Matrix_build: populate this matrix from coordinate data.

        rows/columns/values must have equal lengths. `clear` empties the
        matrix first; nrows/ncols optionally resize it. `dup_op` combines
        duplicate coordinates; without it, duplicates raise ValueError.
        """
        # TODO: accept `dtype` keyword to match the dtype of `values`?
        rows = ints_to_numpy_buffer(rows, np.uint64, name="row indices")
        columns = ints_to_numpy_buffer(columns, np.uint64, name="column indices")
        values, dtype = values_to_numpy_buffer(values, self.dtype)
        n = values.size
        if rows.size != n or columns.size != n:
            raise ValueError(
                f"`rows` and `columns` and `values` lengths must match: "
                f"{rows.size}, {columns.size}, {values.size}"
            )
        if clear:
            self.clear()
        if nrows is not None or ncols is not None:
            if nrows is None:
                nrows = self.nrows
            if ncols is None:
                ncols = self.ncols
            self.resize(nrows, ncols)
        if n == 0:
            return

        # When dup_op isn't given, build with plus and detect duplicates below.
        dup_op_given = dup_op is not None
        if not dup_op_given:
            dup_op = binary.plus
        dup_op = get_typed_op(dup_op, self.dtype)
        if dup_op.opclass == "Monoid":
            dup_op = dup_op.binaryop
        else:
            self._expect_op(dup_op, "BinaryOp", within="build", argname="dup_op")

        rows = _CArray(rows)
        columns = _CArray(columns)
        values = _CArray(values, dtype=self.dtype)
        call(
            f"GrB_Matrix_build_{self.dtype.name}",
            [self, rows, columns, values, _CScalar(n), dup_op],
        )
        # Check for duplicates when dup_op was not provided
        if not dup_op_given and self._nvals < n:
            raise ValueError("Duplicate indices found, must provide `dup_op` BinaryOp")
    def dup(self, *, dtype=None, mask=None, name=None):
        """
        GrB_Matrix_dup
        Create a new Matrix by duplicating this one.
        With `dtype` or `mask`, the copy goes through a masked/casting
        assignment instead of the direct GrB_Matrix_dup call.
        """
        if dtype is not None or mask is not None:
            if dtype is None:
                dtype = self.dtype
            rv = Matrix.new(dtype, nrows=self._nrows, ncols=self._ncols, name=name)
            # full-range assignment performs the cast and/or mask filtering
            rv(mask=mask)[:, :] << self
        else:
            new_mat = ffi_new("GrB_Matrix*")
            rv = Matrix(new_mat, self.dtype, name=name)
            call("GrB_Matrix_dup", [_Pointer(rv), self])
        # propagate cached dimensions to the copy
        rv._nrows = self._nrows
        rv._ncols = self._ncols
        return rv
def wait(self):
    """
    GrB_Matrix_wait
    In non-blocking mode, the computations may be delayed and not yet safe
    to use by multiple threads. Use wait to force completion of a Matrix
    and make it safe to use as input parameters on multiple threads.
    """
    # Forces any pending GraphBLAS work on this Matrix to complete.
    call("GrB_Matrix_wait", [_Pointer(self)])
@classmethod
def new(cls, dtype, nrows=0, ncols=0, *, name=None):
    """
    GrB_Matrix_new
    Create a new empty Matrix from the given type, number of rows, and number of columns.
    """
    new_matrix = ffi_new("GrB_Matrix*")
    dtype = lookup_dtype(dtype)
    rv = cls(new_matrix, dtype, name=name)
    # Dimensions are passed to C as scalars; wrap plain ints on demand.
    if type(nrows) is not _CScalar:
        nrows = _CScalar(nrows)
    if type(ncols) is not _CScalar:
        ncols = _CScalar(ncols)
    call("GrB_Matrix_new", [_Pointer(rv), dtype, nrows, ncols])
    # Cache the dimensions on the Python wrapper.
    rv._nrows = nrows.scalar.value
    rv._ncols = ncols.scalar.value
    return rv
@classmethod
def from_values(
    cls,
    rows,
    columns,
    values,
    *,
    nrows=None,
    ncols=None,
    dup_op=None,
    dtype=None,
    name=None,
):
    """Create a new Matrix from the given lists of row indices, column
    indices, and values.  If nrows or ncols are not provided, they are
    inferred as the maximum row / column index found, plus one.
    """
    row_idx = ints_to_numpy_buffer(rows, np.uint64, name="row indices")
    col_idx = ints_to_numpy_buffer(columns, np.uint64, name="column indices")
    vals, dtype = values_to_numpy_buffer(values, dtype)
    # Infer any missing dimension from the largest index present.
    if nrows is None:
        if row_idx.size == 0:
            raise ValueError("No row indices provided. Unable to infer nrows.")
        nrows = int(row_idx.max()) + 1
    if ncols is None:
        if col_idx.size == 0:
            raise ValueError("No column indices provided. Unable to infer ncols.")
        ncols = int(col_idx.max()) + 1
    # Create the empty matrix, then delegate population (and duplicate
    # handling) to `build` so its error messages apply.
    result = cls.new(dtype, nrows, ncols, name=name)
    result.build(row_idx, col_idx, vals, dup_op=dup_op)
    return result
@property
def _carg(self):
    # Dereference the cffi pointer: the raw GrB_Matrix handle to pass to C.
    return self.gb_obj[0]
#########################################################
# Delayed methods
#
# These return a delayed expression object which must be passed
# to __setitem__ to trigger a call to GraphBLAS
#########################################################
def ewise_add(self, other, op=monoid.plus, *, require_monoid=True):
"""
GrB_Matrix_eWiseAdd
Result will contain the union of indices from both Matrices
Default op is monoid.plus.
Unless explicitly disabled, this method requires a monoid (directly or from a semiring).
The reason for this is that binary operators can create very confusing behavior when
only one of the two elements is present.
Examples:
- binary.minus where left=N/A and right=4 yields 4 rather than | |
from __future__ import print_function
import collections, gzip, time
import numpy as np
import tensorflow as tf
import sys, os
sys.path.append("../tuner_utils")
from yellowfin import YFOptimizer
import inspect
class MediumConfig(object):
    """Medium config.

    Hyperparameter bundle consumed by PTBModel and the training loop.
    NOTE(review): PTBModel also reads ``config.vocab_size``, which is not
    defined here — presumably the caller attaches it; confirm.
    """
    init_scale = 0.05      # weight-init scale (used by the training script — not visible here)
    learning_rate = 0.25   # initial learning rate (applied externally via assign_lr — TODO confirm)
    max_grad_norm = 20     # global-norm gradient clipping threshold
    num_layers = 3         # number of stacked LSTM layers
    num_steps = 50         # unroll length (tokens per truncated-BPTT window)
    hidden_size = 500      # LSTM hidden size (also the embedding size)
    max_epoch = 14         # presumably: epochs at full learning rate — TODO confirm in training loop
    max_max_epoch = 50     # presumably: total number of training epochs
    keep_prob = 0.5        # dropout keep probability (inputs and between layers)
    # correction: for wsj model, we use 0.9.
    lr_decay = 0.9         # presumably: LR decay factor applied after max_epoch
    batch_size = 20        # minibatch size
    opt_method = None      # optimizer selector: "Adam", "YF", "momSGD", "SGD", or "Adagrad"
    log_dir = None         # directory for TensorBoard summaries (PTBModel's FileWriter)
class PTBModel(object):
    """Word-level LSTM language model over PTB-style data (TF1 graph mode).

    ``__init__`` builds the whole graph: embedding lookup, a multi-layer
    (optionally dropout-wrapped) LSTM manually unrolled for
    ``config.num_steps`` steps, a softmax projection with a
    sequence-loss objective, and — when ``is_training`` — gradient
    clipping plus the optimizer selected by ``config.opt_method``.
    """

    def __init__(self, is_training, config):
        self.batch_size = batch_size = config.batch_size
        self.num_steps = num_steps = config.num_steps
        size = config.hidden_size
        vocab_size = config.vocab_size
        # One minibatch of token ids and the corresponding shifted targets.
        self._input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
        self._targets = tf.placeholder(tf.int32, [batch_size, num_steps])
        # lstm_cell = tf.contrib.rnn.BasicLSTMCell(size, forget_bias=1.0,
        #     state_is_tuple=True)
        # if is_training and config.keep_prob < 1:
        #     lstm_cell = tf.contrib.rnn.DropoutWrapper(
        #         lstm_cell, output_keep_prob=config.keep_prob)
        # cell = tf.contrib.rnn.MultiRNNCell([lstm_cell] * config.num_layers,
        #     state_is_tuple=True)
        # Slightly better results can be obtained with forget gate biases
        # initialized to 1 but the hyperparameters of the model would need to be
        # different than reported in the paper.
        def lstm_cell():
            # With the latest TensorFlow source code (as of Mar 27, 2017),
            # the BasicLSTMCell will need a reuse parameter which is unfortunately not
            # defined in TensorFlow 1.0. To maintain backwards compatibility, we add
            # an argument check here:
            if 'reuse' in inspect.getargspec(
                    tf.contrib.rnn.BasicLSTMCell.__init__).args:
                return tf.contrib.rnn.BasicLSTMCell(
                    size, forget_bias=1.0, state_is_tuple=True,
                    reuse=tf.get_variable_scope().reuse)
            else:
                return tf.contrib.rnn.BasicLSTMCell(
                    size, forget_bias=1.0, state_is_tuple=True)
        attn_cell = lstm_cell
        if is_training and config.keep_prob < 1:
            # Wrap each layer's cell with output dropout during training.
            def attn_cell():
                return tf.contrib.rnn.DropoutWrapper(
                    lstm_cell(), output_keep_prob=config.keep_prob)
        cell = tf.contrib.rnn.MultiRNNCell(
            [attn_cell() for _ in range(config.num_layers)], state_is_tuple=True)
        self._initial_state = cell.zero_state(batch_size, tf.float32)
        # Embeddings live on CPU (embedding_lookup had no GPU kernel in TF1).
        with tf.device("/cpu:0"):
            embedding = tf.get_variable("embedding", [vocab_size, size])
            inputs = tf.nn.embedding_lookup(embedding, self._input_data)
        if is_training and config.keep_prob < 1:
            inputs = tf.nn.dropout(inputs, config.keep_prob)
        # inputs = [tf.squeeze(input_, [1])
        #           for input_ in tf.split(inputs, num_steps, 1)]
        # outputs, state = tf.contrib.rnn.static_rnn(cell, inputs, initial_state=self._initial_state)
        # Manual unroll: share RNN variables across time steps.
        outputs = []
        state = self._initial_state
        with tf.variable_scope("RNN"):
            for time_step in range(num_steps):
                if time_step > 0: tf.get_variable_scope().reuse_variables()
                (cell_output, state) = cell(inputs[:, time_step, :], state)
                outputs.append(cell_output)
        # Stack per-step outputs into [batch * num_steps, size] for the softmax.
        output = tf.reshape(tf.stack(axis=1, values=outputs), [-1, size])
        softmax_w = tf.get_variable("softmax_w", [size, vocab_size])
        softmax_b = tf.get_variable("softmax_b", [vocab_size])
        logits = tf.matmul(output, softmax_w) + softmax_b
        # Per-token cross-entropy over the flattened batch.
        loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
            [logits],
            [tf.reshape(self._targets, [-1])],
            [tf.ones([batch_size * num_steps])])
        cost = tf.reduce_sum(loss) / batch_size
        # _norm_loss is the per-token average; _cost keeps the raw per-example
        # loss vector (run_epoch2 reshapes it back to [batch, steps]).
        self._norm_loss = cost / num_steps
        self._cost = loss
        self._final_state = state
        if not is_training:
            return
        self._lr = tf.Variable(0.0, trainable=False)
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(self._norm_loss, tvars),
                                          config.max_grad_norm)
        # Optimizer dispatch on the config string; falls through with only a
        # message (NameError on apply_gradients) if the name is unknown.
        if config.opt_method == "Adam":
            print("using Adam")
            optimizer = tf.train.AdamOptimizer(self.lr)
        elif config.opt_method == "YF":
            print("using YF")
            self.optimizer = optimizer = YFOptimizer()
        elif config.opt_method == "momSGD":
            print("uisng mom SGD")
            optimizer = tf.train.MomentumOptimizer(self.lr, 0.9)
        elif config.opt_method == "SGD":
            print("uisng SGD")
            optimizer = tf.train.GradientDescentOptimizer(self.lr)
        elif config.opt_method == "Adagrad":
            print("using adagrad")
            optimizer = tf.train.AdagradOptimizer(self.lr)
        else:
            print("Optimizer is not supported")
        self._train_op = optimizer.apply_gradients(zip(grads, tvars))
        self.train_loss_summary = tf.summary.scalar('train_loss', self._norm_loss)
        # One timestamped summary directory per run.
        self.writer = tf.summary.FileWriter(
            os.path.join(config.log_dir, time.strftime("%Y-%m-%d-%H-%M-%S")))
        # self.writer.add_graph(sess.graph)

    def assign_lr(self, session, lr_value):
        # Set the (non-trainable) learning-rate variable inside `session`.
        session.run(tf.assign(self.lr, lr_value))

    @property
    def input_data(self):
        return self._input_data

    @property
    def targets(self):
        return self._targets

    @property
    def initial_state(self):
        return self._initial_state

    @property
    def cost(self):
        # Raw per-example loss vector (see __init__), not the scalar cost.
        return self._cost

    @property
    def final_state(self):
        return self._final_state

    @property
    def lr(self):
        return self._lr

    @property
    def train_op(self):
        return self._train_op
def _build_vocab(filename):
    """Map each word in *filename* to an integer id.

    Ids are assigned by descending frequency, ties broken alphabetically,
    so id 0 is the most frequent word.
    """
    counter = collections.Counter(_read_words(filename))
    ranked = sorted(counter.items(), key=lambda kv: (-kv[1], kv[0]))
    return {word: idx for idx, (word, _) in enumerate(ranked)}
def _read_words(filename):
    """Tokenize *filename* on whitespace, mapping newlines to the '<eos>' marker."""
    with open_file(filename) as handle:
        text = handle.read()
    return text.replace('\n', '<eos>').split()
def chop(data, eos):
    """Split a flat token list into sentences terminated by *eos*.

    Each returned sentence includes its terminating *eos* token; a trailing
    run of tokens with no terminator is dropped.
    """
    sentences, current = [], []
    for token in data:
        current.append(token)
        if token != eos:
            continue
        sentences.append(current)
        current = []
    return sentences
def open_file(path):
    """Open *path* for reading, transparently handling gzip archives.

    Gzip files ('.gz') are opened in binary mode; everything else is opened
    as plain text.
    """
    is_gzip = path.endswith('.gz')
    opener = gzip.open if is_gzip else open
    return opener(path, 'rb' if is_gzip else 'r')
def ptb_iterator(raw_data, batch_size, num_steps):
    """Yield successive (x, y) minibatch pairs from a flat token sequence.

    The tokens are laid out as ``batch_size`` parallel rows; each yielded
    x is a [batch_size, num_steps] int32 window and y is the same window
    shifted one token to the right (the prediction targets).

    Raises ValueError when the data is too short for even one window.
    """
    tokens = np.array(raw_data, dtype=np.int32)
    batch_len = len(tokens) // batch_size
    # Drop the tail and reshape into batch_size contiguous streams.
    grid = tokens[:batch_size * batch_len].reshape(batch_size, batch_len)
    epoch_size = (batch_len - 1) // num_steps
    if epoch_size == 0:
        raise ValueError("epoch_size == 0, decrease batch_size or num_steps")
    for step in range(epoch_size):
        lo = step * num_steps
        yield (grid[:, lo:lo + num_steps], grid[:, lo + 1:lo + num_steps + 1])
# iterator used for nbest data.
def ptb_iterator2(raw_data, batch_size, num_steps, idx2tree, eos):
    """Minibatch iterator for nbest data: yields (x, y, z) triples.

    Pads ``raw_data``/``idx2tree`` with dummies so the data divides evenly
    into ``batch_size`` rows, prepends a carry token to each row, and yields
    for every window: the inputs x, the one-step-shifted targets y, and z,
    the (sentence, candidate) index of each token (dummy entries are
    (-1, -1) and are skipped by consumers).

    Fix: ``xrange`` replaced with ``range`` — ``xrange`` does not exist on
    Python 3 (the file already imports ``print_function``); the two are
    interchangeable inside these list comprehensions.

    Raises ValueError when the data is too short for even one window.
    """
    dummy1 = 0            # padding token id
    dummy2 = (-1, -1)     # padding tree index, filtered out by callers
    # Pad so len(raw_data) is a multiple of batch_size.
    remainder = len(raw_data) % batch_size
    if remainder != 0:
        raw_data = raw_data + [dummy1 for x in range(batch_size - remainder)]
        idx2tree = idx2tree + [dummy2 for x in range(batch_size - remainder)]
    raw_data = np.array(raw_data, dtype=np.int32)
    data_len = len(raw_data)
    batch_len = data_len // batch_size
    remainder = (data_len // batch_size) % num_steps
    # One extra leading column holds the carry / sentence-boundary token.
    data = np.zeros([batch_size, batch_len + num_steps - remainder + 1],
                    dtype=np.int32)
    for i in range(batch_size):
        data[i, 1:batch_len+1] = raw_data[batch_len * i:batch_len * (i + 1)]
        if i == 0:
            data[i, 0] = eos
        else:
            # NOTE(review): this always carries raw_data[batch_len - 1] (the
            # last token of row 0) rather than the last token of row i-1
            # (raw_data[batch_len * i - 1]); the two agree only for i == 1.
            # Preserved as-is — confirm intent before changing.
            data[i, 0] = raw_data[batch_len - 1]
    idx2tree = np.array(idx2tree, dtype=np.dtype('int, int'))
    tree = np.zeros([batch_size, batch_len + num_steps - remainder],
                    dtype=np.dtype('int, int'))
    for i in range(batch_size):
        tree[i, :batch_len] = idx2tree[batch_len * i:batch_len * (i + 1)]
        tree[i, batch_len:] = [dummy2 for x in range(num_steps - remainder)]
    epoch_size = (batch_len + num_steps - remainder) // num_steps
    if epoch_size == 0:
        raise ValueError("epoch_size == 0, decrease batch_size or num_steps")
    for i in range(epoch_size):
        x = data[:, i*num_steps:(i+1)*num_steps]
        y = data[:, i*num_steps+1:(i+1)*num_steps+1]
        z = tree[:, i*num_steps:(i+1)*num_steps]
        yield (x, y, z)
def run_epoch(session, m, data, eval_op, verbose=False, epoch_id=None):
    """Runs the model on the given data.

    The LSTM state is threaded across minibatches manually: numpy state
    values from the previous step are fed into the initial-state
    placeholders on the next step.  Returns (perplexity, loss_list);
    loss_list is only populated when ``verbose`` is True (training), and
    ``epoch_id`` is only used then, to offset summary-writer step numbers.
    """
    epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
    start_time = time.time()
    costs = 0.0
    iters = 0
    # Evaluate the zero initial state once to seed the feed loop.
    state = []
    for c, h in m.initial_state:  # initial_state: ((c1, m1), (c2, m2))
        state.append((c.eval(), h.eval()))
    loss_list = []
    for step, (x, y) in enumerate(ptb_iterator(data, m.batch_size,
                                               m.num_steps)):
        # Fetch layout: [cost, eval_op, c1, h1, c2, h2, ...,
        #                (train_loss_summary, _norm_loss) when verbose].
        fetches = []
        fetches.append(m.cost)
        fetches.append(eval_op)
        for c, h in m.final_state:  # final_state: ((c1, m1), (c2, m2))
            fetches.append(c)
            fetches.append(h)
        if verbose:
            fetches.append(m.train_loss_summary)
            fetches.append(m._norm_loss)
        feed_dict = {}
        feed_dict[m.input_data] = x
        feed_dict[m.targets] = y
        for i, (c, h) in enumerate(m.initial_state):
            feed_dict[c], feed_dict[h] = state[i]
        res = session.run(fetches, feed_dict)
        cost = res[0]
        # Slice the state out of the results, skipping the two trailing
        # verbose-only entries when present.
        if verbose:
            state_flat = res[2:-2]  # [c1, m1, c2, m2]
        else:
            state_flat = res[2:]  # [c1, m1, c2, m2]
        state = [state_flat[i:i+2] for i in range(0, len(state_flat), 2)]
        costs += np.sum(cost) / m.batch_size
        iters += m.num_steps
        if verbose and step % (epoch_size // 10) == 10:
            print("%.3f perplexity: %.3f speed: %.0f wps" %
                  (step * 1.0 / epoch_size, np.exp(costs / iters),
                   iters * m.batch_size / (time.time() - start_time)))
        if verbose:
            #print("summary added step", epoch_id * epoch_size + step, epoch_id, epoch_size, epoch_id)
            m.writer.add_summary(res[-2], epoch_id * epoch_size + step)
            loss_list.append(res[-1])
    return np.exp(costs / iters), loss_list
def run_epoch2(session, m, nbest, eval_op, eos, verbose=False):
"""Runs the model on the given data."""
counts = []
loss = []
prev = (-1, -1)
for pair in nbest['idx2tree']:
if pair[0] != prev[0]:
counts.append([0])
loss.append([0.])
elif pair[1] == prev[1] + 1:
counts[-1].append(0)
loss[-1].append(0.)
counts[-1][-1] += 1
prev = pair
data = nbest['data']
epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
start_time = time.time()
costs = 0.0
iters = 0
state = []
for c, h in m.initial_state: # initial_state: ((c1, m1), (c2, m2))
state.append((c.eval(), h.eval()))
for step, (x, y, z) in enumerate(
ptb_iterator2(data, m.batch_size, m.num_steps,
nbest['idx2tree'], eos)):
fetches = []
fetches.append(m.cost)
fetches.append(eval_op)
for c, h in m.final_state: # final_state: ((c1, m1), (c2, m2))
fetches.append(c)
fetches.append(h)
feed_dict = {}
feed_dict[m.input_data] = x
feed_dict[m.targets] = y
for i, (c, h) in enumerate(m.initial_state):
feed_dict[c], feed_dict[h] = state[i]
res = session.run(fetches, feed_dict)
cost = res[0]
state_flat = res[2:] # [c1, m1, c2, m2]
state = [state_flat[i:i+2] for i in range(0, len(state_flat), 2)]
costs += np.sum(cost) / m.batch_size
iters += m.num_steps
cost = cost.reshape((m.batch_size, m.num_steps))
for idx, val in np.ndenumerate(cost):
tree_idx = z[idx[0]][idx[1]]
if tree_idx[0] == -1: # dummy
continue
counts[tree_idx[0]][tree_idx[1]] -= 1
loss[tree_idx[0]][tree_idx[1]] += cost[idx[0]][idx[1]]
if verbose and step % (epoch_size // 10) == 10:
print("%.3f perplexity: %.3f speed: %.0f wps" %
(step * 1.0 / epoch_size, np.exp(costs / iters),
iters * m.batch_size / (time.time() - start_time)))
scores = nbest['scores']
num = 0
gold, test, matched = 0, 0, 0
bad = []
for i in xrange(len(scores)):
good = True
ag = 0
min_val = 10000000
for j in xrange(len(scores[i])):
if counts[i][j] != 0:
bad.append(i)
good = False
break
if loss[i][j] < min_val:
min_val = loss[i][j]
ag = j
if good:
| |
<filename>tests/core/test_test.py
import os
import sys
import pytest
from golem.core import test as test_module, settings_manager
from golem.core.project import Project
from golem.core.test import Test
SAMPLE_TEST_CONTENT = """
description = 'some description'
tags = []
data = [{'a': 'b'}]
pages = ['page1', 'page2']
def setup(data):
page1.func1()
def test(data):
page2.func2('a', 'b')
click(page2.elem1)
def teardown(data):
pass
"""
NEW_TEST_CONTENT = """
description = ''
tags = []
pages = []
def setup(data):
pass
def test(data):
pass
def teardown(data):
pass
"""
EMPTY_STEPS = {'setup': [], 'test': [], 'teardown': []}
class TestCreateTest:
    """Tests for test_module.create_test."""

    def test_create_test(self, project_session, test_utils):
        # A newly created test exists on disk and contains the default template.
        _, project = project_session.activate()
        test_name = test_utils.random_string()
        errors = test_module.create_test(project, test_name)
        test = Test(project, test_name)
        assert test.exists
        assert errors == []
        assert test.code == NEW_TEST_CONTENT

    def test_create_test_name_exists(self, project_session, test_utils):
        # Creating a second test with the same name is rejected.
        _, project = project_session.activate()
        test_name = test_utils.random_string()
        test_module.create_test(project, test_name)
        errors = test_module.create_test(project, test_name)
        assert errors == ['A test with that name already exists']

    def test_create_test_invalid_name(self, project_session):
        # Each class of invalid dotted name yields its specific error message.
        _, project = project_session.activate()
        # invalid chars
        invalid_names = [
            'te-st',
            'te st',
            'te?st',
            'test. .test'
        ]
        for name in invalid_names:
            errors = test_module.create_test(project, name)
            assert errors == ['Only letters, numbers and underscores are allowed']
        # empty directory
        invalid_names = [
            '.test',
            'test..test',
        ]
        for name in invalid_names:
            errors = test_module.create_test(project, name)
            assert errors == ['Directory name cannot be empty']
        # empty file name
        invalid_names = [
            '',
            'test.',
        ]
        for name in invalid_names:
            errors = test_module.create_test(project, name)
            assert errors == ['File name cannot be empty']

    def test_create_test_into_folder(self, project_session, test_utils):
        # Dotted names create nested folders, each with an __init__.py.
        _, project = project_session.activate()
        random_dir = test_utils.random_string()
        # to folder
        test_name = '{}.test001'.format(random_dir)
        errors = test_module.create_test(project, test_name)
        assert errors == []
        # verify that each parent dir has __init__.py file
        init_path = os.path.join(Project(project).test_directory_path,
                                 random_dir, '__init__.py')
        assert test_name in Project(project).tests()
        assert os.path.isfile(init_path)
        # to sub-folder
        random_dir = test_utils.random_string()
        random_subdir = test_utils.random_string()
        test_name = '{}.{}.test001'.format(random_dir, random_subdir)
        errors = test_module.create_test(project, test_name)
        assert errors == []
        assert test_name in Project(project).tests()
        # verify that each parent dir has __init__.py file
        init_path = os.path.join(Project(project).test_directory_path,
                                 random_dir, '__init__.py')
        assert os.path.isfile(init_path)
        init_path = os.path.join(Project(project).test_directory_path,
                                 random_dir, random_subdir, '__init__.py')
        assert os.path.isfile(init_path)
class TestRenameTest:
    """Tests for test_module.rename_test."""

    def test_rename_test(self, project_session, test_utils):
        # Renaming removes the old name and registers the new one.
        _, project = project_session.activate()
        test_name = test_utils.create_random_test(project)
        new_test_name = test_utils.random_string()
        errors = test_module.rename_test(project, test_name, new_test_name)
        assert errors == []
        tests = Project(project).tests()
        assert test_name not in tests
        assert new_test_name in tests

    def test_rename_test_in_folder(self, project_session, test_utils):
        # Rename within a folder, then rename into a folder that does not
        # exist yet (it must be created implicitly).
        _, project = project_session.activate()
        dir = test_utils.random_string()
        name = test_utils.random_string()
        test_name = '{}.{}'.format(dir, name)
        test_utils.create_test(project, test_name)
        # rename within same folder
        new_name = test_utils.random_string()
        new_test_name = '{}.{}'.format(dir, new_name)
        errors = test_module.rename_test(project, test_name, new_test_name)
        assert errors == []
        tests = Project(project).tests()
        assert test_name not in tests
        assert new_test_name in tests
        # rename to another non existent folder
        test_name = new_test_name
        name = new_name
        new_dir = test_utils.random_string()
        new_test_name = '{}.{}'.format(new_dir, name)
        errors = test_module.rename_test(project, test_name, new_test_name)
        assert errors == []
        tests = Project(project).tests()
        assert test_name not in tests
        assert new_test_name in tests

    def test_rename_test_invalid_name(self, project_session, test_utils):
        # Invalid destinations are rejected and the original test is untouched.
        _, project = project_session.activate()
        test_name = test_utils.create_random_test(project)
        # invalid chars
        new_test_name = 'new-name'
        errors = test_module.rename_test(project, test_name, new_test_name)
        assert errors == ['Only letters, numbers and underscores are allowed']
        tests = Project(project).tests()
        assert test_name in tests
        assert new_test_name not in tests
        # empty filename
        new_test_name = 'test.'
        errors = test_module.rename_test(project, test_name, new_test_name)
        assert errors == ['File name cannot be empty']
        tests = Project(project).tests()
        assert test_name in tests
        assert new_test_name not in tests
        # empty directory
        new_test_name = 'test..test'
        errors = test_module.rename_test(project, test_name, new_test_name)
        assert errors == ['Directory name cannot be empty']
        tests = Project(project).tests()
        assert test_name in tests
        assert new_test_name not in tests

    def test_rename_test_src_does_not_exist(self, project_session, test_utils):
        # Renaming a nonexistent test reports the missing source.
        _, project = project_session.activate()
        test_name = test_utils.random_string()
        new_test_name = test_utils.random_string()
        errors = test_module.rename_test(project, test_name, new_test_name)
        assert errors == ['Test {} does not exist'.format(test_name)]
        assert new_test_name not in Project(project).tests()

    def test_rename_test_with_data_file(self, project_session, test_utils):
        """Assert when a test has a data file the data file is renamed as well"""
        _, project = project_session.activate()
        test_name = test_utils.create_random_test(project)
        new_test_name = test_utils.random_string()
        data_path = os.path.splitext(Test(project, test_name).path)[0] + '.csv'
        with open(data_path, 'w+') as f:
            f.write('')
        new_data_path = os.path.splitext(Test(project, new_test_name).path)[0] + '.csv'
        test_module.rename_test(project, test_name, new_test_name)
        assert not os.path.isfile(data_path)
        assert os.path.isfile(new_data_path)

    def test_rename_dest_exists(self, project_session, test_utils):
        # Renaming onto an existing name (or onto itself) is rejected.
        _, project = project_session.activate()
        dir = test_utils.random_string()
        name_one = test_utils.random_string()
        test_one = '{}.{}'.format(dir, name_one)
        name_two = test_utils.random_string()
        test_two = '{}.{}'.format(dir, name_two)
        test_utils.create_test(project, test_one)
        test_utils.create_test(project, test_two)
        # rename test to existing test name
        errors = test_module.rename_test(project, test_one, test_two)
        assert errors == ['A file with that name already exists']
        # rename test to same name
        errors = test_module.rename_test(project, test_one, test_one)
        assert errors == ['A file with that name already exists']

    # Windows-only: an open file cannot be renamed there, so the error path
    # is only reachable on nt.
    @pytest.mark.skipif("os.name != 'nt'")
    def test_rename_test_test_is_open(self, project_session, test_utils):
        """Try to rename a test while it is open"""
        _, project = project_session.activate()
        test_name = test_utils.create_random_test(project)
        new_test_name = test_utils.random_string()
        with open(Test(project, test_name).path) as f:
            errors = test_module.rename_test(project, test_name, new_test_name)
            assert errors == ['There was an error renaming file']
class TestDuplicateTest:
    """Tests for test_module.duplicate_test."""

    def test_duplicate_test(self, project_session, test_utils):
        # Duplicating keeps the original and adds the copy, both at the
        # project root and inside a folder.
        _, project = project_session.activate()
        # in root folder
        test_name = test_utils.create_random_test(project)
        new_test_name = test_utils.random_string()
        errors = test_module.duplicate_test(project, test_name, new_test_name)
        assert errors == []
        tests = Project(project).tests()
        assert test_name in tests
        assert new_test_name in tests
        # in folder
        dir = test_utils.random_string()
        name = test_utils.random_string()
        test_name = '{}.{}'.format(dir, name)
        test_utils.create_test(project, test_name)
        new_name = test_utils.random_string()
        new_test_name = '{}.{}'.format(dir, new_name)
        errors = test_module.duplicate_test(project, test_name, new_test_name)
        assert errors == []
        tests = Project(project).tests()
        assert test_name in tests
        assert new_test_name in tests

    def test_duplicate_test_same_name(self, project_session, test_utils):
        # Duplicating onto the same name is rejected.
        _, project = project_session.activate()
        test_name = test_utils.create_random_test(project)
        errors = test_module.duplicate_test(project, test_name, test_name)
        assert errors == ['New test name cannot be the same as the original']

    def test_duplicate_test_dest_exists(self, project_session, test_utils):
        # Duplicating onto an existing test is rejected.
        _, project = project_session.activate()
        test_one = test_utils.create_random_test(project)
        test_two = test_utils.create_random_test(project)
        errors = test_module.duplicate_test(project, test_one, test_two)
        assert errors == ['A test with that name already exists']
        # to another folder
        test_one = test_utils.create_random_test(project)
        test_two = '{}.{}'.format(test_utils.random_string(), test_utils.random_string())
        test_utils.create_test(project, test_two)
        errors = test_module.duplicate_test(project, test_one, test_two)
        assert errors == ['A test with that name already exists']
        # to same name
        test_one = test_utils.create_random_test(project)
        test_utils.create_test(project, test_two)
        errors = test_module.duplicate_test(project, test_one, test_one)
        assert errors == ['New test name cannot be the same as the original']

    def test_duplicate_test_invalid_name(self, project_session, test_utils):
        # Each class of invalid destination yields its specific error message.
        _, project = project_session.activate()
        test_name = test_utils.create_random_test(project)
        # invalid name
        new_test_name = 'new-name'
        errors = test_module.duplicate_test(project, test_name, new_test_name)
        assert errors == ['Only letters, numbers and underscores are allowed']
        # empty name
        new_test_name = 'test.'
        errors = test_module.duplicate_test(project, test_name, new_test_name)
        assert errors == ['File name cannot be empty']
        # empty directory
        # Fix: this case previously repeated the empty-file-name case above
        # ('test.'); use a name with an empty middle segment and expect the
        # directory error, matching TestCreateTest / TestRenameTest.
        new_test_name = 'test..test'
        errors = test_module.duplicate_test(project, test_name, new_test_name)
        assert errors == ['Directory name cannot be empty']

    def test_duplicate_test_with_data_file(self, project_session, test_utils):
        """Assert when a test has a data file the data file is duplicated as well"""
        _, project = project_session.activate()
        test_name = test_utils.create_random_test(project)
        new_test_name = test_utils.random_string()
        data_path = os.path.splitext(Test(project, test_name).path)[0] + '.csv'
        with open(data_path, 'w+') as f:
            f.write('')
        # Consistency: use the imported Test directly (was test_module.Test).
        new_data_path = os.path.splitext(Test(project, new_test_name).path)[0] + '.csv'
        test_module.duplicate_test(project, test_name, new_test_name)
        assert os.path.isfile(data_path)
        assert os.path.isfile(new_data_path)
class TestEditTest:
def test_edit_test_data_infile(self, project_function, test_utils):
_, project = project_function.activate()
test_name = test_utils.create_random_test(project)
description = 'description'
pages = ['page1', 'page2']
test_steps = {
'setup': [
{'type': 'function-call', 'action': 'click', 'parameters': ['elem1']}
],
'test': [
{'type': 'function-call', 'action': 'send_keys', 'parameters': ['elem2', 'keys']}
],
'teardown': []
}
data = [{
'key': '\'value\''
}]
settings_manager.save_project_settings(project, '{"test_data": "infile"}')
test_module.edit_test(project, test_name, description, pages, test_steps, data, [])
expected = (
'\n'
'description = \'description\'\n'
'\n'
'tags = []\n'
'\n'
'pages = [\'page1\',\n'
' \'page2\']\n'
'\n'
'data = [\n'
' {\n'
' \'key\': \'value\',\n'
' },\n'
']\n'
'\n\n'
'def setup(data):\n'
' click(elem1)\n'
'\n\n'
'def test(data):\n'
' send_keys(elem2, keys)\n'
'\n\n'
'def teardown(data):\n'
' pass\n')
with open(Test(project, test_name).path) as f:
assert f.read() == expected
def test_edit_test_data_csv(self, project_function, test_utils):
_, project = project_function.activate()
test_name = test_utils.create_random_test(project)
description = 'description'
pages = []
test_steps = {
'setup': [],
'test': [
{'type': 'function-call', 'action': 'send_keys', 'parameters': ['elem2', 'keys']}
],
'teardown': []
}
data = [{
'key': '\'value\''
}]
settings_manager.save_project_settings(project, '{"test_data": "csv"}')
test_module.edit_test(project, test_name, description, pages, test_steps, data, [])
expected = (
'\n'
'description = \'description\'\n'
'\n'
'tags = []\n'
'\n'
'pages = []\n'
'\n\n'
'def setup(data):\n'
' pass\n'
'\n\n'
'def test(data):\n'
' send_keys(elem2, keys)\n'
'\n\n'
'def teardown(data):\n'
' pass\n')
with open(Test(project, test_name).path) as f:
assert f.read() == expected
data_path = os.path.join(Project(project).test_directory_path,
'{}.csv'.format(test_name))
expected = ('key\n'
'\'value\'\n')
with open(data_path) as f:
assert f.read() == expected
def test_edit_test_explicit_page_import(self, project_function, | |
strike, dip, rake
aplane = obspy.imaging.beachball.aux_plane(fplane.strike, fplane.dip, fplane.rake)
Tstrike = axes[0].strike
Tdip = axes[0].dip
Pstrike = axes[2].strike
Pdip = axes[2].dip
S1 = fplane.strike
D1 = fplane.dip
R1 = fplane.rake
S2 = aplane[0]
D2 = aplane[1]
R2 = aplane[2]
mplanes = [Pstrike,Pdip,Tstrike,Tdip,S1,D1,R1,S2,D2,R2]
return mplanes[n]
def moment_calc(df, args, seismo_thick,slabname):
    ''' Creates and appends columns with Principal Axis and Nodal Plane information.

    Used in makeframe below. Takes moment tensor information from input dataframe
    columns and creates 11 new columns with information used to distinguish between thrust
    and non-thrust earthquakes.

    Arguments: df - dataframe with mt information in the form mrr,mtt,mpp,mrt,mrp,mtp
               args - input arguments provided from command line arguments
    Returns: df - dataframe with mt information in the form Paz,Ppl,Taz,Tpl,S1,D1,R1,S2,D2,R2

    NOTE(review): if the column selection/concat in the try-block raises and
    len(dfm) != 0, this function falls through and implicitly returns None —
    callers then receive None instead of a dataframe. Confirm before relying
    on the except path.
    '''
    #try:
    # Only calculates MT info where it exists in EQ datasets
    df = inbounds(args, df, slabname)
    # Split rows with moment tensors (finite mrr) from those without.
    dfm = df[np.isfinite(df['mrr'])]
    dfn = df[df['mrr'].isnull()]
    #except:
    #    raise Exception,'If file contains earthquake information (event-type = EQ), \
    #        required columns include: lat,lon,depth,mag,time. The columns of the current \
    #        file: %s. Check file format to ensure these columns are present and properly \
    #        labeled.' % df.columns
    # Calculates each new column of MT info
    try:
        # One m_to_planes call per output column; the final int argument
        # selects which element of the computed plane/axis list to return.
        dfm['Paz']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
            row['mrt'], row['mrp'], row['mtp'],0),axis=1)
        dfm['Ppl']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
            row['mrt'], row['mrp'], row['mtp'],1),axis=1)
        dfm['Taz']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
            row['mrt'], row['mrp'], row['mtp'],2),axis=1)
        dfm['Tpl']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
            row['mrt'], row['mrp'], row['mtp'],3),axis=1)
        dfm['S1']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
            row['mrt'], row['mrp'], row['mtp'],4),axis=1)
        dfm['D1']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
            row['mrt'], row['mrp'], row['mtp'],5),axis=1)
        dfm['R1']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
            row['mrt'], row['mrp'], row['mtp'],6),axis=1)
        dfm['S2']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
            row['mrt'], row['mrp'], row['mtp'],7),axis=1)
        dfm['D2']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
            row['mrt'], row['mrp'], row['mtp'],8),axis=1)
        dfm['R2']=dfm.apply(lambda row: m_to_planes(row['mrr'], row['mtt'], row['mpp'],
            row['mrt'], row['mrp'], row['mtp'],9),axis=1)
        # Concatenates events with and without MT info
        #dfm = cmtfilter(dfm,seismo_thick)
        df = pd.concat([dfm,dfn],sort=True)
        # Rearranges columns and returns
        if 'mlon' in df.columns:
            df = df[['lat','lon','depth','unc','ID','etype','mag','time',
                'Paz','Ppl','Taz','Tpl','S1','D1','R1','S2','D2','R2','mlon','mlat','mdep']]
        else:
            df = df[['lat','lon','depth','unc','ID','etype','mag','time',
                'Paz','Ppl','Taz','Tpl','S1','D1','R1','S2','D2','R2']]
            # Without MT-relocated coordinates, mirror lat/lon/depth into
            # mlat/mlon/mdep (copies, not views).
            df['mlon'] = df['lon'].values*1.0
            df['mlat'] = df['lat'].values*1.0
            df['mdep'] = df['depth'].values*1.0
        return df
    except:
        # if exception is caught, try to return only events without MT info
        try:
            if len(dfm) == 0:
                return dfn
        except:
            print('Where moment tensor information is available, columns \
must be labeled: mrr,mpp,mtt,mrp,mrt,mtp')
def ymdhmsparse(input_file):
    '''Parses Yr Mo Day Hr Min Sec into one datetime object when provided in distinguished columns.

    Used in makeframe below. Returns a new dataframe with a combined 'time'
    datetime column plus the lat/lon/depth/mag columns; the six component
    columns are dropped.

    Fix: the previous implementation used ``pd.datetime`` (removed in
    pandas >= 2.0, raising AttributeError) together with the deprecated
    ``date_parser`` argument. The 'time' column is now assembled with
    ``pd.to_datetime`` on the renamed component columns. '''
    cols = ['year','month','day','hour','min','sec','lat','lon','depth','mag']
    data = pd.read_csv(input_file, usecols=cols)
    # pd.to_datetime builds datetimes from a frame with year/month/day/
    # hour/minute/second columns; rename min/sec to match its expectations.
    parts = data[['year','month','day','hour','min','sec']].rename(
        columns={'min': 'minute', 'sec': 'second'})
    data.insert(0, 'time', pd.to_datetime(parts))
    return data.drop(columns=['year','month','day','hour','min','sec'])
def raiseUnc(x):
    ''' Raises unreasonably low uncertainties for earthquakes to a value greater
    than that of average active source data points (which is 5 km):
    values below 6 are floored at 6, everything else passes through. '''
    return 6 if x < 6 else x
def makeframe(data, fcsv, event_type, uncertainty, args, seismo_thick,slabname):
    ''' Arguments:  data - semi-filtered data frame to be filtered more and written to file
                    fcsv - filename of output file
                    event_type - kind of data i.e. BA, EQ, ER, TO etc
                    uncertainty - unc value provided in command line or set by default for etype
                    args - input arguments provided from command line arguments

        Returns:    data - fully filtered dataset to be written to output file '''
    # Parses Yr Mo Day Hr Min Sec into one datetime object when provided in distinguished columns
    if 'year' in data.columns and 'sec' in data.columns and 'mag' in data.columns:
        data = ymdhmsparse(fcsv)
    # If ISC-GEM data is provided, high quality, low uncertainties are included in place of
    # the default values assigned in s2d.py main method.
    if 'unc' in data.columns and 'q' in data.columns:
        try:
            # NOTE(review): the guard checks for a column named 'q' but the
            # filter reads data.uq (and the message below says 'uq'); with a
            # 'q'-only frame this raises AttributeError and only prints the
            # warning. Confirm which column name is intended.
            data = data[(data.uq != 'C') & (data.unc < uncertainty)]
        except:
            print ('When adding a file with uncertainty quality, the column \
            representing that quality must be labeled as uq')
    # uses OG uncertainties where provided. Raises them if they are unreasonably low
    elif 'unc' in data.columns:
        uncert = data['unc'].values
        try:
            # NOTE(review): uncert[1] inspects only the second row (and
            # IndexErrors on single-row frames, falling into the except);
            # presumably a spot-check for an all-NaN column — confirm.
            if isnan(uncert[1]):
                data['unc'] = uncertainty
            elif event_type == 'EQ':
                data['unc'] = data.apply(lambda row: raiseUnc(row['unc']),axis=1)
            else:
                pass
        except:
            data['unc'] = uncertainty
    # If no uncertainty column is included, the one provided in command line arguments is
    # used to add a new column to the data, alternatively, the default value assigned in s2d.py is used
    else:
        data['unc'] = uncertainty
    # Silence SettingWithCopyWarning for the column assignments above/below.
    pd.options.mode.chained_assignment = None
    # A new column marking the event type is added to the data. Everything is cast as a float
    data['etype'] = event_type
    data = castfloats(data)
    # Calculates moment tensor info where applicable and removes shallow, non-thrust events
    if 'mrr' in data.columns:
        data = moment_calc(data, args, seismo_thick,slabname)
    elif 'time' in data.columns and 'mag' in data.columns:
        data = data[['lat','lon','depth','unc','ID','etype','mag','time']]
    else:
        pass
    return data
##########################################################################################################
#The following serves to create a rough plot of the data types compiled with s2d.py.
##########################################################################################################
def plot_map(lons, lats, c, legend_label, projection='mill',
            llcrnrlat=-80, urcrnrlat=90, llcrnrlon=-180, urcrnrlon=180, resolution='i'):
    '''Scatter a set of lat/lon points on top of a styled Basemap.

    Required Arguments: lons - list of longitude values to be plotted
                        lats - list of latitude values to be plotted
                        c - matplotlib color used for the plotted points
                        legend_label - legend entry for this set of points
    Optional Arguments: projection - map projection, default 'mill'
                        llcrnrlat/urcrnrlat - latitude bounds of the map
                        llcrnrlon/urcrnrlon - longitude bounds of the map
                        resolution - Basemap resolution flag, default 'i'
    Returns: the Basemap instance with the input points drawn on it '''
    basemap = Basemap(projection=projection,
                      llcrnrlat=llcrnrlat, urcrnrlat=urcrnrlat,
                      llcrnrlon=llcrnrlon, urcrnrlon=urcrnrlon,
                      resolution=resolution)
    # base layers: coastlines, frame, country borders, shaded relief
    basemap.drawcoastlines()
    basemap.drawmapboundary()
    basemap.drawcountries()
    basemap.etopo()
    # graticule every 5 degrees, labelled along the bottom and left edges
    basemap.drawmeridians(np.arange(llcrnrlon, urcrnrlon, 5),
                          labels=[0, 0, 0, 1], fontsize=10)
    basemap.drawparallels(np.arange(llcrnrlat, urcrnrlat, 5),
                          labels=[1, 0, 0, 0], fontsize=10)
    # project geographic coordinates into map coordinates, then scatter
    xpts, ypts = basemap(lons, lats)
    basemap.scatter(xpts, ypts, color=c, label=legend_label,
                    marker='o', edgecolor='none', s=10)
    return basemap
def datelinecross(x):
    '''Shift a negative longitude into the 0-360 range so plots do not
    break at the antimeridian; non-negative values pass through unchanged.'''
    return x + 360 if x < 0 else x
##############################################################################################
#Everything below this point serves the purpose of identifying and
#eliminating duplicate events between multiple earthquake catalog entries.
##############################################################################################
class Earthquake:
    '''Plain container for a single earthquake catalog entry.

    Holds the origin time, epicenter coordinates, hypocenter depth,
    magnitude and the catalog the event came from.
    '''
    def __init__(self, time, coords, depth, lat, lon, mag, catalog):
        # origin time and location
        self.time = time
        self.coords = coords
        self.lat = lat
        self.lon = lon
        self.depth = depth
        # event size and provenance
        self.mag = mag
        self.catalog = catalog
def getvals(row):
    '''Extract (time, epicenter, depth, lat, lon, mag) from a dataframe row.

    The epicenter is returned as a (lat, lon) tuple alongside the
    individual coordinate values.'''
    lat, lon = row['lat'], row['lon']
    return row['time'], (lat, lon), row['depth'], lat, lon, row['mag']
def boundtrim(cat1, cat2):
    ''' Arguments: cat1 - an earthquake catalog to be compared with cat2
                   cat2 - an earthquake catalog to be compared to cat1
        Returns: cat1, cat2 - catalogs trimmed to the lat/lon bounds where
                 both datasets have coverage. Reduces processing time.
    '''
    # record the original bounds of both catalogs before any trimming
    lomin1, lomax1 = cat1['lon'].min(), cat1['lon'].max()
    lomin2, lomax2 = cat2['lon'].min(), cat2['lon'].max()
    lamin1, lamax1 = cat1['lat'].min(), cat1['lat'].max()
    lamin2, lamax2 = cat2['lat'].min(), cat2['lat'].max()
    # detect catalogs that appear to straddle the dateline: longitudes on
    # both sides of 0 with a span wider than 180 degrees.  In that case
    # longitude filtering would be wrong, so leave the catalogs untouched.
    reaches_west = (0 < lomax1 < 180) or (0 < lomax2 < 180)
    reaches_east = (-180 < lomin1 < 0) or (-180 < lomin2 < 0)
    wide_span = abs(lomin1 - lomax1) > 180 or abs(lomin2 - lomax2) > 180
    if not (reaches_west and reaches_east and wide_span):
        # clip each catalog to the other's original bounding box
        cat1 = cat1[(cat1.lon >= lomin2) & (cat1.lon <= lomax2)]
        cat2 = cat2[(cat2.lon >= lomin1) & (cat2.lon <= lomax1)]
        cat1 = cat1[(cat1.lat >= lamin2) & (cat1.lat <= lamax2)]
        cat2 = cat2[(cat2.lat >= lamin1) & (cat2.lat <= lamax1)]
    return cat1, cat2
def timetrim(cat1, cat2):
''' Arguments: cat1 - an earthquake catalog to be compared with cat2
cat2 - an earthquake catalog to be compared to cat1
Returns: cat1, cat2 - trimmed earthquake catalogs that only extend across time
frames where they both exist. Reduces processing time
'''
# Trims two earthquake catalogs to fit over the same time range
cat1['time'] | |
_evalfunc(self, reader_arg=None, process_cnt=None):
"""
Initialize a Fits-instance and perform a fit.
(used for parallel processing)
Parameters
----------
reader_arg : dict
A dict of arguments passed to the reader.
process_cnt : list
A list of shared-memory variables that are used to update the
progressbar
Returns
-------
The used 'rt1.rtfit.Fits' object or the output of 'postprocess()'
"""
if process_cnt is not None:
start = _start_cnt()
try:
# if a reader (and no dataset) is provided, use the reader
read_data = self.proc_cls.reader(reader_arg)
# check for multiple return values and split them accordingly
# (any value beyond the first is appended as aux_data)
if isinstance(read_data, pd.DataFrame):
dataset = read_data
aux_data = None
elif (isinstance(read_data, (list, tuple))
and isinstance(read_data[0], pd.DataFrame)):
if len(read_data) == 2:
dataset, aux_data = read_data
elif len(read_data) > 2:
dataset = read_data[0]
aux_data = read_data[1:]
else:
raise TypeError('the first return-value of reader function ' +
'must be a pandas DataFrame')
# initialize a new fits-object and perform the fit
fit = self.parent_fit.reinit_object(dataset=dataset)
fit.performfit()
# append auxiliary data
if aux_data is not None:
fit.aux_data = aux_data
# append reader_arg
fit.reader_arg = reader_arg
_increase_cnt(process_cnt, start, err=False)
# if a post-processing function is provided, return its output,
# else return the fit-object directly
if callable(self.proc_cls.postprocess):
ret = self.proc_cls.postprocess(fit, reader_arg)
else:
ret = fit
return ret
except Exception as ex:
if callable(self.proc_cls.exceptfunc):
ex_ret = self.proc_cls.exceptfunc(ex, reader_arg)
if ex_ret is None or ex_ret is False:
_increase_cnt(process_cnt, start, err=True)
else:
_increase_cnt(process_cnt, start, err=False)
return ex_ret
else:
raise ex
def processfunc(self, ncpu=1, print_progress=True,
                reader_args=None, pool_kwargs=None,
                preprocess_kwargs=None):
    """
    Evaluate a RT-1 model on a single core or in parallel using

        - a list of datasets or
        - a reader-function together with a list of arguments that
          will be used to read the datasets

    Notice:
        On Windows, if multiprocessing is used, you must protect the call
        of this function via:
        (see for example: )

        >>> if __name__ == '__main__':
                fit.processfunc(...)

        In order to allow pickling the final rt1.rtfits.Fits object,
        it is required to store ALL definitions within a separate
        file and call processfunc in the 'main'-file as follows:

        >>> from specification_file import fit reader lsq_kwargs ... ...
            if __name__ == '__main__':
                fit.processfunc(ncpu=5, reader=reader,
                                lsq_kwargs=lsq_kwargs, ... ...)

    Parameters
    ----------
    ncpu : int, optional
        The number of kernels to use. The default is 1.
    print_progress : bool, optional
        indicator if a progress-bar should be printed to stdout or not
        that looks like this:

        >>> approx. 0 00:00:02 remaining ################------ 3 (2) / 4
        ...
        ... (estimated time day HH:MM:SS)( progress bar )( counts )
        ... ( counts ) = finished fits [actually fitted] / total

        The default is True.
    reader_args : list, optional
        A list of dicts that will be passed to the reader-function.
        If `None`, the `reader_args` will be taken from the return of the
        `preprocess()`-function via:

        >>> reader_args = preprocess(**preprocess_kwargs)['reader_args']

        The default is None.
    pool_kwargs : dict, optional
        A dict with additional keyword-arguments passed to the
        initialization of the multiprocessing-pool via:

        >>> mp.Pool(ncpu, **pool_kwargs)

        The default is None.
    preprocess_kwargs : dict, optional
        A dict with keyword-arguments passed to the call of the preprocess
        function via:

        >>> preprocess(**preprocess_kwargs)

        The default is None.

    Returns
    -------
    res : list
        A list of rt1.rtfits.Fits objects or a list of outputs of the
        postprocess-function.
    """
    # run the user-supplied preprocess function (if any); it may return a
    # dict that provides 'reader_args' and/or 'pool_kwargs'
    if callable(self.proc_cls.preprocess):
        setupdict = self.proc_cls.preprocess(**preprocess_kwargs)
        if setupdict is None:
            setupdict = dict()
        assert isinstance(setupdict, dict), (
            'the preprocess() function must return a dict!')
    else:
        setupdict = dict()

    # check if reader args is provided in setupdict; passing reader_args
    # both directly AND via preprocess() is an error
    if reader_args is None:
        assert 'reader_args' in setupdict, (
            'if "reader_args" is not passed directly to processfunc() ' +
            ', the preprocess() function must return a key "reader_args"!')
        reader_args = setupdict['reader_args']
    else:
        assert 'reader_args' not in setupdict, (
            '"reader_args" is provided as argument to processfunc() ' +
            'AND via the return-dict of the preprocess() function!')

    print(f'processing {len(reader_args)} features')

    # pool_kwargs from preprocess() take precedence over the argument
    if 'pool_kwargs' in setupdict:
        pool_kwargs = setupdict['pool_kwargs']
    if pool_kwargs is None:
        pool_kwargs = dict()

    if self.parent_fit.int_Q is True:
        # pre-evaluate the fn-coefficients if interaction terms are used
        # (so each worker does not have to re-derive them)
        self.parent_fit._fnevals_input = self.parent_fit.R._fnevals

    if print_progress is True:
        # initialize shared values that will be used to track the number
        # of completed processes and the mean time to complete a process
        process_cnt = _setup_cnt(N_items=len(reader_args), ncpu=ncpu)
    else:
        process_cnt = None

    if ncpu > 1:
        print('start of parallel evaluation')
        with mp.Pool(ncpu, **pool_kwargs) as pool:
            # loop over the reader_args; each worker runs _evalfunc()
            res_async = pool.starmap_async(self._evalfunc,
                                           zip(reader_args,
                                               repeat(process_cnt)))
            pool.close()  # Marks the pool as closed.
            pool.join()   # Waits for workers to exit.
            res = res_async.get()
    else:
        print('start of single-core evaluation')
        # call the initializer if it has been provided (mirrors the
        # behaviour of mp.Pool's initializer in single-core mode)
        if 'initializer' in pool_kwargs:
            if 'initargs' in pool_kwargs:
                pool_kwargs['initializer'](*pool_kwargs['initargs'])
            else:
                pool_kwargs['initializer']()
        res = []
        for reader_arg in reader_args:
            res.append(self._evalfunc(reader_arg=reader_arg,
                                      process_cnt=process_cnt))

    # optionally post-process the combined results before returning
    if callable(self.proc_cls.finaloutput):
        return self.proc_cls.finaloutput(res)
    else:
        return res
def run_processing(self, ncpu=1, copy=True, print_progress=True,
                   reader_args=None, pool_kwargs=None,
                   preprocess_kwargs=None):
    '''
    Start the processing

    Parameters
    ----------
    ncpu : int
        The number of cpu's to use. The default is 1.
    copy : bool, optional
        Indicator if the used config-file and all modules specified in the
        "CONFIGFILES" section of the config-file should be copied to
        "/dumpfolder/cfg" or not. The default is True.
    print_progress : bool, optional
        Indicator if a progress-bar should be printed or not.
        If True, it might be wise to suppress warnings during runtime
        to avoid unwanted outputs. This can be achieved by using:

        >>> import warnings
        ... warnings.simplefilter('ignore')

        The default is True.
    reader_args : list, optional
        A list of dicts that will be passed to the reader-function.
        If `None`, the `reader_args` will be taken from the return of the
        `preprocess()`-function via:

        >>> reader_args = preprocess(**preprocess_kwargs)['reader_args']

        The default is None.
    pool_kwargs : dict, optional
        A dict with additional keyword-arguments passed to the
        initialization of the multiprocessing-pool via:

        >>> mp.Pool(ncpu, **pool_kwargs)

        The default is None.
    preprocess_kwargs : dict, optional
        A dict with keyword-arguments passed to the call of the preprocess
        function via:

        >>> preprocess(**preprocess_kwargs)

        The default is None.
    '''
    print('############################################################\n')
    # initialize all necessary properties (dump folders, config copy, ...)
    self.setup(copy=copy)

    if preprocess_kwargs is None:
        preprocess_kwargs = dict()

    # save the used model-definition string to a file so the run can be
    # reconstructed later from the dump-folder alone
    if self.dumppath is not None:
        with open(self.dumppath / 'cfg' / 'model_definition.txt',
                  'w') as file:
            outtxt = ''
            # prepend the (optional) human-readable description
            if hasattr(self.proc_cls, 'description'):
                outtxt += dedent(self.proc_cls.description)
                outtxt += '\n\n'
                outtxt += '_'*77
                outtxt += '\n\n'
            outtxt += self.parent_fit._model_definition
            print(outtxt, file=file)

    # run the actual processing; the return value is intentionally
    # discarded here (results are handled by postprocess/finaloutput)
    _ = self.processfunc(ncpu=ncpu, print_progress=print_progress,
                         reader_args=reader_args, pool_kwargs=pool_kwargs,
                         preprocess_kwargs=preprocess_kwargs)
class RTresults(object):
'''
A class to provide easy access to processed results.
On initialization the class will traverse the provided "parent_path"
and recognize any sub-folder that matches the expected folder-structure
as a sub-result.
Assuming a folder-structure as indicated below, the class can be used via:
>>> ../../RESULTS (parent_path)
... results/.. (.nc files)
... dumps/.. (.dump files)
... cfg/.. (.ini files)
...
... sub_RESULT1
... results/.. (.nc files)
... dumps/.. (.dump files)
... cfg/.. (.ini files)
...
... sub_RESULT2
... ....
>>> results = RT1_results(parent_path)
... # print available NetCDF files and variables
... x.RESULTS.NetCDF_variables
... x.sub_RESULT_1.NetCDF_variables
...
... # load some dump-files
... fit_random = results.sub_RESULT_1.load_fit()
... fit1_0 = results.RESULT.load_fit('id_of_fit_1')
... fit1_1 = results.sub_RESULT_1.load_fit('id_of_fit_1')
... fit1_2 = results.sub_RESULT_2.load_fit('id_of_fit_1')
...
... # access a NetCDF file
... with results.sub_RESULT_2.load_nc() as ncfile:
... --- read something from the ncfie ---
...
... # get a generator for the paths of all available dump-files
... dump-files = results.sub_RESULT_1.dump_files
...
... # load the configfile of a given fit
... cfg_01 = results.sub_RESULT_1.load_cfg()
Parameters
----------
parent_path : str
the parent-path where the results are stored.
'''
def __init__(self, parent_path):
    """Scan parent_path (and its direct children) for result folders.

    A folder is recognized as a result if it contains the three
    sub-folders 'cfg', 'results' and 'dumps'; each recognized result
    is registered as an attribute on this instance.
    """
    self._parent_path = Path(parent_path)
    self._paths = dict()

    def _is_result(folder):
        # a result folder must contain cfg/, results/ and dumps/
        stems = [child.stem for child in folder.iterdir()]
        return all(req in stems for req in ('cfg', 'results', 'dumps'))

    def _register(folder):
        # remember the path and expose the result as an attribute
        self._paths[folder.stem] = folder
        print('... adding result', folder.stem)
        setattr(self, folder.stem,
                self._RT1_fitresult(folder.stem, folder))

    # the parent folder itself may be a result ...
    if _is_result(self._parent_path):
        _register(self._parent_path)
    # ... and so may any of its direct sub-folders
    for child in self._parent_path.iterdir():
        if child.is_dir() and _is_result(child):
            _register(child)
class _RT1_fitresult(object):
def __init__(self, name, path):
self.name = name
self.path = | |
#!/usr/bin/env python3
from cache import Cache
from math import ceil
import string
DISK = "" # disk location
ROOT_LOCATION = 0 # root dir location
# file globals var
FILE_NAME_SIZE = 10
CHARS_ALLOWED = string.ascii_letters + string.digits + "."
NAME_PROHIBITED = ("..", ".")
# Formatting a virtual partition.
def init(PATH: str, size: int) -> int:
    """Initialize (format) a virtual partition.

    Writes the partition header: the first two bytes hold the size of the
    block-allocation table (big-endian), followed by the table itself with
    the first five blocks pre-allocated for the root directory.

    Arguments:
        PATH - location of the disk file backing the partition
        size - partition size in bytes

    Returns: 0 on success (matching the int convention of mkdir/rmdir).
    Raises: ValueError if size exceeds the maximum supported size.
    """
    global DISK
    global ROOT_LOCATION
    if size > 268431360:
        raise ValueError("unallowed size (too large)")
    DISK = PATH
    # 512 * 8 stands for the number of bytes in a block, multiplied by the
    # number of bits in a byte, since we use 1 bit per block.
    table_size = int(ceil(size / (512 * 8)))
    # table size encoded big-endian into the first two bytes of the disk
    bin_table_size = bytes([table_size // 256, table_size % 256])
    # the root directory starts right after the allocation table
    ROOT_LOCATION = table_size
    new_disk = Cache(PATH)
    # b"\xF8" (0b11111000) marks the first 5 blocks as used - they are
    # allocated for the root directory. Here begins the allocation table.
    new_disk.write(size, bin_table_size + b"\xF8")
    return 0
# Opening ("mounting") a virtual disk.
def open(PATH):
    """Mount a virtual disk: remember its path and locate the root dir.

    Reads the first block and decodes the allocation-table size from its
    first two bytes (big-endian); the root directory starts right after
    the table.
    """
    global DISK
    global ROOT_LOCATION
    DISK = PATH
    mounted = Cache(PATH)
    mounted.seek(0)
    header = mounted.read(1)
    ROOT_LOCATION = header[0] * 256 + header[1]
# Reading a directory's content.
def ls(DIR: str) -> list:
    """Read a directory's content.

    DIR must be an absolute path of the form "/a/b/" (leading AND trailing
    slash). Walks the directory tree block by block, parsing each
    directory's on-disk entry format, which (as decoded below) is:

        name:LOC[+LOC...]:S;name:LOC...:S;...

    where each LOC is a 4-byte big-endian block location and S is a
    one-byte state flag (b"0" marks a directory - see the descent test
    below and mkdir()).

    Returns a list of entries [name(bytes), [block locations(int)],
    state(bytes)] for the final directory, or [] if it is empty.
    Raises SyntaxError for an invalid path and Exception if no disk is
    mounted.
    """
    if DISK == "" and ROOT_LOCATION == 0:
        raise Exception("disque need to bee open")
    path = DIR.split("/")
    # an absolute "/a/b/" path splits into ['', 'a', 'b', ''] - require
    # both the leading and the trailing slash
    if path[0] == "" and path[len(path) - 1] == "":
        path[0] = "/"
    else:
        raise SyntaxError("invalid path : " + DIR)
    diskfile = Cache(DISK)
    # start the walk at the 5 blocks reserved for the root directory
    emplacement = [ROOT_LOCATION + x for x in range(5)]
    i = 1
    while True:
        exist = False
        # read all blocks of the current directory and strip the padding
        dir = b""
        for x in emplacement:
            diskfile.seek(x)
            dir += diskfile.read(1)
        data = dir.strip(b"\x00")
        data2 = []
        sss = 0  # cursor into data3
        xx = 0   # index of the entry currently being decoded
        # prepend ";" so every entry (including the first) is preceded by
        # an entry separator
        data3 = ";".encode() + data
        if data == b"":
            return []
        # decode "name:LOC+LOC...:S" entries into [name, [raw locs], state]
        while True:
            data2.append([b"", [], b"0"])
            # scan up to the first ":" -> the entry name
            for aaaa in range(sss, len(data3)):
                if bytes([data3[aaaa]]) == ":".encode():
                    data2[xx][0] = data3[sss + 1 : aaaa]
                    sss = aaaa
                    break
            # scan the location list: 4-byte chunks separated by "+",
            # terminated by ":" followed by the one-byte state flag
            for aaaa in range(sss, len(data3)):
                if (
                    (aaaa - sss) % 5 == 0
                    and aaaa != sss
                    and bytes([data3[aaaa]]) == "+".encode()
                ):
                    data2[xx][1].append(data3[aaaa - 4 : aaaa])
                elif (
                    (aaaa - sss) % 5 == 0
                    and aaaa != sss
                    and bytes([data3[aaaa]]) == ":".encode()
                ):
                    data2[xx][1].append(data3[aaaa - 4 : aaaa])
                    data2[xx][2] = bytes([data3[aaaa + 1]])
                    sss = aaaa + 2
                    break
            xx += 1
            if sss == len(data3):
                break
        data = data2
        if data == [[b""]] and i == len(path) - 1:
            return []
        elif data == [[b""]] and i != len(path) - 1:
            raise SyntaxError("invalid path : " + DIR)
        # convert each raw 4-byte location into an integer (big-endian)
        for x in range(len(data)):
            locs = []
            for y in range(len(data[x][1])):
                loc = 0
                for j in range(len(data[x][1][y])):
                    loc += data[x][1][y][j] * (
                        256 ** (len(data[x][1][y]) - j - 1)
                    )
                locs.append(loc)
            data[x][1] = locs
        # either we reached the requested directory (path[i] == "") or we
        # descend into the matching sub-directory (state b"0")
        for x in data:
            if path[i] == "":
                return data
            if path[i].encode() in x and x[2] == b"0":
                i += 1
                exist = True
                emplacement = [y for y in x[1]]
                break
        if exist == False:
            raise SyntaxError("invalid path : " + DIR)
# Entry appending and deletion in a directory.
def mkdir(parent_dir, NAME: str) -> int:
    """Create an empty directory NAME inside parent_dir.

    Returns 0 on success and -1 when the name already exists or the
    parent path is invalid. Raises Exception when no disk is mounted
    and SyntaxError for a malformed name.
    """
    if DISK == "" and ROOT_LOCATION == 0:
        raise Exception("disque need to be open")
    # validate the name: length limit, allowed characters, reserved names
    valid_name = (
        len(NAME) <= FILE_NAME_SIZE
        and NAME not in NAME_PROHIBITED
        and all(ch in CHARS_ALLOWED for ch in NAME)
    )
    if not valid_name:
        raise SyntaxError("bad file name")
    try:
        entries = ls(parent_dir)
        if NAME.encode() in [entry[0] for entry in entries]:
            return -1  # an entry with that name already exists
        # allocate one block for the new (empty) directory
        new_block = free_block()
        use_block(new_block)
        disk = Cache(DISK)
        disk.seek(new_block)
        disk.write(1, "".encode())
        # register the entry in the parent; b"0" marks a directory
        set_location(parent_dir, NAME, [new_block], b"0")
        return 0
    except SyntaxError:
        # ls()/set_location() reports an invalid parent path
        return -1
def rmdir(parent_dir, NAME: str) -> int:
    """Delete an empty directory.

    Removes the entry NAME from parent_dir, rewrites the parent
    directory's blocks and frees the directory's own blocks.
    Returns 0 on success, -1 if NAME does not exist or is not empty.
    Raises Exception when no disk is mounted.
    """
    if DISK == "" and ROOT_LOCATION == 0:
        raise Exception("disque need to be open")
    if NAME.encode() not in [x[0] for x in ls(parent_dir)]:
        return -1
    # refuse to delete a non-empty directory
    if ls(parent_dir + NAME + "/") != []:
        return -1
    parent_dir_content = ls(parent_dir)
    # find the block locations of the directory being removed
    for x in parent_dir_content:
        if x[0] == NAME.encode():
            loc = x[1]
    # parent content without the removed entry
    new_parent_dir_content = [y for y in parent_dir_content if y[1] != loc]
    disk = Cache(DISK)
    # default to the root directory's reserved blocks; if parent_dir is
    # not the root, look up its block locations in the grandparent
    loc_up_dir = [ROOT_LOCATION + x for x in range(5)]
    if parent_dir != "/":
        up_dir = parent_dir.split("/")
        up_dir_parent = parent_dir[
            0 : len(parent_dir) - len(up_dir[len(up_dir) - 2]) - 1
        ]
        for i in ls(up_dir_parent):
            if i[0] == up_dir[len(up_dir) - 2].encode():
                loc_up_dir = i[1]
    # re-serialize the remaining entries into the on-disk format
    # "name:LOC+LOC...:state;..." with 4-byte big-endian locations
    data = b""
    # print(dir_content)
    for x in new_parent_dir_content:
        if len(x) > 1:
            loc_ = b""
            for y in x[1]:
                loc_ += bytes(
                    [
                        y // 16777216,
                        y % 16777216 // 65536,
                        y % 16777216 % 65536 // 256,
                        y % 16777216 % 65536 % 256,
                    ]
                )
                loc_ += "+".encode()
            loc_ = loc_[0 : len(loc_) - 1]  # drop the trailing "+"
            data += x[0]
            data += ":".encode()
            data += loc_
            data += ":".encode()
            data += x[2]
            data += ";".encode()
    data = data[0 : len(data) - 1]  # drop the trailing ";"
    # write the updated parent content back, 512 bytes per block
    for x in range(len(loc_up_dir)):
        disk.seek(loc_up_dir[x])
        disk.write(1, data[x * 512 : (x + 1) * 512])
    # release the removed directory's blocks in the allocation table
    for x in loc:
        use_block(x, "0")
    # print(parent_dir_content, new_parent_dir_content)
    return 0
# rm
def rm(parent_dir, NAME: str, mode=0):
    """delete a file (mode 1 = secure)

    Removes the entry NAME from parent_dir, rewrites the parent
    directory's blocks and frees the file's blocks. With mode=1 the
    file's blocks are additionally overwritten before being freed.
    Returns 0 on success, -1 if the path is invalid, NAME does not
    exist, or NAME is a directory (state b"0").
    Raises Exception when no disk is mounted.
    """
    if DISK == "" and ROOT_LOCATION == 0:
        raise Exception("disque need to be open")
    # validate the parent path first
    try:
        ls(parent_dir)
    except SyntaxError as e:
        return -1
    if NAME.encode() not in [x[0] for x in ls(parent_dir)]:
        return -1
    # directories (state b"0") must be removed with rmdir(), not rm()
    for x in ls(parent_dir):
        if x[0] == NAME.encode():
            if x[2] == b"0":
                return -1
    parent_dir_content = ls(parent_dir)
    # find the block locations of the file being removed
    for x in parent_dir_content:
        if x[0] == NAME.encode():
            loc = x[1]
    # parent content without the removed entry
    new_parent_dir_content = [y for y in parent_dir_content if y[1] != loc]
    disk = Cache(DISK)
    # default to the root directory's reserved blocks; if parent_dir is
    # not the root, look up its block locations in the grandparent
    loc_up_dir = [ROOT_LOCATION + x for x in range(5)]
    if parent_dir != "/":
        up_dir = parent_dir.split("/")
        up_dir_parent = parent_dir[
            0 : len(parent_dir) - len(up_dir[len(up_dir) - 2]) - 1
        ]
        for i in ls(up_dir_parent):
            if i[0] == up_dir[len(up_dir) - 2].encode():
                loc_up_dir = i[1]
    # re-serialize the remaining entries into the on-disk format
    # "name:LOC+LOC...:state;..." with 4-byte big-endian locations
    data = b""
    # print(dir_content)
    for x in new_parent_dir_content:
        if len(x) > 1:
            loc_ = b""
            for y in x[1]:
                loc_ += bytes(
                    [
                        y // 16777216,
                        y % 16777216 // 65536,
                        y % 16777216 % 65536 // 256,
                        y % 16777216 % 65536 % 256,
                    ]
                )
                loc_ += "+".encode()
            loc_ = loc_[0 : len(loc_) - 1]  # drop the trailing "+"
            data += x[0]
            data += ":".encode()
            data += loc_
            data += ":".encode()
            data += x[2]
            data += ";".encode()
    data = data[0 : len(data) - 1]  # drop the trailing ";"
    # write the updated parent content back, 512 bytes per block
    for x in range(len(loc_up_dir)):
        disk.seek(loc_up_dir[x])
        disk.write(1, data[x * 512 : (x + 1) * 512])
    # secure mode: overwrite the file's blocks before freeing them
    if mode == 1:
        for x in range(len(loc)):
            disk.seek(loc[x])
            disk.write(1, b"\x00")
    # release the file's blocks in the allocation table
    for x in loc:
        use_block(x, "0")
    # print(parent_dir_content, new_parent_dir_content)
    return 0
# set location
def set_location(PATH: str, file: str, loc: list, state=b"1"):
    """Create or update the entry `file` in directory PATH.

    Registers `file` with block locations `loc` and the given one-byte
    state flag (b"1" by default; mkdir passes b"0" for directories), then
    rewrites the directory's blocks on disk.
    Returns 0. Raises Exception when no disk is mounted; SyntaxError
    propagates from ls() for an invalid PATH.
    """
    if DISK == "" and ROOT_LOCATION == 0:
        raise Exception("disque need to be open")
    dir_content = ls(PATH)
    # look for an existing entry with that name
    loc_file = None
    for x in range(len(dir_content)):
        if dir_content[x][0] == file.encode():
            loc_file = dir_content[x][1]
            break
    if loc_file == None:
        # new entry: append [name, locations, state]
        dir_content.append([file.encode(), [x for x in loc], state])
    else:
        if loc_file == loc:
            return 0  # nothing to change
        # NOTE(review): relies on `x` still holding the matching index
        # from the search loop above
        dir_content[x][1] = loc
    # find the block locations of the directory PATH itself: either look
    # it up in its parent, or use the root directory's reserved blocks
    if PATH != "/":
        dir = PATH.split("/")
        dir_name = dir[len(dir) - 2]
        up_dir = PATH[0 : len(PATH) - len(dir_name) - 1]
        up_dir_content = ls(up_dir)
        for x in up_dir_content:
            if x[0] == dir_name.encode():
                loc_dir = x[1]
    else:
        loc_dir = [ROOT_LOCATION + x for x in range(5)]
    disk = Cache(DISK)
    # re-serialize all entries into the on-disk format
    # "name:LOC+LOC...:state;..." with 4-byte big-endian locations
    data = b""
    # print(dir_content)
    for x in dir_content:
        if len(x) > 1:
            loc_ = b""
            for y in x[1]:
                loc_ += bytes(
                    [
                        y // 16777216,
                        y % 16777216 // 65536,
                        y % 16777216 % 65536 // 256,
                        y % 16777216 % 65536 % 256,
                    ]
                )
                loc_ += "+".encode()
            loc_ = loc_[0 : len(loc_) - 1]  # drop the trailing "+"
            data += x[0]
            data += ":".encode()
            data += loc_
            data += ":".encode()
            data += x[2]
            data += ";".encode()
    data = data[0 : len(data) - 1]  # drop the trailing ";"
    # write back only as many 512-byte blocks as the data occupies
    for x in range(len(data) // 512 + 1 * int(len(data) % 512 != 0)):
        disk.seek(loc_dir[x])
        disk.write(1, data[x * 512 : (x + 1) * 512])
    # NOTE(review): this trailing seek re-uses the loop variable and is
    # never followed by a write - it looks like leftover code; confirm
    disk.seek(loc_dir[x])
    return 0
# | |
is not None:
return self._resume_checkpoint(
*args, checkpoint_resume=checkpoint_resume, **kwargs
)
if branch:
rev = self.scm.resolve_rev(branch)
logger.debug(
"Using '%s' (tip of branch '%s') as baseline", rev, branch
)
else:
rev = self.repo.scm.get_rev()
self._scm_checkout(rev)
force = kwargs.get("force", False)
try:
stash_rev = self._stash_exp(
*args, branch=branch, checkpoint_reset=force, **kwargs,
)
except UnchangedExperimentError as exc:
logger.info("Reproducing existing experiment '%s'.", rev[:7])
raise exc
logger.debug(
"Stashed experiment '%s' for future execution.", stash_rev[:7]
)
return stash_rev
def _resume_checkpoint(
    self, *args, checkpoint_resume: Optional[str] = None, **kwargs,
):
    """Resume an existing (checkpoint) experiment.

    Experiment will be reproduced and checked out into the user's
    workspace.

    checkpoint_resume is either self.LAST_CHECKPOINT (continue the most
    recently committed checkpoint) or a rev inside an existing
    checkpoint experiment branch.

    Returns the stash rev of the experiment staged for execution.
    Raises DvcException if no branch contains the given rev.
    """
    assert checkpoint_resume

    branch = None
    if checkpoint_resume == self.LAST_CHECKPOINT:
        # Continue from most recently committed checkpoint
        branch = self._get_last_checkpoint()
        resume_rev = self.scm.resolve_rev(branch)
    else:
        # resume from the specified rev; find its experiment branch
        rev = self.scm.resolve_rev(checkpoint_resume)
        resume_rev = rev
        branch = self._get_branch_containing(rev)
        if not branch:
            raise DvcException(
                "Could not find checkpoint experiment "
                f"'{checkpoint_resume}'"
            )
    baseline_rev = self._get_baseline(branch)

    if kwargs.get("params", None):
        # modified params: branch off the checkpoint rather than
        # continuing the existing experiment branch
        logger.debug(
            "Branching from checkpoint '%s' with modified params",
            checkpoint_resume,
        )
        rev = resume_rev
        branch = None
    else:
        logger.debug(
            "Continuing checkpoint experiment '%s'", checkpoint_resume
        )
        # continue from the tip of the experiment branch
        rev = self.scm.resolve_rev(branch)
        logger.debug(
            "Using '%s' (tip of branch '%s') as baseline", rev, branch
        )

    self._scm_checkout(rev)
    # the checkpoint commit already contains the workspace state
    kwargs["apply_workspace"] = False
    stash_rev = self._stash_exp(
        *args, baseline_rev=baseline_rev, branch=branch, **kwargs
    )
    logger.debug(
        "Stashed experiment '%s' for future execution.", stash_rev[:7]
    )
    return stash_rev
def _get_last_checkpoint(self):
for head in sorted(
self.scm.repo.heads,
key=lambda h: h.commit.committed_date,
reverse=True,
):
exp_branch = head.name
m = self.BRANCH_RE.match(exp_branch)
if m and m.group("checkpoint"):
return exp_branch
raise DvcException("No existing checkpoint experiment to continue")
@scm_locked
def reproduce(
    self,
    revs: Optional[Iterable] = None,
    keep_stash: Optional[bool] = True,
    **kwargs,
):
    """Reproduce the specified experiments.

    Args:
        revs: If revs is not specified, all stashed experiments will be
            reproduced.
        keep_stash: If True, stashed experiments will be preserved if they
            fail to reproduce successfully.

    Returns:
        dict mapping stash rev -> {experiment rev: experiment hash} for
        the successfully reproduced experiments.
    """
    stash_revs = self.stash_revs

    # to_run contains mapping of:
    #   input_rev: (stash_index, rev, baseline_rev)
    # where input_rev contains the changes to execute (usually a stash
    # commit), rev is the original SCM commit to be checked out, and
    # baseline_rev is the experiment baseline.
    if revs is None:
        to_run = dict(stash_revs)
    else:
        # non-stash revs get a placeholder StashEntry (no stash index)
        to_run = {
            rev: stash_revs[rev]
            if rev in stash_revs
            else self.StashEntry(None, rev, rev, None)
            for rev in revs
        }

    logger.debug(
        "Reproducing experiment revs '%s'",
        ", ".join((rev[:7] for rev in to_run)),
    )

    # setup executors - unstash experiment, generate executor, upload
    # contents of (unstashed) exp workspace to the executor tree
    executors = {}
    for rev, item in to_run.items():
        self._scm_checkout(item.rev)
        self.scm.repo.git.stash("apply", rev)
        packed_args, packed_kwargs = self._unpack_args()
        checkpoint_reset = packed_kwargs.pop("checkpoint_reset", False)
        executor = LocalExecutor(
            rev=item.rev,
            baseline_rev=item.baseline_rev,
            branch=item.branch,
            repro_args=packed_args,
            repro_kwargs=packed_kwargs,
            dvc_dir=self.dvc_dir,
            cache_dir=self.repo.cache.local.cache_dir,
            checkpoint_reset=checkpoint_reset,
        )
        self._collect_input(executor)
        executors[rev] = executor

    exec_results = self._reproduce(executors, **kwargs)

    if keep_stash:
        # only drop successfully run stashed experiments
        to_drop = sorted(
            (
                stash_revs[rev][0]
                for rev in exec_results
                if rev in stash_revs
            ),
            reverse=True,
        )
    else:
        # drop all stashed experiments
        to_drop = sorted(
            (stash_revs[rev][0] for rev in to_run if rev in stash_revs),
            reverse=True,
        )
    # drop from the highest stash index down so indices stay valid
    for index in to_drop:
        self.scm.repo.git.stash("drop", index)

    # flatten the per-executor result dicts into one mapping
    result = {}
    for _, exp_result in exec_results.items():
        result.update(exp_result)
    return result
def _reproduce(self, executors: dict, jobs: Optional[int] = 1) -> dict:
    """Run dvc repro for the specified ExperimentExecutors in parallel.

    Returns dict containing successfully executed experiments
    (stash rev -> {experiment rev: experiment hash}).
    """
    result: dict = {}
    # serializes SCM access from worker callbacks and collection
    collect_lock = threading.Lock()

    with ThreadPoolExecutor(max_workers=jobs) as workers:
        futures = {}
        for rev, executor in executors.items():
            # callback invoked by the executor after each checkpoint
            # iteration; commits the intermediate result under the lock
            checkpoint_func = partial(
                self._checkpoint_callback,
                result,
                collect_lock,
                rev,
                executor,
            )
            future = workers.submit(
                executor.reproduce,
                executor.dvc_dir,
                cwd=executor.dvc.root_dir,
                checkpoint_func=checkpoint_func,
                **executor.repro_kwargs,
            )
            futures[future] = (rev, executor)

        for future in as_completed(futures):
            rev, executor = futures[future]
            exc = future.exception()
            try:
                if exc is None:
                    # success: commit the executor's stages as an
                    # experiment and record it in `result`
                    stages = future.result()
                    self._collect_executor(
                        rev, executor, stages, result, collect_lock
                    )
                else:
                    # Checkpoint errors have already been logged
                    if not isinstance(exc, CheckpointKilledError):
                        logger.exception(
                            "Failed to reproduce experiment '%s'",
                            rev[:7],
                            exc_info=exc,
                        )
            finally:
                # always release the executor's temporary resources
                executor.cleanup()

    return result
def _collect_executor(self, rev, executor, stages, result, lock):
    """Commit a finished executor's stages as an experiment and record
    the outcome in `result` (guarded by `lock`)."""
    exp_hash = hash_exp(stages)
    has_checkpoint = any(stage.is_checkpoint for stage in stages)

    with lock:
        # NOTE: GitPython Repo instances cannot be re-used
        # after process has received SIGINT or SIGTERM, so we
        # need this hack to re-instantiate git instances after
        # checkpoint runs. See:
        # https://github.com/gitpython-developers/GitPython/issues/427
        del self.repo.scm
        del self.scm

        # check out the experiment branch if one exists, else the rev
        self._scm_checkout(executor.branch or executor.rev)
        exp_rev = self._collect_and_commit(
            rev, executor, exp_hash, checkpoint=has_checkpoint
        )
        if exp_rev:
            logger.info("Reproduced experiment '%s'.", exp_rev[:7])
            result[rev] = {exp_rev: exp_hash}
@no_type_check
def _checkpoint_callback(
    self,
    result: Mapping,
    lock: threading.Lock,
    rev: str,
    executor: LocalExecutor,
    unchanged: Iterable,
    stages: Iterable,
):
    """Commit one checkpoint iteration and record it in `result`
    (guarded by `lock`)."""
    with lock:
        # check out the experiment branch if one exists, else the rev
        self._scm_checkout(executor.branch or executor.rev)

        exp_hash = hash_exp(stages + unchanged)
        exp_rev = self._collect_and_commit(
            rev, executor, exp_hash, checkpoint=True
        )
        if exp_rev:
            if not executor.branch:
                # first iteration: remember the new experiment branch so
                # later iterations commit onto it
                executor.branch = self._get_branch_containing(exp_rev)
            logger.info(
                "Checkpoint experiment iteration '%s'.", exp_rev[:7]
            )
            result[rev] = {exp_rev: exp_hash}
def _collect_and_commit(self, rev, executor, exp_hash, **kwargs):
    """Download the executor's output into the workspace and commit it
    as an experiment.

    Returns the experiment rev (the existing rev when the experiment is
    unchanged), or None when output collection failed.
    """
    try:
        self._collect_output(executor)
    except DownloadError:
        logger.error(
            "Failed to collect output for experiment '%s'", rev,
        )
        return None
    finally:
        # the packed-args file is only needed while the executor runs
        if os.path.exists(self.args_file):
            remove(self.args_file)

    try:
        # only create a new experiment branch on the first commit
        create_branch = not executor.branch
        exp_rev = self._commit(
            exp_hash,
            baseline_rev=executor.baseline_rev,
            create_branch=create_branch,
            checkpoint_reset=executor.checkpoint_reset,
            **kwargs,
        )
    except UnchangedExperimentError as exc:
        # identical result already committed - reuse that rev
        logger.debug(
            "Experiment '%s' identical to '%s'", rev, exc.rev,
        )
        exp_rev = exc.rev
    return exp_rev
def _collect_input(self, executor: ExperimentExecutor):
    """Copy (upload) input from the experiments workspace to the executor
    tree.
    """
    logger.debug("Collecting input for '%s'", executor.tmp_dir)
    repo_tree = RepoTree(self.exp_dvc)
    files = executor.collect_files(self.exp_dvc.tree, repo_tree)
    self._process(executor.tree, self.exp_dvc.tree, files)
def _collect_output(self, executor: ExperimentExecutor):
    """Copy (download) output from the executor tree into experiments
    workspace.
    """
    logger.debug("Collecting output from '%s'", executor.tmp_dir)
    self._process(
        self.exp_dvc.tree,
        executor.tree,
        executor.collect_output(),
        download=True,
    )
@staticmethod
def _process(dest_tree, src_tree, collected_files, download=False):
    """Transfer `collected_files` from src_tree to dest_tree.

    With download=False files are uploaded into dest_tree; with
    download=True they are downloaded from src_tree. File permissions
    are preserved when src_tree exposes a `stat` method.
    Raises DownloadError/UploadError with the number of failures.
    """
    from dvc.remote.base import _log_exceptions

    # build parallel lists of source path, destination path and the
    # path relative to the source tree root (used as display name)
    from_infos = []
    to_infos = []
    names = []
    for from_info in collected_files:
        from_infos.append(from_info)
        fname = from_info.relative_to(src_tree.path_info)
        names.append(str(fname))
        to_infos.append(dest_tree.path_info / fname)
    total = len(from_infos)

    if download:
        func = partial(
            _log_exceptions(src_tree.download, "download"),
            dir_mode=dest_tree.dir_mode,
        )
        desc = "Downloading"
    else:
        func = partial(_log_exceptions(dest_tree.upload, "upload"))
        desc = "Uploading"

    with Tqdm(total=total, unit="file", desc=desc) as pbar:
        func = pbar.wrap_fn(func)
        # TODO: parallelize this, currently --jobs for repro applies to
        # number of repro executors not download threads
        with ThreadPoolExecutor(max_workers=1) as dl_executor:
            mode = None
            # preserve file permissions when the source supports stat()
            stat_func = getattr(src_tree, "stat", None)
            futures = []
            for from_info, to_info, name in zip(
                from_infos, to_infos, names
            ):
                if stat_func:
                    mode = stat.S_IMODE(stat_func(from_info).st_mode)
                futures.append(
                    dl_executor.submit(
                        func, from_info, to_info, name, file_mode=mode
                    )
                )
            # _log_exceptions-wrapped calls return a failure count
            # instead of raising
            fails = sum(
                future.result() for future in as_completed(futures)
            )
    if fails:
        if download:
            raise DownloadError(fails)
        raise UploadError(fails)
@scm_locked
def checkout_exp(self, rev, **kwargs):
    """Checkout an experiment to the user's workspace.

    Applies the diff between the experiment rev and its baseline as a
    patch on top of the user's workspace (stashing and restoring any
    dirty workspace changes around it), then runs dvc checkout when the
    patch changed anything.
    Raises DvcException when the patch cannot be applied.
    """
    from git.exc import GitCommandError

    from dvc.repo.checkout import checkout as dvc_checkout

    # verify the experiment belongs to the current workspace baseline
    baseline_rev = self._check_baseline(rev)
    self._scm_checkout(rev)

    branch = self._get_branch_containing(rev)
    m = self.BRANCH_RE.match(branch) if branch else None
    if m and m.group("checkpoint"):
        # checkpoint experiments may not contain every output yet
        kwargs.update({"allow_missing": True, "quiet": True})

    # write the experiment-vs-baseline diff to a temporary patch file
    tmp = tempfile.NamedTemporaryFile(delete=False).name
    self.scm.repo.head.commit.diff(
        baseline_rev, patch=True, full_index=True, binary=True, output=tmp
    )

    dirty = self.repo.scm.is_dirty(untracked_files=True)
    if dirty:
        logger.debug("Stashing workspace changes.")
        self.repo.scm.repo.git.stash("push", "--include-untracked")

    try:
        if os.path.getsize(tmp):
            logger.debug("Patching local workspace")
            # diff was taken towards the baseline, so apply it reversed
            self.repo.scm.repo.git.apply(tmp, reverse=True)
            need_checkout = True
        else:
            need_checkout = False
    except GitCommandError:
        raise DvcException("failed to apply experiment changes.")
    finally:
        remove(tmp)
        if dirty:
            self._unstash_workspace()
        # drop any leftover packed-args file from a previous run
        args_file = os.path.join(self.repo.tmp_dir, self.PACKED_ARGS_FILE)
        if os.path.exists(args_file):
            remove(args_file)

    if need_checkout:
        dvc_checkout(self.repo, **kwargs)
def _check_baseline(self, exp_rev):
baseline_sha = self.repo.scm.get_rev()
if exp_rev == baseline_sha:
return exp_rev
exp_baseline = self._get_baseline(exp_rev)
if exp_baseline is None:
# if we can't tell from branch name, fall back to parent commit
exp_commit = self.scm.repo.rev_parse(exp_rev)
exp_baseline = first(exp_commit.parents).hexsha
if exp_baseline == baseline_sha:
return exp_baseline
raise BaselineMismatchError(exp_baseline, baseline_sha)
    def _unstash_workspace(self):
        """Re-apply stashed workspace changes, preferring experiment files.

        The git commands below must run in exactly this order; do not reorder.
        """
        # Essentially we want `git stash pop` with `-X ours` merge strategy
        # to prefer the applied experiment changes over stashed workspace
        # changes. git stash doesn't support merge strategy parameters, but we
        # can do it ourselves with checkout/reset.
        from git.exc import GitCommandError
        logger.debug("Unstashing workspace changes.")
        git_repo = self.repo.scm.repo.git
        # stage workspace changes, then apply stashed changes on top
        git_repo.add(A=True)
        try:
            git_repo.stash("apply", "stash@{0}")
        except GitCommandError:
            # stash apply will return error code on merge conflicts,
            # prefer workspace changes over stash changes
            git_repo.checkout("--ours", "--", ".")
        # unstage changes and drop the stash entry
        git_repo.reset("HEAD")
        git_repo.stash("drop", "stash@{0}")
@scm_locked
def get_baseline(self, rev):
"""Return the baseline rev for an experiment rev."""
return self._get_baseline(rev)
def _get_baseline(self, rev):
from git.exc import GitCommandError
rev = self.scm.resolve_rev(rev)
try:
name = self.scm.repo.git.name_rev(rev, name_only=True)
except GitCommandError:
return None
if name | |
import glob
import os
from copy import copy, deepcopy
from dataclasses import dataclass, field
from typing import List, Optional, Tuple, Union
import imageio
import numpy as np
import pandas as pd
from PIL import Image
from fedot.core.data.load_data import JSONBatchLoader, TextBatchLoader
from fedot.core.data.merge import DataMerger
from fedot.core.data.supplementary_data import SupplementaryData
from fedot.core.repository.dataset_types import DataTypesEnum
from fedot.core.repository.tasks import Task, TaskTypesEnum
@dataclass
class Data:
    """
    Base Data type class
    """
    # Sample identifiers (one per row/observation)
    idx: np.array
    # Feature values; interpretation depends on ``data_type``
    features: np.array
    # Task definition (classification, regression, forecasting, ...)
    task: Task
    # How ``features`` should be interpreted (table, ts, text, image)
    data_type: DataTypesEnum
    # Object with supplementary info
    supplementary_data: SupplementaryData = field(default_factory=SupplementaryData)
    @staticmethod
    def from_csv(file_path=None,
                 delimiter=',',
                 task: Task = Task(TaskTypesEnum.classification),
                 data_type: DataTypesEnum = DataTypesEnum.table,
                 columns_to_drop: Optional[List] = None,
                 target_columns: Union[str, List] = ''):
        """
        Build an InputData table from a CSV file.

        :param file_path: the path to the CSV with data
        :param columns_to_drop: the names of columns that should be dropped
        :param delimiter: the delimiter to separate the columns
        :param task: the task that should be solved with data
        :param data_type: the type of data interpretation
        :param target_columns: name of target column (last column if empty and no target if None)
        :return:
        """
        data_frame = pd.read_csv(file_path, sep=delimiter)
        if columns_to_drop:
            data_frame = data_frame.drop(columns_to_drop, axis=1)
        # Get indices of the DataFrame
        # NOTE: the first column of the CSV is treated as the index column.
        data_array = np.array(data_frame).T
        idx = data_array[0]
        if isinstance(idx[0], float) and idx[0] == round(idx[0]):
            # if float indices is unnecessary
            # Whole-valued float ids ("1.0") are normalized to strings ("1").
            idx = [str(round(i)) for i in idx]
        if type(target_columns) is list:
            # Multi-target case: several named target columns.
            features, target = process_multiple_columns(target_columns, data_frame)
        else:
            features, target = process_one_column(target_columns, data_frame,
                                                  data_array)
        return InputData(idx=idx, features=features, target=target, task=task, data_type=data_type)
    @staticmethod
    def from_csv_time_series(task: Task,
                             file_path=None,
                             delimiter=',',
                             is_predict=False,
                             target_column: Optional[str] = ''):
        """Build time-series InputData from a CSV file.

        :param task: forecasting task (its params supply the forecast length)
        :param file_path: the path to the CSV with data
        :param delimiter: the delimiter to separate the columns
        :param is_predict: if True, produce indices for the forecast horizon
        :param target_column: name of the series column (last column if None)
        """
        df = pd.read_csv(file_path, sep=delimiter)
        idx = get_indices_from_file(df, file_path)
        # NOTE(review): with the default target_column='' this branch does
        # ``df['']`` and raises KeyError; callers appear to pass a real column
        # name or None — confirm the intended default.
        if target_column is not None:
            time_series = np.array(df[target_column])
        else:
            time_series = np.array(df[df.columns[-1]])
        if is_predict:
            # Prepare data for prediction
            len_forecast = task.task_params.forecast_length
            # Forecast indices continue directly after the known history.
            start_forecast = len(time_series)
            end_forecast = start_forecast + len_forecast
            input_data = InputData(idx=np.arange(start_forecast, end_forecast),
                                   features=time_series,
                                   target=None,
                                   task=task,
                                   data_type=DataTypesEnum.ts)
        else:
            # Prepare InputData for train the pipeline
            input_data = InputData(idx=idx,
                                   features=time_series,
                                   target=time_series,
                                   task=task,
                                   data_type=DataTypesEnum.ts)
        return input_data
    @staticmethod
    def from_image(images: Union[str, np.ndarray] = None,
                   labels: Union[str, np.ndarray] = None,
                   task: Task = Task(TaskTypesEnum.classification),
                   target_size: Optional[Tuple[int, int]] = None):
        """
        :param images: the path to the directory with image data in np.ndarray format or array in np.ndarray format
        :param labels: the path to the directory with image labels in np.ndarray format or array in np.ndarray format
        :param task: the task that should be solved with data
        :param target_size: size for the images resizing (if necessary)
        :return:
        """
        features = images
        target = labels
        if type(images) is str:
            # if upload from path
            if '*.jpeg' in images:
                # upload from folder of images
                # NOTE: _resize_image overwrites each file on disk with the
                # resized version before returning the array.
                path = images
                images_list = []
                for file_path in glob.glob(path):
                    if target_size is not None:
                        img = _resize_image(file_path, target_size)
                        images_list.append(img)
                    else:
                        raise ValueError('Set target_size for images')
                features = np.asarray(images_list)
                target = labels
            else:
                # upload from array
                # assumes ``images``/``labels`` are .npy paths — TODO confirm
                features = np.load(images)
                target = np.load(labels)
        idx = np.arange(0, len(features))
        return InputData(idx=idx, features=features, target=target, task=task, data_type=DataTypesEnum.image)
    @staticmethod
    def from_text_meta_file(meta_file_path: str = None,
                            label: str = 'label',
                            task: Task = Task(TaskTypesEnum.classification),
                            data_type: DataTypesEnum = DataTypesEnum.text):
        """Build text InputData from a CSV meta file with 'text' and label columns."""
        if os.path.isdir(meta_file_path):
            raise ValueError("""CSV file expected but got directory""")
        df_text = pd.read_csv(meta_file_path)
        # Shuffle rows to break any ordering present in the meta file.
        df_text = df_text.sample(frac=1).reset_index(drop=True)
        messages = df_text['text'].astype('U').tolist()
        features = np.array(messages)
        target = np.array(df_text[label])
        idx = [index for index in range(len(target))]
        return InputData(idx=idx, features=features,
                         target=target, task=task, data_type=data_type)
    @staticmethod
    def from_text_files(files_path: str,
                        label: str = 'label',
                        task: Task = Task(TaskTypesEnum.classification),
                        data_type: DataTypesEnum = DataTypesEnum.text):
        """Build text InputData by batch-loading documents from a directory."""
        if os.path.isfile(files_path):
            raise ValueError("""Path to the directory expected but got file""")
        df_text = TextBatchLoader(path=files_path).extract()
        features = np.array(df_text['text'])
        target = np.array(df_text[label])
        idx = [index for index in range(len(target))]
        return InputData(idx=idx, features=features,
                         target=target, task=task, data_type=data_type)
    @staticmethod
    def from_json_files(files_path: str,
                        fields_to_use: List,
                        label: str = 'label',
                        task: Task = Task(TaskTypesEnum.classification),
                        data_type: DataTypesEnum = DataTypesEnum.table,
                        export_to_meta=False, is_multilabel=False) -> 'InputData':
        """
        Generates InputData from the set of JSON files with different fields
        :param files_path: path the folder with jsons
        :param fields_to_use: list of fields that will be considered as a features
        :param label: name of field with target variable
        :param task: task to solve
        :param data_type: data type in fields (as well as type for obtained InputData)
        :param export_to_meta: combine extracted field and save to CSV
        :param is_multilabel: if True, creates multilabel target
        :return: combined dataset
        """
        if os.path.isfile(files_path):
            raise ValueError("""Path to the directory expected but got file""")
        df_data = JSONBatchLoader(path=files_path, label=label, fields_to_use=fields_to_use).extract(export_to_meta)
        if len(fields_to_use) > 1:
            # Stack every requested field column-wise into one feature table.
            fields_to_combine = []
            for f in fields_to_use:
                fields_to_combine.append(np.array(df_data[f]))
            features = np.column_stack(tuple(fields_to_combine))
        else:
            val = df_data[fields_to_use[0]]
            # process field with nested list
            if isinstance(val[0], list):
                val = [' '.join(v) for v in val]
            features = np.array(val)
        if is_multilabel:
            target = df_data[label]
            # Collect the full class vocabulary across all samples.
            classes = set()
            for el in target:
                # NOTE: this loop variable shadows the ``label`` parameter;
                # the parameter is not used again in this branch.
                for label in el:
                    classes.add(label)
            count_classes = list(sorted(classes))
            # Binary indicator matrix: rows = samples, columns = classes.
            multilabel_target = np.zeros((len(features), len(count_classes)))
            for i in range(len(target)):
                for el in target[i]:
                    multilabel_target[i][count_classes.index(el)] = 1
            target = multilabel_target
        else:
            target = np.array(df_data[label])
        idx = [index for index in range(len(target))]
        return InputData(idx=idx, features=features,
                         target=target, task=task, data_type=data_type)
@dataclass
class InputData(Data):
    """
    Data class for input data for the nodes
    """
    # Ground-truth values; None for prediction-only data
    target: Optional[np.array] = None
    @property
    def num_classes(self) -> Optional[int]:
        # Number of unique classes for classification tasks; None otherwise.
        if self.task.task_type == TaskTypesEnum.classification and self.target is not None:
            return len(np.unique(self.target))
        else:
            return None
    @staticmethod
    def from_predictions(outputs: List['OutputData']):
        """ Method obtain predictions from previous nodes """
        # Update not only features but idx, target and task also
        idx, features, target, task, d_type, updated_info = DataMerger(outputs).merge()
        # Mark data as preprocessed already
        updated_info.was_preprocessed = True
        return InputData(idx=idx, features=features, target=target, task=task,
                         data_type=d_type, supplementary_data=updated_info)
    def subset_range(self, start: int, end: int):
        """Return a copy with rows ``start``..``end`` (both bounds inclusive)."""
        if not (0 <= start <= end <= len(self.idx)):
            raise ValueError('Incorrect boundaries for subset')
        new_features = None
        if self.features is not None:
            new_features = self.features[start:end + 1]
        # NOTE(review): supplementary_data is not propagated into the subset —
        # confirm whether this is intentional.
        return InputData(idx=self.idx[start:end + 1], features=new_features,
                         target=self.target[start:end + 1],
                         task=self.task, data_type=self.data_type)
    def subset_indices(self, selected_idx: List):
        """
        Get subset from InputData to extract all items with specified indices
        :param selected_idx: list of indices for extraction
        :return:
        """
        # Compare indices as strings so numeric and string ids interoperate.
        idx_list = [str(i) for i in self.idx]
        # extractions of row number for each existing index from selected_idx
        row_nums = [idx_list.index(str(selected_ind)) for selected_ind in selected_idx
                    if str(selected_ind) in idx_list]
        new_features = None
        if self.features is not None:
            new_features = self.features[row_nums]
        return InputData(idx=np.asarray(self.idx)[row_nums], features=new_features,
                         target=self.target[row_nums],
                         task=self.task, data_type=self.data_type)
    def subset_features(self, features_ids: list):
        """ Return new InputData with subset of features based on features_ids list """
        # Column-wise selection; rows, target and metadata are kept as-is.
        subsample_features = self.features[:, features_ids]
        subsample_input = InputData(features=subsample_features,
                                    data_type=self.data_type,
                                    target=self.target, task=self.task,
                                    idx=self.idx,
                                    supplementary_data=self.supplementary_data)
        return subsample_input
    def shuffle(self):
        """
        Shuffles features and target if possible
        """
        # Only tabular data is shuffled; other data types keep their order.
        if self.data_type == DataTypesEnum.table:
            shuffled_ind = np.random.permutation(len(self.features))
            idx, features, target = np.asarray(self.idx)[shuffled_ind], self.features[shuffled_ind], self.target[
                shuffled_ind]
            self.idx = idx
            self.features = features
            self.target = target
        else:
            pass
    def convert_non_int_indexes_for_fit(self, pipeline):
        """ Conversion non int (datetime, string, etc) indexes in integer form in fit stage """
        copied_data = deepcopy(self)
        is_timestamp = isinstance(copied_data.idx[0], pd._libs.tslibs.timestamps.Timestamp)
        is_numpy_datetime = isinstance(copied_data.idx[0], np.datetime64)
        # if fit stage- just creating range of integers
        if is_timestamp or is_numpy_datetime:
            # Remember the original datetime index and the sampling period so
            # predict-stage indices can be mapped back (see _resolve_func).
            copied_data.supplementary_data.non_int_idx = copy(copied_data.idx)
            copied_data.idx = np.array(range(len(copied_data.idx)))
            last_idx_time = copied_data.supplementary_data.non_int_idx[-1]
            pre_last_time = copied_data.supplementary_data.non_int_idx[-2]
            pipeline.last_idx_int = copied_data.idx[-1]
            pipeline.last_idx_dt = last_idx_time
            # assumes a uniformly sampled series — TODO confirm
            pipeline.period = last_idx_time - pre_last_time
        elif type(copied_data.idx[0]) not in [int, np.int32, np.int64]:
            # Non-datetime, non-integer (e.g. string) indexes: replace with a
            # plain integer range.
            copied_data.supplementary_data.non_int_idx = copy(copied_data.idx)
            copied_data.idx = np.array(range(len(copied_data.idx)))
            pipeline.last_idx_int = copied_data.idx[-1]
        return copied_data
    def convert_non_int_indexes_for_predict(self, pipeline):
        """Conversion non int (datetime, string, etc) indexes in integer form in predict stage"""
        copied_data = deepcopy(self)
        is_timestamp = isinstance(copied_data.idx[0], pd._libs.tslibs.timestamps.Timestamp)
        is_numpy_datetime = isinstance(copied_data.idx[0], np.datetime64)
        # if predict stage - calculating shift from last train part index
        if is_timestamp or is_numpy_datetime:
            copied_data.supplementary_data.non_int_idx = copy(self.idx)
            copied_data.idx = self._resolve_non_int_idx(pipeline)
        elif type(copied_data.idx[0]) not in [int, np.int32, np.int64]:
            # note, that string indexes do not have an order and always we think that indexes we want to predict go
            # immediately after the train indexes
            copied_data.supplementary_data.non_int_idx = copy(copied_data.idx)
            copied_data.idx = pipeline.last_idx_int + np.array(range(1, len(copied_data.idx)+1))
        return copied_data
    @staticmethod
    def _resolve_func(pipeline, x):
        # Map a datetime index to its integer position using the period
        # recorded during fit.
        return pipeline.last_idx_int + (x - pipeline.last_idx_dt) // pipeline.period
    def _resolve_non_int_idx(self, pipeline):
        # Vectorized application of _resolve_func over the whole index.
        return np.array(list(map(lambda x: self._resolve_func(pipeline, x), self.idx)))
@dataclass
class OutputData(Data):
    """
    Data type for data prediction in the node
    """
    # Predictions produced by the node
    predict: np.array = None
    # Ground-truth values (if available) for downstream evaluation
    target: Optional[np.array] = None
def _resize_image(file_path: str, target_size: tuple):
    """Resize the image at ``file_path`` in place and return it as an array.

    The resized image overwrites the original file on disk. Three-channel
    images are collapsed to one channel by summing the channels.
    """
    source = Image.open(file_path)
    resized = source.resize(target_size, Image.NEAREST)
    # Persist the resized image before re-reading it as an array.
    resized.save(file_path, 'jpeg')
    img = np.asarray(imageio.imread(file_path, 'jpeg'))
    # TODO refactor for multi-color
    if img.ndim == 3:
        img = img[..., 0] + img[..., 1] + img[..., 2]
    return img
def process_one_column(target_column, data_frame, data_array):
""" | |
# Repository: DAQuestionAnswering/Bert-n-Pals
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
from itertools import cycle
import os
import logging
import argparse
import random
from tqdm import tqdm, trange
import numpy as np
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
import tokenization
from modeling import BertConfig, BertForSequenceClassification, BertForMultiTask
from optimization import BERTAdam
# Module-wide logging configuration: timestamped INFO-level messages.
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)
logger = logging.getLogger(__name__)
class InputExample(object):
    """One training/test example for simple sequence classification."""
    def __init__(self, guid, text_a, text_b=None, label=None):
        """Store the fields of a single example.

        Args:
          guid: Unique id for the example.
          text_a: string. Untokenized text of the first sequence. For
            single-sequence tasks only this sequence is needed.
          text_b: (Optional) string. Untokenized text of the second sequence;
            only required for sequence-pair tasks.
          label: (Optional) string. Gold label — present for train/dev
            examples, absent for test examples.
        """
        self.guid, self.label = guid, label
        self.text_a, self.text_b = text_a, text_b
class InputFeatures(object):
    """One fully-converted feature set (ids, mask, segments, label) for a single example."""
    def __init__(self, input_ids, input_mask, segment_ids, label_id):
        # Plain record: store every field verbatim.
        self.input_ids = input_ids
        self.segment_ids = segment_ids
        self.input_mask = input_mask
        self.label_id = label_id
class DataProcessor(object):
    """Abstract base for converters of sequence-classification data sets."""
    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()
    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()
    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()
    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Read a tab-separated file into a list of row lists."""
        with open(input_file, "r") as f:
            # csv.reader yields one list of fields per line; materialize all.
            return list(csv.reader(f, delimiter="\t", quotechar=quotechar))
class MrpcProcessor(DataProcessor):
    """Processor for the MRPC data set (GLUE version)."""
    def get_train_examples(self, data_dir):
        """See base class."""
        train_path = os.path.join(data_dir, "train.tsv")
        logger.info("LOOKING AT {}".format(train_path))
        return self._create_examples(self._read_tsv(train_path), "train")
    def get_dev_examples(self, data_dir):
        """See base class."""
        dev_path = os.path.join(data_dir, "dev.tsv")
        return self._create_examples(self._read_tsv(dev_path), "dev")
    def get_labels(self):
        """See base class."""
        return ["0", "1"]
    def _create_examples(self, lines, set_type):
        """Build InputExamples from parsed TSV rows (header row skipped)."""
        return [
            InputExample(guid="%s-%s" % (set_type, i),
                         text_a=tokenization.convert_to_unicode(line[3]),
                         text_b=tokenization.convert_to_unicode(line[4]),
                         label=tokenization.convert_to_unicode(line[0]))
            for i, line in enumerate(lines)
            if i != 0
        ]
class MnliProcessor(DataProcessor):
    """Processor for the MultiNLI data set (GLUE version)."""
    def get_train_examples(self, data_dir):
        """See base class."""
        train_path = os.path.join(data_dir, "train.tsv")
        return self._create_examples(self._read_tsv(train_path), "train")
    def get_dev_examples(self, data_dir):
        """See base class."""
        dev_path = os.path.join(data_dir, "dev_matched.tsv")
        return self._create_examples(self._read_tsv(dev_path), "dev_matched")
    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]
    def _create_examples(self, lines, set_type):
        """Build InputExamples from parsed TSV rows (header row skipped)."""
        return [
            InputExample(
                # MNLI guids embed the pair id from the first column.
                guid="%s-%s" % (set_type, tokenization.convert_to_unicode(line[0])),
                text_a=tokenization.convert_to_unicode(line[8]),
                text_b=tokenization.convert_to_unicode(line[9]),
                label=tokenization.convert_to_unicode(line[-1]))
            for i, line in enumerate(lines)
            if i != 0
        ]
class STSProcessor(DataProcessor):
    """Processor for the STS-B data set (GLUE version)."""
    def get_train_examples(self, data_dir):
        """See base class."""
        train_path = os.path.join(data_dir, "train.tsv")
        return self._create_examples(self._read_tsv(train_path), "train")
    def get_dev_examples(self, data_dir):
        """See base class."""
        dev_path = os.path.join(data_dir, "dev.tsv")
        return self._create_examples(self._read_tsv(dev_path), "dev")
    def get_labels(self):
        """See base class."""
        # STS-B is a regression task: there is no closed label set.
        return ['None']
    def _create_examples(self, lines, set_type):
        """Build InputExamples from parsed TSV rows (header row skipped)."""
        return [
            InputExample(guid="%s-%s" % (set_type, i),
                         text_a=tokenization.convert_to_unicode(line[7]),
                         text_b=tokenization.convert_to_unicode(line[8]),
                         label=tokenization.convert_to_unicode(line[-1]))
            for i, line in enumerate(lines)
            if i != 0
        ]
class QQPProcessor(DataProcessor):
    """Processor for the QQP data set (GLUE version)."""
    def get_train_examples(self, data_dir):
        """See base class."""
        train_path = os.path.join(data_dir, "train.tsv")
        return self._create_examples(self._read_tsv(train_path), "train")
    def get_dev_examples(self, data_dir):
        """See base class."""
        dev_path = os.path.join(data_dir, "dev.tsv")
        return self._create_examples(self._read_tsv(dev_path), "dev")
    def get_labels(self):
        """See base class."""
        return ["0", "1"]
    def _create_examples(self, lines, set_type):
        """Build InputExamples, skipping the header and malformed rows."""
        # Some QQP rows do not have exactly 6 fields; those are dropped.
        return [
            InputExample(guid="%s-%s" % (set_type, i),
                         text_a=tokenization.convert_to_unicode(line[3]),
                         text_b=tokenization.convert_to_unicode(line[4]),
                         label=tokenization.convert_to_unicode(line[-1]))
            for i, line in enumerate(lines)
            if i != 0 and len(line) == 6
        ]
class QNLIProcessor(DataProcessor):
    """Processor for the QNLI data set (GLUE version)."""
    def get_train_examples(self, data_dir):
        """See base class."""
        train_path = os.path.join(data_dir, "train.tsv")
        return self._create_examples(self._read_tsv(train_path), "train")
    def get_dev_examples(self, data_dir):
        """See base class."""
        dev_path = os.path.join(data_dir, "dev.tsv")
        return self._create_examples(self._read_tsv(dev_path), "dev")
    def get_labels(self):
        """See base class."""
        return ["not_entailment", "entailment"]
    def _create_examples(self, lines, set_type):
        """Build InputExamples from parsed TSV rows (header row skipped)."""
        return [
            InputExample(guid="%s-%s" % (set_type, i),
                         text_a=tokenization.convert_to_unicode(line[1]),
                         text_b=tokenization.convert_to_unicode(line[2]),
                         label=tokenization.convert_to_unicode(line[-1]))
            for i, line in enumerate(lines)
            if i != 0
        ]
class RTEProcessor(DataProcessor):
    """Processor for the RTE data set (GLUE version)."""
    def get_train_examples(self, data_dir):
        """See base class."""
        train_path = os.path.join(data_dir, "train.tsv")
        return self._create_examples(self._read_tsv(train_path), "train")
    def get_dev_examples(self, data_dir):
        """See base class."""
        dev_path = os.path.join(data_dir, "dev.tsv")
        return self._create_examples(self._read_tsv(dev_path), "dev")
    def get_labels(self):
        """See base class."""
        return ["not_entailment", "entailment"]
    def _create_examples(self, lines, set_type):
        """Build InputExamples from parsed TSV rows (header row skipped)."""
        return [
            InputExample(guid="%s-%s" % (set_type, i),
                         text_a=tokenization.convert_to_unicode(line[1]),
                         text_b=tokenization.convert_to_unicode(line[2]),
                         label=tokenization.convert_to_unicode(line[-1]))
            for i, line in enumerate(lines)
            if i != 0
        ]
class SSTProcessor(DataProcessor):
    """Processor for the SST-2 data set (GLUE version)."""
    def get_train_examples(self, data_dir):
        """See base class."""
        train_path = os.path.join(data_dir, "train.tsv")
        return self._create_examples(self._read_tsv(train_path), "train")
    def get_dev_examples(self, data_dir):
        """See base class."""
        dev_path = os.path.join(data_dir, "dev.tsv")
        return self._create_examples(self._read_tsv(dev_path), "dev")
    def get_labels(self):
        """See base class."""
        return ["0", "1"]
    def _create_examples(self, lines, set_type):
        """Build single-sentence InputExamples (header row skipped)."""
        return [
            InputExample(guid="%s-%s" % (set_type, i),
                         text_a=tokenization.convert_to_unicode(line[0]),
                         text_b=None,
                         label=tokenization.convert_to_unicode(line[1]))
            for i, line in enumerate(lines)
            if i != 0
        ]
class ColaProcessor(DataProcessor):
    """Processor for the CoLA data set (GLUE version)."""
    def get_train_examples(self, data_dir):
        """See base class."""
        train_path = os.path.join(data_dir, "train.tsv")
        return self._create_examples(self._read_tsv(train_path), "train")
    def get_dev_examples(self, data_dir):
        """See base class."""
        dev_path = os.path.join(data_dir, "dev.tsv")
        return self._create_examples(self._read_tsv(dev_path), "dev")
    def get_labels(self):
        """See base class."""
        return ["0", "1"]
    def _create_examples(self, lines, set_type):
        """Build single-sentence InputExamples (CoLA files have no header)."""
        return [
            InputExample(guid="%s-%s" % (set_type, i),
                         text_a=tokenization.convert_to_unicode(line[3]),
                         text_b=None,
                         label=tokenization.convert_to_unicode(line[1]))
            for i, line in enumerate(lines)
        ]
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, task='none'):
"""Loads a data file into a list of `InputBatch`s."""
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambigiously separates the sequences, but | |
import logging
import math
import slidingwindow as sw
import cv2
import numpy as np
import tensorflow as tf
import time
from tf_pose import common
from tf_pose.common import CocoPart
from tf_pose.tensblur.smoother import Smoother
import datetime
## Initialize variables starts here
sess = None
def initialize_variables():
    """Build the TF1 LSTM classification graph, restore weights, start a session.

    Mutates module-level globals (``x``, ``y``, ``pred``, ``cost``, ``accuracy``,
    ``optimizer``, ``sess``, ...) so that getPose() can run inference later.
    Returns (sess, accuracy, pred, optimizer).
    """
    global x,y,LABELS, n_steps, n_input, n_hidden, n_classes, pred,cost, pred, accuracy, optimizer,sess
    # Action classes predicted by the network (index = argmax of logits).
    LABELS = ["NO ACTION", "JUMPING_JACKS", "BOXING", "waving 2 hands", "waving 1 hand", "clapping"]
    # 32 time steps of 36 pose values (18 keypoints x 2 coords) per sample.
    n_steps = 32
    n_input = 36
    n_hidden = 34
    n_classes = 6
    decaying_learning_rate = True
    learning_rate = 0.0025 #used if decaying_learning_rate set to False
    init_learning_rate = 0.005
    decay_rate = 0.96 #the base of the exponential in the decay
    decay_steps = 100000 #used in decay every 60000 steps with a base of 0.96
    global_step = tf.Variable(0, trainable=False)
    lambda_loss_amount = 0.0015
    batch_size = 512
    # Graph inputs: batched pose sequences and one-hot labels.
    x = tf.placeholder(tf.float32, [None, n_steps, n_input])
    y = tf.placeholder(tf.float32, [None, n_classes])
    weights = {
        'hidden': tf.Variable(tf.random_normal([n_input, n_hidden])), # Hidden layer weights
        'out': tf.Variable(tf.random_normal([n_hidden, n_classes], mean=1.0))
    }
    biases = {
        'hidden': tf.Variable(tf.random_normal([n_hidden])),
        'out': tf.Variable(tf.random_normal([n_classes]))
    }
    pred = LSTM_RNN(x, weights, biases)
    # L2 regularization over every trainable variable.
    l2 = lambda_loss_amount * sum(
        tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables()
    )
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred)) + l2
    if decaying_learning_rate:
        learning_rate = tf.train.exponential_decay(init_learning_rate, global_step*batch_size, decay_steps, decay_rate, staircase=True)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost,global_step=global_step) # Adam Optimizer
    correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    # NOTE(review): the four lists below are never used in this function.
    test_losses = []
    test_accuracies = []
    train_losses = []
    train_accuracies = []
    # Create the session and restore the pretrained checkpoint.
    sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=False))
    init = tf.global_variables_initializer()
    sess.run(init)
    saver = tf.train.Saver()
    saver.restore(sess, "./new_model/new_model.ckpt")
    print("Variables and model initialized")
    return (sess, accuracy, pred, optimizer)
def getPose(X):
    """Classify a pose sequence into an action label.

    Runs the restored LSTM graph on ``X`` (assumes shape
    (1, n_steps, n_input) — see load_X) and returns the predicted action
    name, or "No Action" when the best logit does not clear the threshold.
    """
    one_hot_predictions, accuracy1, final_loss = sess.run(
        [pred, accuracy, cost],
        feed_dict={
            x: X,
            # Dummy target: only required so accuracy/cost can be fetched.
            y: np.array([[0. , 0., 0., 0., 0., 2.]])
        }
    )
    score = np.amax(one_hot_predictions[0])
    # Threshold on the raw best logit to suppress low-confidence predictions.
    if score > 4.5:
        # fix: original had a redundant double assignment (label = label = ...)
        label = LABELS[np.argmax(one_hot_predictions[0])]
    else:
        label = "No Action"
    return label
def load_X(X):
    """Convert one (n_steps, n_input) sample to float32 and add a batch axis."""
    sample = np.asarray(X, dtype=np.float32)
    return sample[np.newaxis, :, :]
# Load the networks outputs
def load_y(y_path):
    """Load label rows from ``y_path`` as 0-based class indices.

    Each line holds space-separated integer labels; on-disk labels are
    1-based, so 1 is subtracted before returning.
    """
    # "with" guarantees the file is closed even if parsing raises
    # (the original left the handle open on error).
    with open(y_path, 'r') as file:
        y_ = np.array(
            [row.replace(' ', ' ').strip().split(' ') for row in file],
            dtype=np.int32
        )
    # for 0-based indexing
    return y_ - 1
def LSTM_RNN(_X, _weights, _biases):
    """Two-layer LSTM classifier head (TF1 graph construction).

    Takes a (batch, n_steps, n_input) placeholder and returns the logits
    tensor of shape (batch, n_classes). Relies on module globals
    ``n_steps``/``n_input``/``n_hidden`` set by initialize_variables().
    """
    # model architecture based on "guillaume-chevalier" and "aymericdamien" under the MIT license.
    _X = tf.transpose(_X, [1, 0, 2])  # permute n_steps and batch_size
    _X = tf.reshape(_X, [-1, n_input])
    # Rectifies Linear Unit activation function used
    _X = tf.nn.relu(tf.matmul(_X, _weights['hidden']) + _biases['hidden'])
    # Split data because rnn cell needs a list of inputs for the RNN inner loop
    _X = tf.split(_X, n_steps, 0)
    # Define two stacked LSTM cells (two recurrent layers deep) with tensorflow
    lstm_cell_1 = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
    lstm_cell_2 = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
    lstm_cells = tf.contrib.rnn.MultiRNNCell([lstm_cell_1, lstm_cell_2], state_is_tuple=True)
    outputs, states = tf.contrib.rnn.static_rnn(lstm_cells, _X, dtype=tf.float32)
    # A single output is produced, in style of "many to one" classifier, refer to http://karpathy.github.io/2015/05/21/rnn-effectiveness/ for details
    lstm_last_output = outputs[-1]
    # Linear activation
    return tf.matmul(lstm_last_output, _weights['out']) + _biases['out']
def extract_batch_size(_train, _labels, _unsampled, batch_size):
    """Draw ``batch_size`` samples without replacement from the training set.

    Elements of the batch are chosen randomly from ``_train`` (with the
    matching rows of ``_labels``). ``_unsampled`` is the list of still
    available row indices; chosen indices are removed from it in place.
    Returns (batch_features, batch_labels, remaining_indices).
    """
    # fix: ``random`` is not imported at module level in this file, which
    # made the original raise NameError at runtime; keep the fix local.
    import random
    shape = list(_train.shape)
    shape[0] = batch_size
    batch_s = np.empty(shape)
    batch_labels = np.empty((batch_size, 1))
    for i in range(batch_size):
        # Sample one unused row index and consume it.
        index = random.choice(_unsampled)
        batch_s[i] = _train[index]
        batch_labels[i] = _labels[index]
        _unsampled.remove(index)
    return batch_s, batch_labels, _unsampled
def one_hot(y_):
    """One-hot encode class indices.

    e.g.: [[5], [0], [3]] --> [[0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]]
    """
    flat = y_.reshape(len(y_))
    # Width of the encoding is determined by the largest class index seen.
    width = int(np.max(flat)) + 1
    return np.eye(width)[np.array(flat, dtype=np.int32)]  # Returns FLOATS
## Code ends here
# The pafprocess extension is a compiled C++ module; abort with build
# instructions if it has not been built for this checkout.
try:
    from tf_pose.pafprocess import pafprocess
except ModuleNotFoundError as e:
    print(e)
    print('you need to build c++ library for pafprocess. See : https://github.com/ildoonet/tf-pose-estimation/tree/master/tf_pose/pafprocess')
    exit(-1)
# Dedicated logger for the pose estimator with its own console handler.
logger = logging.getLogger('TfPoseEstimator')
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
def _round(v):
return int(round(v))
def _include_part(part_list, part_idx):
for part in part_list:
if part_idx == part.part_idx:
return True, part
return False, None
class Human:
"""
body_parts: list of BodyPart
"""
__slots__ = ('body_parts', 'pairs', 'uidx_list', 'score')
def __init__(self, pairs):
self.pairs = []
self.uidx_list = set()
self.body_parts = {}
for pair in pairs:
self.add_pair(pair)
self.score = 0.0
@staticmethod
def _get_uidx(part_idx, idx):
return '%d-%d' % (part_idx, idx)
def add_pair(self, pair):
    """Record a connection pair and register both of its endpoint body parts."""
    self.pairs.append(pair)
    # Each endpoint becomes (or overwrites) the BodyPart entry for its part index
    # and contributes its unique id to uidx_list.
    endpoints = ((pair.part_idx1, pair.idx1, pair.coord1),
                 (pair.part_idx2, pair.idx2, pair.coord2))
    for part_idx, idx, coord in endpoints:
        uidx = Human._get_uidx(part_idx, idx)
        self.body_parts[part_idx] = BodyPart(uidx, part_idx,
                                             coord[0], coord[1], pair.score)
        self.uidx_list.add(uidx)
def is_connected(self, other):
    """True if this human shares at least one part unique-id with ``other``."""
    return not self.uidx_list.isdisjoint(other.uidx_list)
def merge(self, other):
    """Absorb all pairs (and thus body parts) from ``other`` into this human."""
    for shared_pair in other.pairs:
        self.add_pair(shared_pair)
def part_count(self):
    """Number of distinct body parts detected for this human."""
    return len(self.body_parts)
def get_max_score(self):
    """Highest confidence score among this human's body parts.

    Raises ValueError when no body parts are present (same as the original max).
    """
    return max(part.score for part in self.body_parts.values())
def get_face_box(self, img_w, img_h, mode=0):
    """
    Get Face box compared to img size (w, h)
    :param img_w: image width in pixels
    :param img_h: image height in pixels
    :param mode: 0 -> return a center-based box; 1 -> return a corner-based
        box and additionally require at least one eye to be detected
    :return: dict with keys "x", "y", "w", "h" (pixels), or None when no
        face box can be estimated
    """
    # SEE : https://github.com/ildoonet/tf-pose-estimation/blob/master/tf_pose/common.py#L13
    _NOSE = CocoPart.Nose.value
    _NECK = CocoPart.Neck.value
    _REye = CocoPart.REye.value
    _LEye = CocoPart.LEye.value
    _REar = CocoPart.REar.value
    _LEar = CocoPart.LEar.value
    _THRESHOLD_PART_CONFIDENCE = 0.2
    # Only keep parts detected with sufficient confidence.
    parts = [part for idx, part in self.body_parts.items() if part.score > _THRESHOLD_PART_CONFIDENCE]
    # The box is anchored on the nose; without it there is no face to localize.
    is_nose, part_nose = _include_part(parts, _NOSE)
    if not is_nose:
        return None
    # Estimate the face size from several cues, keeping the largest estimate.
    # NOTE(review): part coordinates appear to be normalized (multiplied by
    # img_w/img_h to get pixels) — confirm against BodyPart's definition.
    size = 0
    is_neck, part_neck = _include_part(parts, _NECK)
    if is_neck:
        # Nose-to-neck vertical distance as a size cue.
        size = max(size, img_h * (part_neck.y - part_nose.y) * 0.8)
    is_reye, part_reye = _include_part(parts, _REye)
    is_leye, part_leye = _include_part(parts, _LEye)
    if is_reye and is_leye:
        # Inter-eye distance (horizontal-only and Euclidean) as size cues.
        size = max(size, img_w * (part_reye.x - part_leye.x) * 2.0)
        size = max(size,
                   img_w * math.sqrt((part_reye.x - part_leye.x) ** 2 + (part_reye.y - part_leye.y) ** 2) * 2.0)
    if mode == 1:
        # Strict mode: require at least one eye.
        if not is_reye and not is_leye:
            return None
    is_rear, part_rear = _include_part(parts, _REar)
    is_lear, part_lear = _include_part(parts, _LEar)
    if is_rear and is_lear:
        # Ear-to-ear distance as a size cue.
        size = max(size, img_w * (part_rear.x - part_lear.x) * 1.6)
    if size <= 0:
        return None
    # Horizontal placement: shift the box toward the visible eye when only
    # one eye is detected, otherwise center it on the nose.
    if not is_reye and is_leye:
        x = part_nose.x * img_w - (size // 3 * 2)
    elif is_reye and not is_leye:
        x = part_nose.x * img_w - (size // 3)
    else:  # is_reye and is_leye:
        x = part_nose.x * img_w - size // 2
    x2 = x + size
    if mode == 0:
        y = part_nose.y * img_h - size // 3
    else:
        y = part_nose.y * img_h - _round(size / 2 * 1.2)
    y2 = y + size
    # fit into the image frame
    x = max(0, x)
    y = max(0, y)
    x2 = min(img_w - x, x2 - x) + x
    y2 = min(img_h - y, y2 - y) + y
    # Reject degenerate (zero-width or zero-height) boxes.
    if _round(x2 - x) == 0.0 or _round(y2 - y) == 0.0:
        return None
    if mode == 0:
        # Center-based box: (x, y) is the box center.
        return {"x": _round((x + x2) / 2),
                "y": _round((y + y2) / 2),
                "w": _round(x2 - x),
                "h": _round(y2 - y)}
    else:
        # Corner-based box: (x, y) is the top-left corner.
        return {"x": _round(x),
                "y": _round(y),
                "w": _round(x2 - x),
                "h": _round(y2 - y)}
def get_upper_body_box(self, img_w, img_h):
"""
Get Upper body box compared to img size (w, h)
:param img_w:
:param img_h:
:return:
"""
if not (img_w > 0 and img_h > 0):
raise Exception("img size should be positive")
_NOSE = CocoPart.Nose.value
_NECK = CocoPart.Neck.value
_RSHOULDER = CocoPart.RShoulder.value
_LSHOULDER = CocoPart.LShoulder.value
_THRESHOLD_PART_CONFIDENCE = 0.3
parts = [part for idx, part in self.body_parts.items() if part.score > _THRESHOLD_PART_CONFIDENCE]
part_coords = [(img_w * part.x, img_h * part.y) for part in parts if
part.part_idx in [0, 1, 2, 5, 8, 11, 14, 15, 16, 17]]
if len(part_coords) < 5:
return None
# Initial Bounding Box
x = min([part[0] for part in part_coords])
y = min([part[1] for part in part_coords])
x2 = max([part[0] for part in part_coords])
y2 = max([part[1] for part in part_coords])
# # ------ Adjust heuristically +
# if face points are detcted, adjust y value
is_nose, part_nose = _include_part(parts, _NOSE)
is_neck, part_neck = _include_part(parts, _NECK)
torso_height = 0
if is_nose and is_neck:
y -= (part_neck.y * img_h - y) * 0.8
torso_height = max(0, (part_neck.y - part_nose.y) * img_h * 2.5)
#
# # by using shoulder position, adjust width
is_rshoulder, part_rshoulder = | |
Player 0 rolls 1 dice and gets outcomes [6].
End scores = (103, 24)
>>> print(turns[7])
Game Over
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=8964, score0=79, score1=56, goal=83, feral_hogs=False)
>>> print(turns[0])
Start scores = (79, 56).
Player 0 rolls 7 dice and gets outcomes [5, 2, 3, 6, 6, 1, 6].
End scores = (80, 56)
>>> print(turns[1])
Start scores = (80, 56).
Player 1 rolls 4 dice and gets outcomes [1, 2, 5, 1].
End scores = (80, 57)
>>> print(turns[2])
Start scores = (80, 57).
Player 0 rolls 9 dice and gets outcomes [2, 5, 6, 3, 5, 6, 6, 1, 4].
End scores = (81, 57)
>>> print(turns[3])
Start scores = (81, 57).
Player 1 rolls 8 dice and gets outcomes [6, 3, 3, 3, 3, 2, 6, 3].
End scores = (81, 86)
>>> print(turns[4])
Game Over
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=24932, score0=12, score1=0, goal=14, feral_hogs=False)
>>> print(turns[0])
Start scores = (12, 0).
Player 0 rolls 6 dice and gets outcomes [1, 1, 1, 3, 3, 2].
End scores = (13, 0)
>>> print(turns[1])
Start scores = (13, 0).
Player 1 rolls 8 dice and gets outcomes [4, 1, 5, 4, 3, 3, 5, 1].
End scores = (13, 1)
>>> print(turns[2])
Start scores = (13, 1).
Player 0 rolls 4 dice and gets outcomes [5, 3, 2, 3].
End scores = (1, 26)
>>> print(turns[3])
Game Over
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=76726, score0=40, score1=73, goal=93, feral_hogs=False)
>>> print(turns[0])
Start scores = (40, 73).
Player 0 rolls 5 dice and gets outcomes [6, 6, 1, 2, 5].
End scores = (41, 73)
>>> print(turns[1])
Start scores = (41, 73).
Player 1 rolls 3 dice and gets outcomes [6, 1, 4].
End scores = (74, 41)
>>> print(turns[2])
Start scores = (74, 41).
Player 0 rolls 3 dice and gets outcomes [2, 1, 6].
End scores = (75, 41)
>>> print(turns[3])
Start scores = (75, 41).
Player 1 rolls 0 dice and gets outcomes [].
End scores = (45, 75)
>>> print(turns[4])
Start scores = (45, 75).
Player 0 rolls 9 dice and gets outcomes [1, 6, 4, 2, 5, 4, 1, 6, 2].
End scores = (46, 75)
>>> print(turns[5])
Start scores = (46, 75).
Player 1 rolls 5 dice and gets outcomes [5, 5, 3, 6, 1].
End scores = (46, 76)
>>> print(turns[6])
Start scores = (46, 76).
Player 0 rolls 10 dice and gets outcomes [2, 2, 5, 2, 4, 5, 6, 2, 5, 4].
End scores = (76, 83)
>>> print(turns[7])
Start scores = (76, 83).
Player 1 rolls 3 dice and gets outcomes [2, 3, 3].
End scores = (76, 91)
>>> print(turns[8])
Start scores = (76, 91).
Player 0 rolls 2 dice and gets outcomes [6, 3].
End scores = (85, 91)
>>> print(turns[9])
Start scores = (85, 91).
Player 1 rolls 8 dice and gets outcomes [1, 2, 3, 3, 3, 1, 5, 1].
End scores = (85, 92)
>>> print(turns[10])
Start scores = (85, 92).
Player 0 rolls 10 dice and gets outcomes [1, 1, 5, 1, 4, 1, 2, 5, 3, 1].
End scores = (86, 92)
>>> print(turns[11])
Start scores = (86, 92).
Player 1 rolls 5 dice and gets outcomes [3, 4, 4, 6, 1].
End scores = (86, 93)
>>> print(turns[12])
Game Over
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=85393, score0=3, score1=0, goal=44, feral_hogs=True)
>>> print(turns[0])
Start scores = (3, 0).
Player 0 rolls 3 dice and gets outcomes [3, 2, 5].
End scores = (13, 0)
>>> print(turns[1])
Start scores = (13, 0).
Player 1 rolls 6 dice and gets outcomes [1, 6, 3, 2, 6, 5].
End scores = (13, 1)
>>> print(turns[2])
Start scores = (13, 1).
Player 0 rolls 1 dice and gets outcomes [3].
End scores = (16, 1)
>>> print(turns[3])
Start scores = (16, 1).
Player 1 rolls 10 dice and gets outcomes [4, 2, 4, 6, 3, 2, 1, 1, 2, 3].
End scores = (16, 2)
>>> print(turns[4])
Start scores = (16, 2).
Player 0 rolls 2 dice and gets outcomes [4, 3].
End scores = (23, 2)
>>> print(turns[5])
Start scores = (23, 2).
Player 1 rolls 0 dice and gets outcomes [].
End scores = (4, 23)
>>> print(turns[6])
Start scores = (4, 23).
Player 0 rolls 3 dice and gets outcomes [2, 5, 1].
End scores = (5, 23)
>>> print(turns[7])
Start scores = (5, 23).
Player 1 rolls 8 dice and gets outcomes [6, 1, 1, 1, 1, 6, 4, 4].
End scores = (5, 24)
>>> print(turns[8])
Start scores = (5, 24).
Player 0 rolls 9 dice and gets outcomes [3, 4, 4, 1, 6, 2, 3, 5, 1].
End scores = (6, 24)
>>> print(turns[9])
Start scores = (6, 24).
Player 1 rolls 1 dice and gets outcomes [1].
End scores = (6, 25)
>>> print(turns[10])
Start scores = (6, 25).
Player 0 rolls 4 dice and gets outcomes [4, 6, 1, 3].
End scores = (25, 7)
>>> print(turns[11])
Start scores = (25, 7).
Player 1 rolls 9 dice and gets outcomes [4, 6, 5, 3, 1, 1, 2, 6, 3].
End scores = (25, 8)
>>> print(turns[12])
Start scores = (25, 8).
Player 0 rolls 7 dice and gets outcomes [2, 4, 1, 5, 4, 2, 1].
End scores = (26, 8)
>>> print(turns[13])
Start scores = (26, 8).
Player 1 rolls 9 dice and gets outcomes [1, 1, 4, 6, 2, 5, 6, 4, 6].
End scores = (26, 9)
>>> print(turns[14])
Start scores = (26, 9).
Player 0 rolls 2 dice and gets outcomes [2, 6].
End scores = (34, 9)
>>> print(turns[15])
Start scores = (34, 9).
Player 1 rolls 8 dice and gets outcomes [6, 3, 1, 3, 3, 6, 2, 6].
End scores = (34, 10)
>>> print(turns[16])
Start scores = (34, 10).
Player 0 rolls 3 dice and gets outcomes [2, 3, 3].
End scores = (42, 10)
>>> print(turns[17])
Start scores = (42, 10).
Player 1 rolls 4 dice and gets outcomes [5, 2, 3, 1].
End scores = (42, 11)
>>> print(turns[18])
Start scores = (42, 11).
Player 0 rolls 2 dice and gets outcomes [1, 5].
End scores = (43, 11)
>>> print(turns[19])
Start scores = (43, 11).
Player 1 rolls 10 dice and gets outcomes [3, 5, 6, 3, 6, 2, 5, 5, 5, 2].
End scores = (43, 53)
>>> print(turns[20])
Game Over
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=35702, score0=10, score1=13, goal=14, feral_hogs=True)
>>> print(turns[0])
Start scores = (10, 13).
Player 0 rolls 4 dice and gets outcomes [5, 4, 6, 2].
End scores = (13, 27)
>>> print(turns[1])
Game Over
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=75713, score0=62, score1=6, goal=63, feral_hogs=False)
>>> print(turns[0])
Start scores = (62, 6).
Player 0 rolls 7 dice and gets outcomes [1, 6, 2, 6, 4, 4, 6].
End scores = (63, 6)
>>> print(turns[1])
Game Over
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=14879, score0=24, score1=8, goal=29, feral_hogs=False)
>>> print(turns[0])
Start scores = (24, 8).
Player 0 rolls 4 dice and gets outcomes [1, 1, 6, 6].
End scores = (25, 8)
>>> print(turns[1])
Start scores = (25, 8).
Player 1 rolls 8 dice and gets outcomes [2, 6, 5, 1, 4, 6, 3, 4].
End scores = (25, 9)
>>> print(turns[2])
Start scores = (25, 9).
Player 0 rolls | |
"""Validation class for ChemKED schema.
"""
from warnings import warn
import re
from pkg_resources import resource_filename
import yaml
import numpy as np
import pint
from requests.exceptions import HTTPError, ConnectionError
from cerberus import Validator, SchemaError
import habanero
from .orcid import search_orcid
# Shared pint registry; every quantity in PyKED is parsed against it.
units = pint.UnitRegistry()
"""Unit registry to contain the units used in PyKED"""
units.define('cm3 = centimeter**3')
Q_ = units.Quantity
# Crossref client used for DOI-based reference validation.
crossref_api = habanero.Crossref(mailto='<EMAIL>')
# Load the ChemKED schema definition file
schema_file = resource_filename(__name__, 'schemas/chemked_schema.yaml')
with open(schema_file, 'r') as f:
    schema_list = f.readlines()
# Splice any '!include <file>' directives into the schema text (plain YAML has
# no include mechanism). All includes must appear before any other content.
inc_start = None  # line index of the first '!include' directive
inc_end = None  # line index of the first non-include content line
inc_list = []  # accumulated lines from all included files
no_includes = False
for l_num, l in enumerate(schema_list):
    if l.startswith('!include'):
        if no_includes:  # pragma: no cover
            raise SchemaError('All included files must be first in the main schema')
        if inc_start is None:
            inc_start = l_num
        if inc_end is not None:  # pragma: no cover
            raise SchemaError('All included files must be first in the main schema')
        inc_fname = l.split('!include')[1].strip()
        inc_fname = resource_filename(__name__, 'schemas/' + inc_fname)
        with open(inc_fname, 'r') as f:
            inc_list.extend(f.readlines())
    else:
        # Blank lines, comments, and document markers do not end the include block.
        if not l.strip() or l.startswith('#') or l.startswith('---'):
            continue
        if inc_start is None:  # pragma: no cover
            no_includes = True
        if inc_start is not None and inc_end is None:
            inc_end = l_num
# Replace the directive lines with the included content, then parse the YAML.
# NOTE(review): if the schema contained no '!include' lines this slice would be
# [None:None] and replace the entire list; the shipped schema always begins
# with includes, so this path is unexercised — confirm before changing schemas.
schema_list[inc_start:inc_end] = inc_list
schema = yaml.safe_load(''.join(schema_list))
# These top-level keys in the schema serve as references for lower-level keys.
# They are removed to prevent conflicts due to required variables, etc.
for key in ['author', 'value-unit-required', 'value-unit-optional',
            'composition', 'ignition-type', 'value-with-uncertainty',
            'value-without-uncertainty',
            ]:
    del schema[key]
# SI units for available value-type properties
property_units = {
    'temperature': 'kelvin',
    'compressed-temperature': 'kelvin',
    'pressure': 'pascal',
    'compressed-pressure': 'pascal',
    'ignition-delay': 'second',
    'first-stage-ignition-delay': 'second',
    'pressure-rise': '1.0 / second',
    'compression-time': 'second',
    'volume': 'meter**3',
    'time': 'second',
    'piston position': 'meter',
    'emission': 'dimensionless',
    'absorption': 'dimensionless',
    'concentration': 'mole/meter**3',
    'stroke': 'meter',
    'clearance': 'meter',
    'compression-ratio': 'dimensionless',
}
def compare_name(given_name, family_name, question_name):
    """Compares a name in question to a specified name separated into given and family.
    The name in question ``question_name`` can be of varying format, including
    "<NAME>", "<NAME>", "<NAME>", "<NAME>", and
    "<NAME>". Other possibilities include names with hyphens such as
    "<NAME>", "<NAME>", "C-<NAME>".
    Examples:
        >>> compare_name('Kyle', 'Niemeyer', '<NAME>')
        True
        >>> compare_name('Chih-Jen', 'Sung', 'C-<NAME>')
        True
    Args:
        given_name (`str`): Given (or first) name to be checked against.
        family_name (`str`): Family (or last) name to be checked against.
        question_name (`str`): The whole name in question.
    Returns:
        `bool`: The return value. True for successful comparison, False otherwise.
    """
    # lowercase everything
    given_name = given_name.lower()
    family_name = family_name.lower()
    question_name = question_name.lower()
    # rearrange names given as "last, first middle"
    if ',' in question_name:
        name_split = question_name.split(',')
        name_split.reverse()
        question_name = ' '.join(name_split).strip()
    # remove periods
    question_name = question_name.replace('.', '')
    given_name = given_name.replace('.', '')
    family_name = family_name.replace('.', '')
    # split names by , <space> - .
    given_name = list(filter(None, re.split(r"[, \-.]+", given_name)))
    # The family name is deliberately split WITHOUT '-' so a hyphenated
    # surname counts as a single name here; hyphens are handled below.
    num_family_names = len(list(filter(None, re.split("[, .]+", family_name))))
    # split name in question by , <space> - .
    name_split = list(filter(None, re.split(r"[, \-.]+", question_name)))
    # Everything before the family-name tail is treated as first/middle names.
    first_name = [name_split[0]]
    if len(name_split) > 2:
        first_name += [n for n in name_split[1:-num_family_names]]
    if len(first_name) > 1 and len(given_name) == len(first_name):
        # both have same number of first and middle names/initials
        # Compare middle names by initial only.
        for i in range(1, len(first_name)):
            first_name[i] = first_name[i][0]
            given_name[i] = given_name[i][0]
    elif len(given_name) != len(first_name):
        # Differing counts: compare only as many leading parts as both have.
        min_names = min(len(given_name), len(first_name))
        first_name = first_name[:min_names]
        given_name = given_name[:min_names]
    # first initial
    # If either side is a bare initial, reduce both to initials.
    if len(first_name[0]) == 1 or len(given_name[0]) == 1:
        given_name[0] = given_name[0][0]
        first_name[0] = first_name[0][0]
    # first and middle initials combined
    if len(first_name[0]) > 1 or len(given_name[0]) > 1:
        given_name[0] = given_name[0][0]
        first_name[0] = name_split[0][0]
    # Hyphenated last name may need to be reconnected
    # (the question-name split broke it apart on '-').
    if num_family_names == 1 and '-' in family_name:
        num_hyphen = family_name.count('-')
        family_name_compare = '-'.join(name_split[-(num_hyphen + 1):])
    else:
        family_name_compare = ' '.join(name_split[-num_family_names:])
    return given_name == first_name and family_name == family_name_compare
class OurValidator(Validator):
"""Custom validator with rules for Quantities and references.
"""
def _validate_isvalid_t_range(self, isvalid_t_range, field, values):
    """Checks that the temperature ranges given for thermo data are valid
    Args:
        isvalid_t_range (`bool`): flag from schema indicating T range is to be checked
        field (`str`): T_range
        values (`list`): List of temperature values indicating low, middle, and high ranges
    The rule's arguments are validated against this schema:
        {'isvalid_t_range': {'type': 'bool'}, 'field': {'type': 'str'},
         'value': {'type': 'list'}}
    """
    if all([isinstance(v, (float, int)) for v in values]):
        # If no units given, assume Kelvin
        T_low = Q_(values[0], 'K')
        T_mid = Q_(values[1], 'K')
        T_hi = Q_(values[2], 'K')
    elif all([isinstance(v, str) for v in values]):
        # Unit-bearing strings are parsed by pint.
        T_low = Q_(values[0])
        T_mid = Q_(values[1])
        T_hi = Q_(values[2])
    else:
        self._error(field, 'The temperatures in the range must all be either with units or '
                    'without units, they cannot be mixed')
        return False
    # Quantities compare by magnitude, so min/max detect ordering violations.
    if min([T_low, T_mid, T_hi]) != T_low:
        self._error(field, 'The first element of the T_range must be the lower limit')
    if max([T_low, T_mid, T_hi]) != T_hi:
        self._error(field, 'The last element of the T_range must be the upper limit')
def _validate_isvalid_unit(self, isvalid_unit, field, value):
    """Checks for appropriate units using Pint unit registry.
    Args:
        isvalid_unit (`bool`): flag from schema indicating units to be checked.
        field (`str`): property associated with units in question.
        value (`dict`): dictionary of values from file associated with this property.
    The rule's arguments are validated against this schema:
        {'isvalid_unit': {'type': 'bool'}, 'field': {'type': 'str'},
         'value': {'type': 'dict'}}
    """
    # NOTE: the schema block above is parsed by cerberus and kept verbatim.
    target = property_units[field]
    probe = 1.0 * units(value['units'])
    try:
        probe.to(target)
    except pint.DimensionalityError:
        self._error(field, 'incompatible units; should be consistent '
                    'with ' + target)
def _validate_isvalid_history(self, isvalid_history, field, value):
    """Checks that the given time history is properly formatted.
    Args:
        isvalid_history (`bool`): flag from schema indicating units to be checked.
        field (`str`): property associated with history in question.
        value (`dict`): dictionary of values from file associated with this property.
    The rule's arguments are validated against this schema:
        {'isvalid_history': {'type': 'bool'}, 'field': {'type': 'str'},
         'value': {'type': 'dict'}}
    """
    # Check the type has appropriate units
    history_type = value['type']
    # Emission/absorption history types share the units of their base property.
    if history_type.endswith('emission'):
        history_type = 'emission'
    elif history_type.endswith('absorption'):
        history_type = 'absorption'
    quantity = 1.0*(units(value['quantity']['units']))
    try:
        quantity.to(property_units[history_type])
    except pint.DimensionalityError:
        self._error(field, 'incompatible units; should be consistent '
                    'with ' + property_units[history_type])
    # Check that time has appropriate units
    time = 1.0*(units(value['time']['units']))
    try:
        time.to(property_units['time'])
    except pint.DimensionalityError:
        self._error(field, 'incompatible units; should be consistent '
                    'with ' + property_units['time'])
    # Check that the values have the right number of columns
    n_cols = len(value['values'][0])
    # Expected column count is the highest referenced column index + 1;
    # the uncertainty column is optional and defaults to index 0 when absent.
    max_cols = max(value['time']['column'],
                   value['quantity']['column'],
                   value.get('uncertainty', {}).get('column', 0)) + 1
    if n_cols > max_cols:
        self._error(field, 'too many columns in the values')
    elif n_cols < max_cols:
        self._error(field, 'not enough columns in the values')
def _validate_isvalid_quantity(self, isvalid_quantity, field, value):
    """Checks for valid given value and appropriate units.
    Args:
        isvalid_quantity (`bool`): flag from schema indicating quantity to be checked.
        field (`str`): property associated with quantity in question.
        value (`list`): list whose first element is a string representing a value with units
    The rule's arguments are validated against this schema:
        {'isvalid_quantity': {'type': 'bool'}, 'field': {'type': 'str'},
         'value': {'type': 'list'}}
    """
    # NOTE: the schema block above is parsed by cerberus and kept verbatim.
    target_units = property_units[field]
    parsed = Q_(value[0])
    zero = 0.0 * units(target_units)
    try:
        # The comparison itself raises when the parsed units are incompatible.
        if parsed <= zero:
            self._error(
                field, 'value must be greater than 0.0 {}'.format(target_units),
            )
    except pint.DimensionalityError:
        self._error(field, 'incompatible units; should be consistent '
                    'with ' + target_units)
def _validate_isvalid_uncertainty(self, isvalid_uncertainty, field, value):
    """Checks for valid given value and appropriate units with uncertainty.
    Args:
        isvalid_uncertainty (`bool`): flag from schema indicating uncertainty to be checked
        field (`str`): property associated with the quantity in question.
        value (`list`): list with the string of the value of the quantity and a dictionary of
            the uncertainty
    The rule's arguments are validated against this schema:
        {'isvalid_uncertainty': {'type': 'bool'}, 'field': {'type': 'str'},
         'value': {'type': 'list'}}
    """
    # The nominal value is validated like any other quantity.
    self._validate_isvalid_quantity(True, field, value)
    # Cerberus calls this method even for one-element lists that should only
    # be validated by isvalid_quantity, hence the length guard. Absolute
    # uncertainty bounds are themselves quantities and validated the same way;
    # relative uncertainties are dimensionless fractions and skipped here.
    if len(value) > 1 and value[1]['uncertainty-type'] != 'relative':
        for bound_key in ('uncertainty', 'upper-uncertainty', 'lower-uncertainty'):
            bound = value[1].get(bound_key)
            if bound is not None:
                self._validate_isvalid_quantity(True, field, [bound])
def _validate_isvalid_reference(self, isvalid_reference, field, value):
"""Checks valid reference metadata using DOI (if present).
Args:
isvalid_reference (`bool`): flag from schema indicating reference to be checked.
field (`str`): 'reference'
value (`dict`): dictionary of reference metadata.
The rule's arguments are validated against this schema:
{'isvalid_reference': {'type': 'bool'}, 'field': {'type': 'str'},
'value': {'type': 'dict'}}
| |
= len(freq_array)
channel_width = np.median(np.diff(freq_array))
freq_array = freq_array.reshape(1, -1)
spw_array = np.array([0])
Nspws = 1
# get baselines keys
antpairs = sorted(data.antpairs())
Nbls = len(antpairs)
Nblts = Nbls * Ntimes
# reconfigure time_array and lst_array
time_array = np.repeat(time_array[np.newaxis], Nbls, axis=0).ravel()
lst_array = np.repeat(lst_array[np.newaxis], Nbls, axis=0).ravel()
# get data array
data_array = np.moveaxis(map(lambda p: map(lambda ap: data[str(p)][ap], antpairs), pols), 0, -1)
# resort time and baseline axes
data_array = data_array.reshape(Nblts, 1, Nfreqs, Npols)
if nsamples is None:
nsample_array = np.ones_like(data_array, np.float)
else:
nsample_array = np.moveaxis(map(lambda p: map(lambda ap: nsamples[str(p)][ap], antpairs), pols), 0, -1)
nsample_array = nsample_array.reshape(Nblts, 1, Nfreqs, Npols)
# flags
if flags is None:
flag_array = np.zeros_like(data_array, np.float).astype(np.bool)
else:
flag_array = np.moveaxis(map(lambda p: map(lambda ap: flags[str(p)][ap].astype(np.bool), antpairs), pols), 0, -1)
flag_array = flag_array.reshape(Nblts, 1, Nfreqs, Npols)
# configure baselines
antpairs = np.repeat(np.array(antpairs), Ntimes, axis=0)
# get ant_1_array, ant_2_array
ant_1_array = antpairs[:, 0]
ant_2_array = antpairs[:, 1]
# get baseline array
baseline_array = 2048 * (ant_1_array + 1) + (ant_2_array + 1) + 2**16
# get antennas in data
data_ants = np.unique(np.concatenate([ant_1_array, ant_2_array]))
Nants_data = len(data_ants)
# get telescope ants
antenna_numbers = np.unique(antpos.keys())
Nants_telescope = len(antenna_numbers)
antenna_names = map(lambda a: "HH{}".format(a), antenna_numbers)
# set uvw assuming drift phase i.e. phase center is zenith
uvw_array = np.array([antpos[k[1]] - antpos[k[0]] for k in zip(ant_1_array, ant_2_array)])
# get antenna positions in ITRF frame
tel_lat_lon_alt = uvutils.LatLonAlt_from_XYZ(telescope_location)
antenna_positions = np.array(map(lambda k: antpos[k], antenna_numbers))
antenna_positions = uvutils.ECEF_from_ENU(antenna_positions.T, *tel_lat_lon_alt).T - telescope_location
# get zenith location: can only write drift phase
phase_type = 'drift'
# instantiate object
uvd = UVData()
# assign parameters
params = ['Nants_data', 'Nants_telescope', 'Nbls', 'Nblts', 'Nfreqs', 'Npols', 'Nspws', 'Ntimes',
'ant_1_array', 'ant_2_array', 'antenna_names', 'antenna_numbers', 'baseline_array',
'channel_width', 'data_array', 'flag_array', 'freq_array', 'history', 'instrument',
'integration_time', 'lst_array', 'nsample_array', 'object_name', 'phase_type',
'polarization_array', 'spw_array', 'telescope_location', 'telescope_name', 'time_array',
'uvw_array', 'vis_units', 'antenna_positions']
local_params = locals()
# overwrite paramters by kwargs
local_params.update(kwargs)
# set parameters in uvd
for p in params:
uvd.__setattr__(p, local_params[p])
# write to file
if write_file:
if filetype == 'miriad':
# check output
fname = os.path.join(outdir, fname)
if os.path.exists(fname) and overwrite is False:
if verbose:
print("{} exists, not overwriting".format(fname))
else:
if verbose:
print("saving {}".format(fname))
uvd.write_miriad(fname, clobber=True)
else:
raise AttributeError("didn't recognize filetype: {}".format(filetype))
if return_uvd:
return uvd
def update_uvdata(uvd, data=None, flags=None, add_to_history='', **kwargs):
    '''Updates a UVData/HERAData object in place with new data, flags, or attributes.

    Cannot modify the shape of data arrays. More than one spectral window is not
    supported. Assumes every baseline has the same times present and that the
    times are in order.

    Arguments:
        uvd: UVData/HERAData object to be updated
        data: dictionary or DataContainer of complex visibility data to update. Keys
            like (0,1,'xx') and shape=(Ntimes,Nfreqs). Default (None) does not update.
        flags: dictionary or DataContainer of data flags to update.
            Default (None) does not update.
        add_to_history: appends a string to the history of the UVData/HERAData object
        kwargs: dictionary mapping updated attributes to their new values.
            See pyuvdata.UVData documentation for more info.
    '''
    # Temporarily view the object as HERAData to use its update(), then
    # restore whatever class the caller handed in.
    previous_class = uvd.__class__
    uvd = to_HERAData(uvd)
    uvd.update(data=data, flags=flags)
    uvd.__class__ = previous_class
    # Apply metadata updates and sanity-check the result.
    uvd.history += add_to_history
    for attr_name, attr_value in kwargs.items():
        setattr(uvd, attr_name, attr_value)
    uvd.check()
def update_vis(infilename, outfilename, filetype_in='miriad', filetype_out='miriad',
               data=None, flags=None, add_to_history='', clobber=False, **kwargs):
    '''Loads an existing file with pyuvdata, modifies some subset of its parameters, and
    then writes a new file to disk. Cannot modify the shape of data arrays. More than one
    spectral window is not supported. Assumes every baseline has the same times present
    and that the times are in order.

    Arguments:
        infilename: filename of the base visibility file to be updated, or UVData/HERAData object
        outfilename: filename of the new visibility file
        filetype_in: either 'miriad' or 'uvfits' (ignored if infile is a UVData/HERAData object)
        filetype_out: one of 'miriad', 'uvfits', or 'uvh5'
        data: dictionary or DataContainer of complex visibility data to update. Keys
            like (0,1,'xx') and shape=(Ntimes,Nfreqs). Default (None) does not update.
        flags: dictionary or DataContainer of data flags to update.
            Default (None) does not update.
        add_to_history: appends a string to the history of the output file
        clobber: if True, overwrites existing file at outfilename. Always True for uvfits.
        kwargs: dictionary mapping updated attributes to their new values.
            See pyuvdata.UVData documentation for more info.
    '''
    # Load (or copy) the input so the caller's object is never mutated.
    if isinstance(infilename, (UVData, HERAData)):
        hd = copy.deepcopy(infilename)
    else:
        hd = HERAData(infilename, filetype=filetype_in)
        hd.read()
    update_uvdata(hd, data=data, flags=flags, add_to_history=add_to_history, **kwargs)
    # Dispatch on output type; miriad/uvh5 honor clobber, uvfits always overwrites.
    if filetype_out == 'uvfits':
        hd.write_uvfits(outfilename, force_phase=True, spoof_nonessential=True)
    elif filetype_out == 'miriad':
        hd.write_miriad(outfilename, clobber=clobber)
    elif filetype_out == 'uvh5':
        hd.write_uvh5(outfilename, clobber=clobber)
    else:
        raise TypeError("Input filetype must be either 'miriad', 'uvfits', or 'uvh5'.")
def to_HERACal(input_cal):
    '''Converts a string path, UVCal, or HERACal object, or a list of any one of those, to a
    single HERACal object without loading any new calibration solutions.

    Arguments:
        input_cal: path to calfits file, UVCal/HERACal object, or a list of either to combine
            into a single HERACal object

    Returns:
        hc: HERACal object. Will not have calibration loaded if initialized from string(s).

    Raises:
        TypeError: if input_cal (or any element of an input list) is not a string
            or a UVCal/HERACal object.
    '''
    # collections.Iterable was removed in Python 3.10 (it moved to
    # collections.abc in 3.3); import locally with a Python 2 fallback.
    try:
        from collections.abc import Iterable
    except ImportError:  # pragma: no cover - Python 2 fallback
        from collections import Iterable
    if isinstance(input_cal, str):  # single calfits path
        return HERACal(input_cal)
    elif isinstance(input_cal, (UVCal, HERACal)):  # single UVCal/HERACal object
        # Re-typing in place is cheap and avoids copying the solutions.
        input_cal.__class__ = HERACal
        return input_cal
    elif isinstance(input_cal, Iterable):  # List loading
        if np.all([isinstance(ic, str) for ic in input_cal]):  # List of calfits paths
            return HERACal(input_cal)
        elif np.all([isinstance(ic, (UVCal, HERACal)) for ic in input_cal]):  # List of UVCal/HERACal objects
            # Combine by addition, then re-type the result as HERACal.
            hc = reduce(operator.add, input_cal)
            hc.__class__ = HERACal
            return hc
        else:
            raise TypeError('If input is a list, it must be only strings or only UVCal/HERACal objects.')
    else:
        raise TypeError('Input must be a UVCal/HERACal object, a string, or a list of either.')
def load_cal(input_cal, return_meta=False):
    '''Load calfits files or UVCal/HERACal objects into dictionaries, optionally
    returning the most useful metadata. More than one spectral window is not supported.

    Arguments:
        input_cal: path to calfits file, UVCal/HERACal object, or a list of either
        return_meta: if True, returns additional information (see below)

    Returns:
        if return_meta is True:
            (gains, flags, quals, total_qual, ants, freqs, times, pols)
        else:
            (gains, flags)

        gains: Dictionary of complex calibration gains as a function of time
            and frequency with keys in the (1,'x') format
        flags: Dictionary of flags in the same format as the gains
        quals: Dictionary of of qualities of calibration solutions in the same
            format as the gains (e.g. omnical chi^2 per antenna)
        total_qual: ndarray of total calibration quality for the whole array
            (e.g. omnical overall chi^2)
        ants: ndarray containing unique antenna indices
        freqs: ndarray containing frequency channels (Hz)
        times: ndarray containing julian date bins of data
        pols: list of antenna polarization strings
    '''
    hc = to_HERACal(input_cal)
    # If gains are already in memory, build containers directly; otherwise
    # read() loads from disk and returns the same four containers.
    if hc.gain_array is None:
        gains, flags, quals, total_qual = hc.read()
    else:
        gains, flags, quals, total_qual = hc.build_calcontainers()
    if not return_meta:
        return gains, flags
    ants = np.array([antpol[0] for antpol in hc.ants])
    return gains, flags, quals, total_qual, ants, hc.freqs, hc.times, hc.pols
def write_cal(fname, gains, freqs, times, flags=None, quality=None, total_qual=None, write_file=True,
return_uvc=True, outdir='./', overwrite=False, gain_convention='divide',
history=' ', x_orientation="east", telescope_name='HERA', cal_style='redundant',
**kwargs):
'''Format gain solution dictionary into pyuvdata.UVCal and write to file
Arguments:
fname : type=str, output file basename
gains : type=dictionary, holds complex gain solutions. keys are antenna + pol
tuple pairs, e.g. (2, 'x'), and keys are 2D complex ndarrays with time
along [0] axis and freq along [1] axis.
freqs : type=ndarray, holds unique frequencies channels in Hz
times : type=ndarray, holds unique times of integration centers in Julian Date
flags : type=dictionary, holds boolean flags (True if flagged) for gains.
Must match shape of gains.
quality : type=dictionary, holds "quality" of calibration solution. Must match
shape of gains. See pyuvdata.UVCal doc for more details.
total_qual : type=dictionary, holds total_quality_array. Key(s) are polarization
string(s) and values are 2D (Ntimes, Nfreqs) ndarrays.
write_file : type=bool, if True, write UVCal to calfits file
return_uvc : type=bool, if True, return UVCal object
outdir : type=str, output file directory
overwrite : type=bool, if True overwrite output files
gain_convention : type=str, gain solutions formatted such that they 'multiply' into data
to get model, or 'divide' into data to get model
options=['multiply', 'divide']
history : | |
<reponame>KaushikSathvara/django<gh_stars>1-10
import hashlib
import json
import os
import posixpath
import re
from urllib.parse import unquote, urldefrag, urlsplit, urlunsplit
from django.conf import settings
from django.contrib.staticfiles.utils import check_settings, matches_patterns
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.utils.functional import LazyObject
class StaticFilesStorage(FileSystemStorage):
    """
    Standard file system storage for static files.

    The defaults for ``location`` and ``base_url`` are
    ``STATIC_ROOT`` and ``STATIC_URL``.
    """
    def __init__(self, location=None, base_url=None, *args, **kwargs):
        # Only substitute the settings when the argument is genuinely
        # missing; an explicit empty string must be preserved.
        location = settings.STATIC_ROOT if location is None else location
        base_url = settings.STATIC_URL if base_url is None else base_url
        check_settings(base_url)
        super().__init__(location, base_url, *args, **kwargs)
        if not location:
            # FileSystemStorage fallbacks to MEDIA_ROOT when location
            # is empty, so we restore the empty value.
            self.base_location = None
            self.location = None

    def path(self, name):
        if self.location:
            return super().path(name)
        raise ImproperlyConfigured("You're using the staticfiles app "
                                   "without having set the STATIC_ROOT "
                                   "setting to a filesystem path.")
class HashedFilesMixin:
default_template = """url("%(url)s")"""
max_post_process_passes = 5
patterns = (
("*.css", (
r"""(?P<matched>url\(['"]{0,1}\s*(?P<url>.*?)["']{0,1}\))""",
(
r"""(?P<matched>@import\s*["']\s*(?P<url>.*?)["'])""",
"""@import url("%(url)s")""",
),
)),
('*.js', (
(
r'(?P<matched>)^(//# (?-i:sourceMappingURL)=(?P<url>.*))$',
'//# sourceMappingURL=%(url)s',
),
(
r"""(?P<matched>import\s+(?s:(?P<imports>.*?))\s*from\s*["'](?P<url>.*?)["'])""",
'import %(imports)s from "%(url)s"',
),
(
r"""(?P<matched>export\s+(?s:(?P<exports>.*?))\s*from\s*["'](?P<url>.*?)["'])""",
'export %(exports)s from "%(url)s"',
),
(r"""(?P<matched>import\(["'](?P<url>.*?)["']\))""", 'import("%(url)s")'),
)),
)
keep_intermediate_files = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._patterns = {}
self.hashed_files = {}
for extension, patterns in self.patterns:
for pattern in patterns:
if isinstance(pattern, (tuple, list)):
pattern, template = pattern
else:
template = self.default_template
compiled = re.compile(pattern, re.IGNORECASE)
self._patterns.setdefault(extension, []).append((compiled, template))
def file_hash(self, name, content=None):
"""
Return a hash of the file with the given name and optional content.
"""
if content is None:
return None
md5 = hashlib.md5()
for chunk in content.chunks():
md5.update(chunk)
return md5.hexdigest()[:12]
    def hashed_name(self, name, content=None, filename=None):
        """
        Return *name* rewritten to embed a 12-char content hash, e.g.
        ``css/base.css`` -> ``css/base.abc123def456.css``. Query strings
        and fragments in *name* are preserved.
        """
        # `filename` is the name of file to hash if `content` isn't given.
        # `name` is the base name to construct the new hashed filename from.
        parsed_name = urlsplit(unquote(name))
        clean_name = parsed_name.path.strip()
        filename = (filename and urlsplit(unquote(filename)).path.strip()) or clean_name
        opened = content is None
        if opened:
            if not self.exists(filename):
                raise ValueError("The file '%s' could not be found with %r." % (filename, self))
            try:
                content = self.open(filename)
            except OSError:
                # Handle directory paths and fragments
                return name
        try:
            file_hash = self.file_hash(clean_name, content)
        finally:
            # Only close files we opened ourselves; the caller owns `content`
            # it passed in.
            if opened:
                content.close()
        path, filename = os.path.split(clean_name)
        root, ext = os.path.splitext(filename)
        # file_hash may be None (no content) -> no hash segment at all.
        file_hash = ('.%s' % file_hash) if file_hash else ''
        hashed_name = os.path.join(path, "%s%s%s" %
                                   (root, file_hash, ext))
        unparsed_name = list(parsed_name)
        unparsed_name[2] = hashed_name
        # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
        # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
        if '?#' in name and not unparsed_name[3]:
            unparsed_name[2] += '?'
        return urlunsplit(unparsed_name)
    def _url(self, hashed_name_func, name, force=False, hashed_files=None):
        """
        Return the URL for *name*: the non-hashed URL in DEBUG mode
        (unless *force* is True), otherwise the URL of the hashed file
        computed by *hashed_name_func*.
        """
        if settings.DEBUG and not force:
            hashed_name, fragment = name, ''
        else:
            clean_name, fragment = urldefrag(name)
            if urlsplit(clean_name).path.endswith('/'): # don't hash paths
                hashed_name = name
            else:
                args = (clean_name,)
                if hashed_files is not None:
                    args += (hashed_files,)
                hashed_name = hashed_name_func(*args)
        final_url = super().url(hashed_name)
        # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
        # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
        query_fragment = '?#' in name # [sic!]
        if fragment or query_fragment:
            urlparts = list(urlsplit(final_url))
            if fragment and not urlparts[4]:
                urlparts[4] = fragment
            if query_fragment and not urlparts[3]:
                urlparts[2] += '?'
            final_url = urlunsplit(urlparts)
        return unquote(final_url)
    def url(self, name, force=False):
        """
        Return the URL for *name* (non-hashed in DEBUG mode unless
        *force* is True); thin wrapper over _url()/stored_name().
        """
        return self._url(self.stored_name, name, force)
    def url_converter(self, name, hashed_files, template=None):
        """
        Return the custom URL converter for the given file name.
        The returned callable is used as a re.sub() replacement function.
        """
        if template is None:
            template = self.default_template
        def converter(matchobj):
            """
            Convert the matched URL to a normalized and hashed URL.
            This requires figuring out which files the matched URL resolves
            to and calling the url() method of the storage.
            """
            matches = matchobj.groupdict()
            matched = matches['matched']
            url = matches['url']
            # Ignore absolute/protocol-relative and data-uri URLs.
            if re.match(r'^[a-z]+:', url):
                return matched
            # Ignore absolute URLs that don't point to a static file (dynamic
            # CSS / JS?). Note that STATIC_URL cannot be empty.
            if url.startswith('/') and not url.startswith(settings.STATIC_URL):
                return matched
            # Strip off the fragment so a path-like fragment won't interfere.
            url_path, fragment = urldefrag(url)
            if url_path.startswith('/'):
                # Otherwise the condition above would have returned prematurely.
                assert url_path.startswith(settings.STATIC_URL)
                target_name = url_path[len(settings.STATIC_URL):]
            else:
                # We're using the posixpath module to mix paths and URLs conveniently.
                source_name = name if os.sep == '/' else name.replace(os.sep, '/')
                target_name = posixpath.join(posixpath.dirname(source_name), url_path)
            # Determine the hashed name of the target file with the storage backend.
            hashed_url = self._url(
                self._stored_name, unquote(target_name),
                force=True, hashed_files=hashed_files,
            )
            # Keep the directory part of the original URL; swap in only the
            # hashed basename.
            transformed_url = '/'.join(url_path.split('/')[:-1] + hashed_url.split('/')[-1:])
            # Restore the fragment that was stripped off earlier.
            if fragment:
                transformed_url += ('?#' if '?#' in url else '#') + fragment
            # Return the hashed version to the file
            matches['url'] = unquote(transformed_url)
            return template % matches
        return converter
    def post_process(self, paths, dry_run=False, **options):
        """
        Post process the given dictionary of files (called from collectstatic).
        Processing is actually two separate operations:
        1. renaming files to include a hash of their content for cache-busting,
        and copying those files to the target storage.
        2. adjusting files which contain references to other files so they
        refer to the cache-busting filenames.
        If either of these are performed on a file, then that file is considered
        post-processed.
        Yields (original_path, processed_path, processed) tuples; `processed`
        is True, False, or an Exception instance.
        """
        # don't even dare to process the files if we're in dry run mode
        if dry_run:
            return
        # where to store the new paths
        hashed_files = {}
        # build a list of adjustable files
        adjustable_paths = [
            path for path in paths
            if matches_patterns(path, self._patterns)
        ]
        # Adjustable files to yield at end, keyed by the original path.
        processed_adjustable_paths = {}
        # Do a single pass first. Post-process all files once, yielding not
        # adjustable files and exceptions, and collecting adjustable files.
        for name, hashed_name, processed, _ in self._post_process(paths, adjustable_paths, hashed_files):
            if name not in adjustable_paths or isinstance(processed, Exception):
                yield name, hashed_name, processed
            else:
                processed_adjustable_paths[name] = (name, hashed_name, processed)
        paths = {path: paths[path] for path in adjustable_paths}
        substitutions = False
        # Re-run substitution passes until the output stabilizes: files can
        # reference each other, so one rewrite may invalidate another's hash.
        for i in range(self.max_post_process_passes):
            substitutions = False
            for name, hashed_name, processed, subst in self._post_process(paths, adjustable_paths, hashed_files):
                # Overwrite since hashed_name may be newer.
                processed_adjustable_paths[name] = (name, hashed_name, processed)
                substitutions = substitutions or subst
            if not substitutions:
                break
        if substitutions:
            yield 'All', None, RuntimeError('Max post-process passes exceeded.')
        # Store the processed paths
        self.hashed_files.update(hashed_files)
        # Yield adjustable files with final, hashed name.
        yield from processed_adjustable_paths.values()
    def _post_process(self, paths, adjustable_paths, hashed_files):
        """
        One processing pass: hash each file in *paths*, rewriting references
        inside adjustable files, and save the results. Yields
        (name, hashed_name, processed, substitutions) tuples and records the
        final names in *hashed_files*.
        """
        # Sort the files by directory level
        def path_level(name):
            return len(name.split(os.sep))
        for name in sorted(paths, key=path_level, reverse=True):
            substitutions = True
            # use the original, local file, not the copied-but-unprocessed
            # file, which might be somewhere far away, like S3
            storage, path = paths[name]
            with storage.open(path) as original_file:
                cleaned_name = self.clean_name(name)
                hash_key = self.hash_key(cleaned_name)
                # generate the hash with the original content, even for
                # adjustable files.
                if hash_key not in hashed_files:
                    hashed_name = self.hashed_name(name, original_file)
                else:
                    hashed_name = hashed_files[hash_key]
                # then get the original's file content..
                if hasattr(original_file, 'seek'):
                    original_file.seek(0)
                hashed_file_exists = self.exists(hashed_name)
                processed = False
                # ..to apply each replacement pattern to the content
                if name in adjustable_paths:
                    old_hashed_name = hashed_name
                    content = original_file.read().decode('utf-8')
                    for extension, patterns in self._patterns.items():
                        if matches_patterns(path, (extension,)):
                            for pattern, template in patterns:
                                converter = self.url_converter(name, hashed_files, template)
                                try:
                                    content = pattern.sub(converter, content)
                                except ValueError as exc:
                                    # A referenced file could not be resolved;
                                    # surface the error but keep processing.
                                    yield name, None, exc, False
                    if hashed_file_exists:
                        self.delete(hashed_name)
                    # then save the processed result
                    content_file = ContentFile(content.encode())
                    if self.keep_intermediate_files:
                        # Save intermediate file for reference
                        self._save(hashed_name, content_file)
                    # Re-hash: the rewritten content changes the final name.
                    hashed_name = self.hashed_name(name, content_file)
                    if self.exists(hashed_name):
                        self.delete(hashed_name)
                    saved_name = self._save(hashed_name, content_file)
                    hashed_name = self.clean_name(saved_name)
                    # If the file hash stayed the same, this file didn't change
                    if old_hashed_name == hashed_name:
                        substitutions = False
                    processed = True
                if not processed:
                    # or handle the case in which neither processing nor
                    # a change to the original file happened
                    if not hashed_file_exists:
                        processed = True
                        saved_name = self._save(hashed_name, original_file)
                        hashed_name = self.clean_name(saved_name)
                # and then set the cache accordingly
                hashed_files[hash_key] = hashed_name
                yield name, hashed_name, processed, substitutions
def clean_name(self, name):
return name.replace('\\', '/')
    def hash_key(self, name):
        # Cache key for hashed_files; identity here — subclasses may
        # normalize it (e.g. case-folding).
        return name
def _stored_name(self, name, hashed_files):
# Normalize the path to | |
import math
import random

import numpy as np
import PIL
import PIL.ImageDraw
import PIL.ImageEnhance
import PIL.ImageFilter
import PIL.ImageOps
import torch
import torchvision
from PIL import Image
from torchvision.datasets.folder import default_loader
def level2val(level, value_range, val_type='float'):
    """Map *level* in [0, 1] linearly onto *value_range* = (lo, hi).

    ``val_type`` is accepted for interface compatibility but is unused.
    """
    lo, hi = value_range
    return lo + float(hi - lo) * level
class Transform(object):
    """Base class for label-aware transforms.

    A transform is a callable ``(img, label, **kwargs) -> (img, label)``
    that fires with probability ``prob``; ``level`` in [0, 1] scales the
    transform's strength via ``value_range``.
    """
    def __init__(self, value_range=None, name=None, prob=1.0, level=0):
        self.name = type(self).__name__ if name is None else name
        self.prob = prob
        if level < 0 or level > 1:
            raise ValueError('level must be in [0, 1]')
        self.level = level
        self.value_range = value_range

    def transform(self, img, label, **kwargs):
        # Identity by default; subclasses override.
        return img, label

    def __call__(self, img, label, **kwargs):
        # Skip the transform with probability (1 - prob).
        if random.random() > self.prob:
            return img, label
        return self.transform(img, label, **kwargs)

    def __repr__(self):
        return '<Transform ({}, prob={}, level={})>'.format(
            self.name, self.prob, self.level)
class Compose(object):
    """Chain transforms, threading (img, label) through each in order."""
    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, img, label, **kwargs):
        for position, tf in enumerate(self.transforms):
            # Each transform can see its position in the pipeline.
            kwargs['idx'] = position
            img, label = tf(img, label, **kwargs)
        return img, label

    def __repr__(self):
        pieces = [self.__class__.__name__ + '(']
        for tf in self.transforms:
            pieces.append('\n    {0}'.format(tf))
        pieces.append('\n)')
        return ''.join(pieces)
class ShearX(Transform):
    """Shear horizontally by a level-scaled amount with random sign."""
    def transform(self, img, label, **kwargs):
        shear = level2val(self.level, self.value_range)
        sign = -1 if random.random() > 0.5 else 1
        return img.transform(img.size, PIL.Image.AFFINE, (1, sign * shear, 0, 0, 1, 0)), label
class ShearY(Transform):
    """Shear vertically by a level-scaled amount with random sign."""
    def transform(self, img, label, **kwargs):
        shear = level2val(self.level, self.value_range)
        sign = -1 if random.random() > 0.5 else 1
        return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, sign * shear, 1, 0)), label
class TranslateX(Transform):
    """Translate horizontally by a signed fraction of the image width
    (typical range [-0.45, 0.45])."""
    def transform(self, img, label, **kwargs):
        frac = level2val(self.level, self.value_range)
        if random.random() > 0.5:
            frac = -frac
        offset = frac * img.size[0]
        return img.transform(img.size, PIL.Image.AFFINE, (1, 0, offset, 0, 1, 0)), label
class TranslateXabs(Transform):
    """Translate horizontally by a signed absolute pixel offset."""
    def transform(self, img, label, **kwargs):
        offset = level2val(self.level, self.value_range)
        if random.random() > 0.5:
            offset = -offset
        return img.transform(img.size, PIL.Image.AFFINE, (1, 0, offset, 0, 1, 0)), label
class TranslateY(Transform):
    """Translate vertically by a signed fraction of the image *height*
    (typical range [-0.45, 0.45])."""
    def transform(self, img, label, **kwargs):
        v = level2val(self.level, self.value_range)
        if random.random() > 0.5:
            v = -v
        # Fix: a vertical translation must scale with the image height
        # (img.size[1]); the original used the width (img.size[0]), which
        # is wrong for non-square images.
        v = v * img.size[1]
        return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v)), label
class TranslateYabs(Transform):
    """Translate vertically by a signed absolute pixel offset."""
    def transform(self, img, label, **kwargs):
        offset = level2val(self.level, self.value_range)
        if random.random() > 0.5:
            offset = -offset
        return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, offset)), label
class Rotate(Transform):
    """Rotate by a level-scaled angle in degrees with random sign
    (typical range [-30, 30])."""
    def transform(self, img, label, **kwargs):
        angle = level2val(self.level, self.value_range)
        sign = -1 if random.random() > 0.5 else 1
        return img.rotate(sign * angle), label
class AutoContrast(Transform):
    """Maximize image contrast via PIL's autocontrast."""
    def transform(self, img, label, **kwargs):
        stretched = PIL.ImageOps.autocontrast(img)
        return stretched, label
class Invert(Transform):
    """Invert pixel values (photographic negative)."""
    def transform(self, img, label, **kwargs):
        negative = PIL.ImageOps.invert(img)
        return negative, label
class Equalize(Transform):
    """Equalize the image histogram."""
    def transform(self, img, label, **kwargs):
        equalized = PIL.ImageOps.equalize(img)
        return equalized, label
class FlipLR(Transform):  # not from the paper
    """Mirror the image left-right."""
    def transform(self, img, label, **kwargs):
        mirrored = img.transpose(Image.FLIP_LEFT_RIGHT)
        return mirrored, label
class FlipUD(Transform):  # not from the paper
    """Flip the image top-bottom."""
    def transform(self, img, label, **kwargs):
        flipped = img.transpose(Image.FLIP_TOP_BOTTOM)
        return flipped, label
class Blur(Transform):  # not from the paper
    """Apply PIL's standard blur kernel."""
    def transform(self, img, label, **kwargs):
        # Fix: PIL.ImageFilter is not imported at module top, so the
        # original raised AttributeError at call time; import it locally.
        import PIL.ImageFilter
        return img.filter(PIL.ImageFilter.BLUR), label
class Smooth(Transform):  # not from the paper
    """Apply PIL's standard smoothing kernel."""
    def transform(self, img, label, **kwargs):
        # Fix: PIL.ImageFilter is not imported at module top, so the
        # original raised AttributeError at call time; import it locally.
        import PIL.ImageFilter
        return img.filter(PIL.ImageFilter.SMOOTH), label
class CropBilinear(Transform):
    """Crop a level-scaled border off the image, then resize back to the
    original size with bilinear interpolation."""
    def transform(self, img, label, **kwargs):
        margin = level2val(self.level, self.value_range) * img.size[0]
        w, h = img.size
        boxed = img.crop((margin, margin, w - margin, h - margin))
        return boxed.resize((w, h), Image.BILINEAR), label
class Flip(Transform):  # not from the paper
    """Horizontal mirror via ImageOps (same effect as FlipLR)."""
    def transform(self, img, label, **kwargs):
        mirrored = PIL.ImageOps.mirror(img)
        return mirrored, label
class Solarize(Transform):
    """Invert all pixels above a level-scaled threshold in [0, 256]."""
    def transform(self, img, label, **kwargs):
        threshold = level2val(self.level, self.value_range)
        return PIL.ImageOps.solarize(img, threshold), label
class SolarizeAdd(Transform):
    """Add *addition* to every pixel (clipped to [0, 255]), then solarize.

    Note: unlike the other transforms, this one takes explicit keyword
    arguments instead of **kwargs.
    """
    def transform(self, img, label, addition=0, threshold=128):
        # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # use a concrete dtype instead.
        img_np = np.array(img).astype(np.int32)
        img_np = np.clip(img_np + addition, 0, 255).astype(np.uint8)
        shifted = Image.fromarray(img_np)
        return PIL.ImageOps.solarize(shifted, threshold), label
class Posterize(Transform):
    """Reduce each channel to a level-scaled number of bits (>= 1,
    typical range [4, 8])."""
    def transform(self, img, label, **kwargs):
        bits = max(1, int(level2val(self.level, self.value_range)))
        return PIL.ImageOps.posterize(img, bits), label
class Contrast(Transform):
    """Scale contrast by a level-scaled factor (typical [0.1, 1.9])."""
    def transform(self, img, label, **kwargs):
        factor = level2val(self.level, self.value_range)
        enhanced = PIL.ImageEnhance.Contrast(img).enhance(factor)
        return enhanced, label
class Color(Transform):
    """Scale color saturation by a level-scaled factor (typical [0.1, 1.9])."""
    def transform(self, img, label, **kwargs):
        factor = level2val(self.level, self.value_range)
        enhanced = PIL.ImageEnhance.Color(img).enhance(factor)
        return enhanced, label
class Brightness(Transform):
    """Scale brightness by a level-scaled factor (typical [0.1, 1.9])."""
    def transform(self, img, label, **kwargs):
        factor = level2val(self.level, self.value_range)
        enhanced = PIL.ImageEnhance.Brightness(img).enhance(factor)
        return enhanced, label
class Sharpness(Transform):
    """Scale sharpness by a level-scaled factor (typical [0.1, 1.9])."""
    def transform(self, img, label, **kwargs):
        factor = level2val(self.level, self.value_range)
        enhanced = PIL.ImageEnhance.Sharpness(img).enhance(factor)
        return enhanced, label
class Cutout(Transform):
    """Paint a random gray square (side = level-scaled fraction of the
    image width) onto a copy of the image; the label is unchanged."""
    def transform(self, img, label, **kwargs):
        img = img.copy()
        v = level2val(self.level, self.value_range)
        if v <= 0.:
            # Fix: the original returned bare `img` here, breaking the
            # (img, label) contract every other transform follows.
            return img, label
        v = v * img.size[0]
        width, height = img.size
        # Random square center, clamped so the box stays inside the image.
        x0 = np.random.uniform(width)
        y0 = np.random.uniform(height)
        x0 = int(max(0, x0 - v / 2.0))
        y0 = int(max(0, y0 - v / 2.0))
        x1 = min(width, x0 + v)
        y1 = min(height, y0 + v)
        xy = (x0, y0, x1, y1)
        if img.mode == "RGB":
            # Approximate ImageNet mean color.
            color = (125, 123, 114)
        elif img.mode == "L":
            color = 121
        else:
            raise ValueError(f"Unsupported image mode {img.mode}")
        PIL.ImageDraw.Draw(img).rectangle(xy, color)
        return img, label
class Identity(Transform):
    # No-op transform; useful as a placeholder entry in policies.
    def transform(self, img, label, **kwargs):
        return img, label
class ToTensor(Transform):
    """Convert a PIL image to a torch tensor (label passed through)."""
    def transform(self, img, label, **kwargs):
        tensor = torchvision.transforms.ToTensor()(img)
        return tensor, label
class Lighting(Transform):
    """Lighting noise(AlexNet - style PCA - based noise)"""
    def __init__(self, alphastd, eigval, eigvec, **kwargs):
        # Scale of the random per-eigenvector weights; 0 disables the noise.
        self.alphastd = alphastd
        self.eigval = torch.Tensor(eigval)
        self.eigvec = torch.Tensor(eigvec)
        super().__init__(**kwargs)
    def transform(self, img, label, **kwargs):
        # assumes img is a (3, H, W) float tensor — TODO confirm with callers
        if self.alphastd == 0:
            return img, label
        # Draw one N(0, alphastd) weight per eigenvector, then add the
        # weighted sum of (eigval * eigvec) to every pixel's RGB channels.
        alpha = img.new().resize_(3).normal_(0, self.alphastd)
        rgb = self.eigvec.type_as(img).clone() \
            .mul(alpha.view(1, 3).expand(3, 3)) \
            .mul(self.eigval.view(1, 3).expand(3, 3)) \
            .sum(1).squeeze()
        return img.add(rgb.view(3, 1, 1).expand_as(img)), label
class Normalize(Transform):
    """Channel-wise normalization with fixed mean and std."""
    def __init__(self, mean, std, **kwargs):
        # Fix: the original read `self.mean = mean,` — the stray trailing
        # comma stored a 1-tuple instead of the mean itself.
        self.mean = mean
        self.std = std
        super(Normalize, self).__init__(**kwargs)
        self.normalize_func = torchvision.transforms.Normalize(mean, std)

    def transform(self, img, label, **kwargs):
        return self.normalize_func(img), label
class EfficientNetCenterCrop(Transform):
    """Center crop using EfficientNet's imgsize/(imgsize + 32) ratio of
    the image's shorter side."""
    def __init__(self, imgsize, **kwargs):
        self.imgsize = imgsize
        super(EfficientNetCenterCrop, self).__init__(**kwargs)

    def transform(self, img, label, **kwargs):
        w, h = img.size
        side = float(self.imgsize) / (self.imgsize + 32) * min(w, h)
        top = int(round((h - side) / 2.))
        left = int(round((w - side) / 2.))
        return img.crop((left, top, left + side, top + side)), label
class EfficientNetRandomCrop(Transform):
    """
    Inception-style random crop: sample a crop with random aspect ratio
    and area, falling back to a center crop when sampling fails.
    """
    def __init__(self, imgsize, min_covered=0.1, aspect_ratio_range=(3./4, 4./3),
                 area_range=(0.08, 1.0), max_attempts=10, **kwargs):
        assert 0.0 < min_covered
        assert 0 < aspect_ratio_range[0] <= aspect_ratio_range[1]
        assert 0 < area_range[0] <= area_range[1]
        assert 1 <= max_attempts
        self.min_covered = min_covered
        self.aspect_ratio_range = aspect_ratio_range
        self.area_range = area_range
        self.max_attempts = max_attempts
        self._fallback = EfficientNetCenterCrop(imgsize, prob=1.0)
        super(EfficientNetRandomCrop, self).__init__(**kwargs)
    def transform(self, img, label, **kwargs):
        original_width, original_height = img.size
        min_area = self.area_range[0] * (original_width * original_height)
        max_area = self.area_range[1] * (original_width * original_height)
        for _ in range(self.max_attempts):
            aspect_ratio = random.uniform(*self.aspect_ratio_range)
            # Smallest / largest crop heights compatible with the sampled
            # aspect ratio and the target area range.
            height = int(round(math.sqrt(min_area / aspect_ratio)))
            max_height = int(round(math.sqrt(max_area / aspect_ratio)))
            if max_height * aspect_ratio > original_width:
                # Shrink until the implied width fits inside the image.
                max_height = (original_width + 0.5 - 1e-7) / aspect_ratio
                max_height = int(max_height)
                if max_height * aspect_ratio > original_width:
                    max_height -= 1
            if max_height > original_height:
                max_height = original_height
            if height >= max_height:
                height = max_height
            height = int(round(random.uniform(height, max_height)))
            width = int(round(height * aspect_ratio))
            area = width * height
            # Reject crops outside the area range, larger than the image,
            # or covering too little of it; retry up to max_attempts times.
            if area < min_area or area > max_area:
                continue
            if width > original_width or height > original_height:
                continue
            if area < self.min_covered * (original_width * original_height):
                continue
            if width == original_width and height == original_height:
                return self._fallback(img, label, **kwargs)
            x = random.randint(0, original_width - width)
            y = random.randint(0, original_height - height)
            return img.crop((x, y, x + width, y + height)), label
        return self._fallback(img, label, **kwargs)
class RandomResizeCrop(Transform):
    # Thin wrapper around torchvision's RandomResizedCrop (label unchanged).
    def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=Image.BILINEAR, **kwargs):
        self.transform_func = torchvision.transforms.RandomResizedCrop(size, scale, ratio, interpolation)
        super().__init__(**kwargs)
    def transform(self, img, label, **kwargs):
        return self.transform_func(img), label
class Resize(Transform):
    # Thin wrapper around torchvision's Resize (label unchanged).
    def __init__(self, size, interpolation=Image.BILINEAR, **kwargs):
        self.transform_func = torchvision.transforms.Resize(size, interpolation)
        super().__init__(**kwargs)
    def transform(self, img, label, **kwargs):
        return self.transform_func(img), label
class ColorJitter(Transform):
    # Thin wrapper around torchvision's ColorJitter (label unchanged).
    def __init__(self, brightness, contrast, saturation, **kwargs):
        super().__init__(**kwargs)
        self.transform_func = torchvision.transforms.ColorJitter(brightness, contrast, saturation)
    def transform(self, img, label, **kwargs):
        return self.transform_func(img), label
class RandomCrop(Transform):
    # Thin wrapper around torchvision's RandomCrop (label unchanged).
    def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode='constant', **kwargs):
        self.transform_func = torchvision.transforms.RandomCrop(size, padding,
                                                                pad_if_needed, fill,
                                                                padding_mode)
        super().__init__(**kwargs)
    def transform(self, img, label, **kwargs):
        return self.transform_func(img), label
class HorizontalFlip(Transform):
    """Mirror the image left-right (label unchanged)."""
    def transform(self, img, label, **kwargs):
        mirrored = img.transpose(Image.FLIP_LEFT_RIGHT)
        return mirrored, label
class SamplePair(Transform):
    def __init__(
        self,
        value_range=None,
        name=None,
        prob=1.0,
        level=0,
        alpha=1.0,
        same_class_ratio=-1.0,
        prob_label=False,
    ):
        # Beta(alpha, alpha) parameter for the mixing ratio; a
        # non-positive alpha disables random mixing (ratio becomes 1.0).
        self.alpha = alpha
        # NOTE(review): same_class_ratio / prob_label semantics are not
        # fully visible here — presumably they control same-class pairing
        # and soft-label output; confirm against transform().
        self.same_class_ratio = same_class_ratio
        self.prob_label = prob_label
        super().__init__(value_range, name, prob, level)
def transform(self, img, label, **kwargs):
data = kwargs['data']
targets = kwargs['targets']
transforms = kwargs["transforms"]
num_classes = kwargs["num_classes"]
if self.alpha > 0.0:
mix_ratio = np.random.beta(self.alpha, self.alpha)
else:
mix_ratio = 1.0
tot_cnt = | |
"""
Tests for dataset creation
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import unittest
import tempfile
import os
import shutil
import numpy as np
import deepchem as dc
class TestDatasets(unittest.TestCase):
"""
Test basic top-level API for dataset objects.
"""
  def test_sparsify_and_densify(self):
    """Test that sparsify and densify work as inverses."""
    # Test on identity matrix
    num_samples = 10
    num_features = num_samples
    X = np.eye(num_samples)
    X_sparse = dc.data.sparsify_features(X)
    X_reconstructed = dc.data.densify_features(X_sparse, num_features)
    np.testing.assert_array_equal(X, X_reconstructed)
    # Generate random sparse features dataset
    # (seeded so the binomial draw is reproducible)
    np.random.seed(123)
    p = .05
    X = np.random.binomial(1, p, size=(num_samples, num_features))
    X_sparse = dc.data.sparsify_features(X)
    X_reconstructed = dc.data.densify_features(X_sparse, num_features)
    np.testing.assert_array_equal(X, X_reconstructed)
    # Test edge case with array of all zeros
    X = np.zeros((num_samples, num_features))
    X_sparse = dc.data.sparsify_features(X)
    X_reconstructed = dc.data.densify_features(X_sparse, num_features)
    np.testing.assert_array_equal(X, X_reconstructed)
def test_pad_features(self):
"""Test that pad_features pads features correctly."""
batch_size = 100
num_features = 10
num_tasks = 5
# Test cases where n_samples < 2*n_samples < batch_size
n_samples = 29
X_b = np.zeros((n_samples, num_features))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test cases where n_samples < batch_size
n_samples = 79
X_b = np.zeros((n_samples, num_features))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case where n_samples == batch_size
n_samples = 100
X_b = np.zeros((n_samples, num_features))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case for object featurization.
n_samples = 2
X_b = np.array([{"a": 1}, {"b": 2}])
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case for more complicated object featurization
n_samples = 2
X_b = np.array([(1, {"a": 1}), (2, {"b": 2})])
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case with multidimensional data
n_samples = 50
num_atoms = 15
d = 3
X_b = np.zeros((n_samples, num_atoms, d))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
def test_pad_batches(self):
"""Test that pad_batch pads batches correctly."""
batch_size = 100
num_features = 10
num_tasks = 5
# Test cases where n_samples < 2*n_samples < batch_size
n_samples = 29
X_b = np.zeros((n_samples, num_features))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test cases where n_samples < batch_size
n_samples = 79
X_b = np.zeros((n_samples, num_features))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case where n_samples == batch_size
n_samples = 100
X_b = np.zeros((n_samples, num_features))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case for object featurization.
n_samples = 2
X_b = np.array([{"a": 1}, {"b": 2}])
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case for more complicated object featurization
n_samples = 2
X_b = np.array([(1, {"a": 1}), (2, {"b": 2})])
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case with multidimensional data
n_samples = 50
num_atoms = 15
d = 3
X_b = np.zeros((n_samples, num_atoms, d))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
def test_get_task_names(self):
"""Test that get_task_names returns correct task_names"""
solubility_dataset = dc.data.tests.load_solubility_data()
assert solubility_dataset.get_task_names() == ["log-solubility"]
multitask_dataset = dc.data.tests.load_multitask_data()
assert sorted(multitask_dataset.get_task_names()) == sorted([
"task0", "task1", "task2", "task3", "task4", "task5", "task6", "task7",
"task8", "task9", "task10", "task11", "task12", "task13", "task14",
"task15", "task16"
])
def test_get_data_shape(self):
"""Test that get_data_shape returns currect data shape"""
solubility_dataset = dc.data.tests.load_solubility_data()
assert solubility_dataset.get_data_shape() == (1024,)
multitask_dataset = dc.data.tests.load_multitask_data()
assert multitask_dataset.get_data_shape() == (1024,)
def test_len(self):
"""Test that len(dataset) works."""
solubility_dataset = dc.data.tests.load_solubility_data()
assert len(solubility_dataset) == 10
def test_reshard(self):
"""Test that resharding the dataset works."""
solubility_dataset = dc.data.tests.load_solubility_data()
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
assert solubility_dataset.get_number_shards() == 1
solubility_dataset.reshard(shard_size=1)
assert solubility_dataset.get_shard_size() == 1
X_r, y_r, w_r, ids_r = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
assert solubility_dataset.get_number_shards() == 10
solubility_dataset.reshard(shard_size=10)
assert solubility_dataset.get_shard_size() == 10
X_rr, y_rr, w_rr, ids_rr = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Test first resharding worked
np.testing.assert_array_equal(X, X_r)
np.testing.assert_array_equal(y, y_r)
np.testing.assert_array_equal(w, w_r)
np.testing.assert_array_equal(ids, ids_r)
# Test second resharding worked
np.testing.assert_array_equal(X, X_rr)
np.testing.assert_array_equal(y, y_rr)
np.testing.assert_array_equal(w, w_rr)
np.testing.assert_array_equal(ids, ids_rr)
def test_select(self):
"""Test that dataset select works."""
num_datapoints = 10
num_features = 10
num_tasks = 1
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.ones((num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
indices = [0, 4, 5, 8]
select_dataset = dataset.select(indices)
X_sel, y_sel, w_sel, ids_sel = (select_dataset.X, select_dataset.y,
select_dataset.w, select_dataset.ids)
np.testing.assert_array_equal(X[indices], X_sel)
np.testing.assert_array_equal(y[indices], y_sel)
np.testing.assert_array_equal(w[indices], w_sel)
np.testing.assert_array_equal(ids[indices], ids_sel)
def test_get_shape(self):
"""Test that get_shape works."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.NumpyDataset(X, y, w, ids)
X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
assert X_shape == X.shape
assert y_shape == y.shape
assert w_shape == w.shape
assert ids_shape == ids.shape
def test_iterbatches(self):
"""Test that iterating over batches of data works."""
solubility_dataset = dc.data.tests.load_solubility_data()
batch_size = 2
data_shape = solubility_dataset.get_data_shape()
tasks = solubility_dataset.get_task_names()
for (X_b, y_b, w_b, ids_b) in solubility_dataset.iterbatches(batch_size):
assert X_b.shape == (batch_size,) + data_shape
assert y_b.shape == (batch_size,) + (len(tasks),)
assert w_b.shape == (batch_size,) + (len(tasks),)
assert ids_b.shape == (batch_size,)
def test_itersamples_numpy(self):
    """itersamples on a NumpyDataset should yield rows in input order."""
    n_samples, n_features, n_tasks = 100, 10, 10
    X = np.random.rand(n_samples, n_features)
    y = np.random.randint(2, size=(n_samples, n_tasks))
    w = np.random.randint(2, size=(n_samples, n_tasks))
    ids = np.array(["id"] * n_samples)
    dataset = dc.data.NumpyDataset(X, y, w, ids)

    # Walk the samples in lockstep with the source arrays.
    for (sx, sy, sw, sid), ex, ey, ew, eid in zip(
            dataset.itersamples(), X, y, w, ids):
        np.testing.assert_array_equal(sx, ex)
        np.testing.assert_array_equal(sy, ey)
        np.testing.assert_array_equal(sw, ew)
        np.testing.assert_array_equal(sid, eid)
def test_itersamples_disk(self):
    """itersamples on a DiskDataset should yield rows in input order."""
    dataset = dc.data.tests.load_solubility_data()
    # Materialize the reference arrays once before iterating.
    X, y, w, ids = dataset.X, dataset.y, dataset.w, dataset.ids
    for (sx, sy, sw, sid), ex, ey, ew, eid in zip(
            dataset.itersamples(), X, y, w, ids):
        np.testing.assert_array_equal(sx, ex)
        np.testing.assert_array_equal(sy, ey)
        np.testing.assert_array_equal(sw, ew)
        np.testing.assert_array_equal(sid, eid)
def test_transform_numpy(self):
    """transform() on a NumpyDataset returns a new dataset, leaving the original intact."""
    n_samples, n_features, n_tasks = 100, 10, 10
    X = np.random.rand(n_samples, n_features)
    y = np.random.randint(2, size=(n_samples, n_tasks))
    w = np.random.randint(2, size=(n_samples, n_tasks))
    ids = np.array(["id"] * n_samples)
    dataset = dc.data.NumpyDataset(X, y, w, ids)

    # Scale features by 2 and labels by 1.5; weights pass through unchanged.
    transformed = dataset.transform(lambda x, y, w: (2 * x, 1.5 * y, w))

    # The source dataset must be untouched...
    np.testing.assert_array_equal(X, dataset.X)
    np.testing.assert_array_equal(y, dataset.y)
    np.testing.assert_array_equal(w, dataset.w)
    np.testing.assert_array_equal(ids, dataset.ids)
    # ...while the result reflects the transformation.
    np.testing.assert_array_equal(2 * X, transformed.X)
    np.testing.assert_array_equal(1.5 * y, transformed.y)
    np.testing.assert_array_equal(w, transformed.w)
    np.testing.assert_array_equal(ids, transformed.ids)
def test_transform_disk(self):
    """transform() on a DiskDataset returns a new dataset, leaving the original intact."""
    dataset = dc.data.tests.load_solubility_data()
    X, y, w, ids = dataset.X, dataset.y, dataset.w, dataset.ids

    # Scale features by 2 and labels by 1.5; weights pass through unchanged.
    transformed = dataset.transform(lambda x, y, w: (2 * x, 1.5 * y, w))

    # The source dataset must be untouched...
    np.testing.assert_array_equal(X, dataset.X)
    np.testing.assert_array_equal(y, dataset.y)
    np.testing.assert_array_equal(w, dataset.w)
    np.testing.assert_array_equal(ids, dataset.ids)
    # ...while the result reflects the transformation.
    np.testing.assert_array_equal(2 * X, transformed.X)
    np.testing.assert_array_equal(1.5 * y, transformed.y)
    np.testing.assert_array_equal(w, transformed.w)
    np.testing.assert_array_equal(ids, transformed.ids)
def test_to_numpy(self):
    """The X, y, w, ids arrays must have shapes consistent with the dataset."""
    dataset = dc.data.tests.load_solubility_data()
    data_shape = dataset.get_data_shape()
    n_tasks = len(dataset.get_task_names())
    n_samples = len(dataset)
    assert dataset.X.shape == (n_samples,) + data_shape
    assert dataset.y.shape == (n_samples, n_tasks)
    assert dataset.w.shape == (n_samples, n_tasks)
    assert dataset.ids.shape == (n_samples,)
def test_consistent_ordering(self):
    """Reading ids twice from the same dataset must give identical orderings."""
    dataset = dc.data.tests.load_solubility_data()
    first_read = dataset.ids
    second_read = dataset.ids
    np.testing.assert_array_equal(first_read, second_read)
def test_get_statistics(self):
"""Test statistics computation of this dataset."""
solubility_dataset = dc.data.tests.load_solubility_data()
X, y, _, _ = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
X_means, y_means = | |
leave some symbols undefined.
TODO: right now this procedure is guided by the model. Maybe
it should be flipped and be guided by the vocabulary of the
program and num_states, asserting that it finds everything needed in the
model.
'''
if isinstance(z3model, CVC4Model):
return Z3Translator._old_model_to_trace(z3model, num_states, allow_undefined)
struct = Z3Translator.model_to_first_order_structure(z3model)
trace = Trace(num_states)
keys = Z3Translator._get_keys(num_states)
prog = syntax.the_program
for k, v in struct.univs.items(): # TODO: maybe should sort here, not sure
sort = prog.scope.get_sort(k.name)
assert sort is not None, k
trace.univs[sort] = v
for d, interp in chain( # TODO: maybe should sort here, not sure
struct.rel_interps.items(),
struct.const_interps.items(),
struct.func_interps.items()
):
assert isinstance(d, (RelationDecl, ConstantDecl, FunctionDecl))
for i, key in enumerate(keys):
if d.name.startswith(prefix := key + '_'):
name = d.name[len(prefix):]
R = trace.rel_interps[i]
C = trace.const_interps[i]
F = trace.func_interps[i]
break
else:
name = d.name
R = trace.immut_rel_interps
C = trace.immut_const_interps
F = trace.immut_func_interps
decl = prog.scope.get(name)
assert not isinstance(decl, Sort), (d, decl)
assert not isinstance(decl, SortInferencePlaceholder), (d, decl)
if decl is None:
if name.startswith(prefix := TRANSITION_INDICATOR + '_'):
assert isinstance(d, RelationDecl) and len(d.arity) == 0
interp = cast(RelationInterp, interp)
if interp[()]:
name = name[len(prefix):]
istr, name = name.split('_', maxsplit=1)
i = int(istr)
if i < len(trace.transitions):
trace.transitions[i] = name
else:
# TODO: not sure what's going on here with check_bmc and pd.check_k_state_implication
# assert False
pass
elif isinstance(decl, RelationDecl):
interp = cast(RelationInterp, interp)
R[decl] = interp
elif isinstance(decl, FunctionDecl):
interp = cast(FunctionInterp, interp)
F[decl] = interp
elif isinstance(decl, ConstantDecl):
interp = cast(Element, interp)
C[decl] = interp
else:
assert False, (d, decl)
if allow_undefined:
return trace
def get_univ(d: SortDecl) -> Tuple[Element, ...]:
if d not in trace.univs:
trace.univs[d] = (d.name + '0',)
return trace.univs[d]
def arbitrary_interp_r(r: RelationDecl) -> RelationInterp:
doms = [get_univ(syntax.get_decl_from_sort(s)) for s in r.arity]
return dict.fromkeys(product(*doms), False)
def ensure_defined_r(r: RelationDecl) -> None:
arb_interp: Optional[RelationInterp] = None
for m in (trace.rel_interps if r.mutable else [trace.immut_rel_interps]):
if r not in m:
if arb_interp is None:
arb_interp = arbitrary_interp_r(r)
m[r] = arb_interp
def arbitrary_interp_c(c: ConstantDecl) -> Element:
if isinstance(c.sort, syntax._BoolSort):
return 'false'
elif isinstance(c.sort, syntax._IntSort):
return '0'
assert isinstance(c.sort, syntax.UninterpretedSort)
sort = c.sort
return get_univ(syntax.get_decl_from_sort(sort))[0]
def ensure_defined_c(c: ConstantDecl) -> None:
arb_interp = arbitrary_interp_c(c)
for m in (trace.const_interps if c.mutable else [trace.immut_const_interps]):
if c not in m:
m[c] = arb_interp
def arbitrary_interp_f(f: FunctionDecl) -> FunctionInterp:
doms = [get_univ(syntax.get_decl_from_sort(s)) for s in f.arity]
image = get_univ(syntax.get_decl_from_sort(f.sort))[0]
return dict.fromkeys(product(*doms), image)
def ensure_defined_f(f: FunctionDecl) -> None:
arb_interp: Optional[FunctionInterp] = None
for m in (trace.func_interps if f.mutable else [trace.immut_func_interps]):
if f not in m:
if arb_interp is None:
arb_interp = arbitrary_interp_f(f)
m[f] = arb_interp
for decl in prog.relations_constants_and_functions():
if isinstance(decl, RelationDecl):
ensure_defined_r(decl)
elif isinstance(decl, ConstantDecl):
ensure_defined_c(decl)
elif isinstance(decl, FunctionDecl):
ensure_defined_f(decl)
else:
assert False, decl
return trace
@staticmethod
def model_to_first_order_structure(z3model: z3.ModelRef) -> FirstOrderStructure:
    '''
    Convert z3 model to a BareFirstOrderStructure.

    Note that all declarations of the bare structure are not
    related to the program's declarations.
    '''
    assert isinstance(z3model, z3.ModelRef), f'{type(z3model)}\n{z3model}'
    struct = BareFirstOrderStructure({}, {}, {}, {})

    # create universe
    # Bool and Int are the interpreted sorts; uninterpreted sorts from the
    # z3 model are added to this map in the loop below.
    sorts: Dict[str, Sort] = {
        'Bool': BoolSort,
        'Int': IntSort,
    }
    sort_decls: Dict[Sort, SortDecl] = {}  # TODO: remove once Universe maps sorts and not SortDecls
    # Bidirectional maps between z3 universe members and their element names.
    elements: Dict[Tuple[Sort, z3.ExprRef], Element] = {}
    z3elements: Dict[Tuple[Sort, Element], z3.ExprRef] = {}
    for z3sort in sorted(z3model.sorts(), key=str):
        # Sorting by string form keeps element naming deterministic.
        z3elems = sorted(z3model.get_universe(z3sort), key=str)
        name = z3sort.name()
        sort = UninterpretedSort(name)
        sort.decl = SortDecl(name)
        sorts[sort.name] = sort
        sort_decls[sort] = sort.decl
        struct.univs[sort.decl] = ()
        for i, x in enumerate(z3elems):
            e = f'{sort.name}{i}'  # TODO: someday this will just be i
            assert (sort, x) not in elements, (sort, i, x, e)
            elements[sort, x] = e
            assert (sort, e) not in z3elements, (sort, i, x, e)
            z3elements[sort, e] = x
            struct.univs[sort.decl] += (e,)

    # interpret relations, constants, functions

    # One evaluator per range kind, each mapping a z3 expression to the
    # structure-level value via z3model.eval with model completion.
    def _eval_bool(expr: z3.ExprRef) -> bool:
        assert z3.is_bool(expr), expr
        ans = z3model.eval(expr, model_completion=True)
        assert z3.is_bool(ans), (expr, ans)
        return bool(ans)

    def _eval_int(expr: z3.ExprRef) -> str:  # TODO: this should return int
        assert z3.is_int(expr), expr
        ans = z3model.eval(expr, model_completion=True)
        assert z3.is_int_value(ans), (expr, ans)
        return str(ans.as_long())

    def _eval_elem(sort: Sort) -> Callable[[z3.ExprRef], Element]:
        # Returns an evaluator specialized to one uninterpreted sort.
        def _eval(expr: z3.ExprRef) -> Element:
            assert sorts[expr.sort().name()] is sort, expr
            ans = z3model.eval(expr, model_completion=True)
            assert (sort, ans) in elements, (sort, expr, ans)
            return elements[sort, ans]
        return _eval

    for z3decl in sorted(z3model.decls(), key=str):
        name = z3decl.name()
        dom = tuple(
            sorts[z3decl.domain(i).name()]
            for i in range(z3decl.arity())
        )
        rng = sorts[z3decl.range().name()]
        decl: Union[RelationDecl, ConstantDecl, FunctionDecl]
        # Classify the z3 declaration: boolean range -> relation,
        # nullary -> constant, otherwise -> function. All are created
        # immutable, as the bare structure has no notion of state.
        if rng is BoolSort:
            decl = RelationDecl(name, tuple(dom), mutable=False)
        elif len(dom) == 0:
            decl = ConstantDecl(name, rng, mutable=False)
        else:
            decl = FunctionDecl(name, tuple(dom), rng, mutable=False)
        _eval: Callable[[z3.ExprRef], Union[bool, int, Element]]
        if rng is BoolSort:
            _eval = _eval_bool
        elif rng is IntSort:
            _eval = _eval_int
        elif isinstance(rng, UninterpretedSort):
            _eval = _eval_elem(rng)
        else:
            assert False, (decl, rng)
        # Tabulate the interpretation over the full cartesian product of
        # the domain universes.
        domains = [struct.univs[sort_decls[sort]] for sort in dom]
        fi = {
            row: _eval(z3decl(*(
                z3elements[sort, e]
                for sort, e in zip(dom, row)
            )))
            for row in product(*domains)
        }
        if isinstance(decl, RelationDecl):
            assert decl not in struct.rel_interps
            # Sanity checks: boolean values, correct arity, and arguments
            # drawn from the declared universes.
            assert all(isinstance(v, bool) for v in fi.values())
            assert all(
                len(k) == len(dom) and
                all(e in struct.univs[sort_decls[sort]] for e, sort in zip(k, dom))
                for k in fi.keys()
            )
            struct.rel_interps[decl] = cast(RelationInterp, fi)
        elif isinstance(decl, FunctionDecl):
            assert decl not in struct.func_interps
            assert all(isinstance(v, Element) for v in fi.values())
            if isinstance(rng, UninterpretedSort):
                assert all(v in struct.univs[sort_decls[rng]] for v in fi.values())
            elif rng is IntSort:
                # Int values are stored as strings; check they parse as ints.
                assert all(isinstance(int(v), int) for v in fi.values())
            else:
                assert False, (decl, rng)
            assert all(
                len(k) == len(dom) and
                all(e in struct.univs[sort_decls[sort]] for e, sort in zip(k, dom))
                for k in fi.keys()
            )
            struct.func_interps[decl] = cast(FunctionInterp, fi)
        elif isinstance(decl, ConstantDecl):
            assert decl not in struct.const_interps
            # A constant's tabulation has exactly one entry: the empty row.
            assert list(fi.keys()) == [()]
            v = fi[()]
            assert isinstance(v, Element)
            if isinstance(rng, UninterpretedSort):
                assert v in struct.univs[sort_decls[rng]]
            elif rng is IntSort:
                assert isinstance(int(v), int)
            else:
                assert False, (decl, rng)
            struct.const_interps[decl] = cast(Element, fi[()])
    return struct
@staticmethod
def sort_to_z3(s: Union[syntax.Sort, syntax.SortDecl]) -> z3.SortRef:
    """Translate a program sort (or sort declaration) to its z3 sort.

    Uninterpreted sorts are declared lazily in z3 and cached on the
    SortDecl; Bool and Int map to the built-in z3 sorts.
    """
    # Resolve an uninterpreted sort to its declaration first.
    if isinstance(s, syntax.UninterpretedSort):
        assert s.decl is not None, str(s)
        s = s.decl

    if isinstance(s, syntax.SortDecl):
        # Declare the z3 sort on first use and cache it on the declaration.
        if s.z3 is None:
            s.z3 = z3.DeclareSort(s.name)
        return s.z3
    if isinstance(s, syntax._BoolSort):
        return z3.BoolSort()
    if isinstance(s, syntax._IntSort):
        return z3.IntSort()
    assert False
@staticmethod
def function_to_z3(f: syntax.FunctionDecl, key: Optional[str]) -> z3.FuncDeclRef:
    """Return the z3 function for `f`, creating and caching it if needed.

    Mutable functions are cached per state `key` (prefixed with it);
    immutable functions are cached once on the declaration.
    """
    if not f.mutable:
        if f.immut_z3 is None:
            sig = [Z3Translator.sort_to_z3(s) for s in f.arity]
            sig.append(Z3Translator.sort_to_z3(f.sort))
            f.immut_z3 = z3.Function(f.name, *sig)
        return f.immut_z3

    # Mutable: a distinct z3 function per state key.
    assert key is not None
    if key not in f.mut_z3:
        sig = [Z3Translator.sort_to_z3(s) for s in f.arity]
        sig.append(Z3Translator.sort_to_z3(f.sort))
        f.mut_z3[key] = z3.Function(key + '_' + f.name, *sig)
    return f.mut_z3[key]
@staticmethod
def relation_to_z3(r: syntax.RelationDecl, key: Optional[str]) -> Union[z3.FuncDeclRef, z3.ExprRef]:
    """Return the z3 object for relation `r`, creating and caching it if needed.

    Nullary relations become boolean constants; relations with arguments
    become boolean-valued functions. Mutable relations are cached per
    state `key` (prefixed with it); immutable ones are cached once.
    """
    def declare(prefix: str) -> Union[z3.FuncDeclRef, z3.ExprRef]:
        if r.arity:
            sig = [Z3Translator.sort_to_z3(s) for s in r.arity] + [z3.BoolSort()]
            return z3.Function(prefix + r.name, *sig)
        return z3.Const(prefix + r.name, z3.BoolSort())

    if r.mutable:
        assert key is not None
        if key not in r.mut_z3:
            r.mut_z3[key] = declare(key + '_')
        return r.mut_z3[key]

    if r.immut_z3 is None:
        r.immut_z3 = declare('')
    return r.immut_z3
@staticmethod
def constant_to_z3(c: syntax.ConstantDecl, key: Optional[str]) -> z3.ExprRef:
    """Return the z3 constant for `c`, creating and caching it if needed.

    Mutable constants are cached per state `key` (prefixed with it);
    immutable constants are cached once on the declaration.
    """
    if not c.mutable:
        if c.immut_z3 is None:
            c.immut_z3 = z3.Const(c.name, Z3Translator.sort_to_z3(c.sort))
        return c.immut_z3

    assert key is not None
    if key not in c.mut_z3:
        c.mut_z3[key] = z3.Const(key + '_' + c.name, Z3Translator.sort_to_z3(c.sort))
    return c.mut_z3[key]
@staticmethod
def statedecl_to_z3(d: syntax.StateDecl, key: Optional[str]) -> Union[z3.FuncDeclRef, z3.ExprRef]:
    """Dispatch to the translator matching the kind of state declaration."""
    for kind, translate in (
        (syntax.RelationDecl, Z3Translator.relation_to_z3),
        (syntax.ConstantDecl, Z3Translator.constant_to_z3),
        (syntax.FunctionDecl, Z3Translator.function_to_z3),
    ):
        if isinstance(d, kind):
            return translate(d, key)
    assert False, d
@staticmethod
def sort_from_z3sort(prog: Program, z3sort: z3.SortRef) -> SortDecl:
    """Look up the program's sort declaration with the same name as the z3 sort."""
    sort_name = str(z3sort)
    return prog.scope.get_sort_checked(sort_name)
# ODED: I think the functions below should be implemented in logic.py or elsewhere, independent of z3
def qa_edges_expr(prog: Program, expr: Expr) -> Iterator[Tuple[str, str]]:
    """Yield quantifier-alternation edges of `expr` as pairs of sort names."""
    translator = Z3Translator(cast(Scope[z3.ExprRef], prog.scope), 0)
    z3expr = translator.translate_expr(expr)
    # TODO: consider overriding equals instead of using the names
    for src_z3sort, dst_z3sort in z3_quantifier_alternations(z3expr):
        src = Z3Translator.sort_from_z3sort(prog, src_z3sort)
        dst = Z3Translator.sort_from_z3sort(prog, dst_z3sort)
        yield (src.name, dst.name)
def quantifier_alternation_graph(prog: Program, exprs: List[Expr]) -> DiGraph:
    """Build the quantifier alternation graph over all of the given expressions."""
    graph = DiGraph()
    for e in exprs:
        # Edges accumulate across expressions into one shared graph.
        graph.add_edges_from(qa_edges_expr(prog, e))
    return graph
def decls_quantifier_alternation_graph(prog: Program, additional: List[Expr]) -> DiGraph:
res = quantifier_alternation_graph(prog,
[axiom.expr for axiom in prog.axioms()] +
[cast(Expr, rel.derived_axiom) for rel in prog.derived_relations()] +
additional)
for | |
blob's tier determines the allowed size, IOPS, and bandwidth of
the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation does not
update the blob's ETag.
:param tier: Indicates the tier to be set on the blob.
:type tier: str or ~azure.storage.blob.models.AccessTierRequired
:param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
see :code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
a Snapshot of a Blob.</a>`.
:type snapshot: str
:param version_id: The version id parameter is an opaque DateTime value that, when present,
specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer.
:type version_id: str
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an archived
blob.
:type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:param lease_access_conditions: Parameter group.
:type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
:param modified_access_conditions: Parameter group.
:type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_lease_id = None
_if_tags = None
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
if modified_access_conditions is not None:
_if_tags = modified_access_conditions.if_tags
comp = "tier"
accept = "application/xml"
# Construct URL
url = self.set_tier.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if snapshot is not None:
query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
if version_id is not None:
query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
if rehydrate_priority is not None:
header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
if _lease_id is not None:
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
if _if_tags is not None:
header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
if response.status_code == 200:
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
if response.status_code == 202:
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
if cls:
return cls(pipeline_response, None, response_headers)
set_tier.metadata = {'url': '/{containerName}/{blob}'} # type: ignore
def get_account_info(
    self,
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Returns the sku name and account kind.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # NOTE(review): auto-generated service operation — issues
    # GET {url}?restype=account&comp=properties and surfaces the result
    # only through response headers (x-ms-sku-name, x-ms-account-kind).
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Map common HTTP failures to the azure-core exception types.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    restype = "account"
    comp = "properties"
    accept = "application/xml"

    # Construct URL
    url = self.get_account_info.metadata['url']  # type: ignore
    path_format_arguments = {
        'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
    query_parameters['comp'] = self._serialize.query("comp", comp, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Send the request through the client pipeline (synchronous).
    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Only 200 is a success for this operation.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
        raise HttpResponseError(response=response, model=error)

    # Deserialize the interesting response headers for the caller/cls hook.
    response_headers = {}
    response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
    response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
    response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
    response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
    response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name'))
    response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind'))

    if cls:
        return cls(pipeline_response, None, response_headers)

get_account_info.metadata = {'url': '/{containerName}/{blob}'}  # type: ignore
def query(
self,
snapshot=None, # type: Optional[str]
timeout=None, # type: Optional[int]
request_id_parameter=None, # type: Optional[str]
query_request=None, # type: Optional["_models.QueryRequest"]
lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
cpk_info=None, # type: Optional["_models.CpkInfo"]
modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"]
**kwargs # type: Any
):
# type: (...) -> IO
"""The Query operation enables users to select/project on blob data by providing simple query
expressions.
:param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
see :code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
a Snapshot of a Blob.</a>`.
:type snapshot: str
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:param query_request: the query request.
:type query_request: ~azure.storage.blob.models.QueryRequest
:param lease_access_conditions: Parameter group.
:type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
:param cpk_info: Parameter group.
:type cpk_info: ~azure.storage.blob.models.CpkInfo
:param modified_access_conditions: Parameter group.
:type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IO, or the result of cls(response)
:rtype: IO
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[IO]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_lease_id = None
_encryption_key = None
_encryption_key_sha256 = None
_encryption_algorithm = None
_if_modified_since = None
_if_unmodified_since = None
_if_match = None
_if_none_match = None
_if_tags = None
if cpk_info is not None:
_encryption_key = cpk_info.encryption_key
_encryption_key_sha256 = cpk_info.encryption_key_sha256
_encryption_algorithm = cpk_info.encryption_algorithm
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
if modified_access_conditions is not None:
_if_modified_since = modified_access_conditions.if_modified_since
_if_unmodified_since = modified_access_conditions.if_unmodified_since
_if_match = modified_access_conditions.if_match
_if_none_match = modified_access_conditions.if_none_match
_if_tags = modified_access_conditions.if_tags
comp = "query"
content_type = kwargs.pop("content_type", "application/xml")
accept = "application/xml"
# Construct URL
url = self.query.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if snapshot is not None:
query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if _lease_id is not None:
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
if _encryption_key is not None:
header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str')
if _encryption_key_sha256 is not None:
header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str')
if _encryption_algorithm is not None:
header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str')
if _if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
if _if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
if _if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
if _if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
if _if_tags is not None:
header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if query_request is not None:
body_content = self._serialize.body(query_request, 'QueryRequest', is_xml=True)
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 206]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
if response.status_code == 200:
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta'))
response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length'))
response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type'))
response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range'))
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5'))
response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding'))
response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control'))
response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition'))
response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language'))
response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number'))
response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type'))
response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time'))
response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description'))
response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id'))
response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress'))
response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source'))
response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status'))
response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration'))
response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state'))
response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status'))
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count'))
response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted'))
response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256'))
response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope'))
response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5'))
deserialized = response.stream_download(self._client._pipeline)
if response.status_code == | |
<filename>rbf/linalg.py
'''
Module for linear algebra routines.
'''
import logging
import warnings
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spla
from scipy.linalg.lapack import (dpotrf, dpotrs, dtrtrs, dgetrf,
dgetrs)
from rbf.sputils import row_norms, divide_rows
LOGGER = logging.getLogger(__name__)
try:
from sksparse import cholmod
HAS_CHOLMOD = True
except ImportError:
HAS_CHOLMOD = False
CHOLMOD_MSG = (
'Could not import CHOLMOD. Sparse matrices will be converted to dense for '
'all Cholesky decompositions. To install CHOLMOD and its python wrapper, '
'follow the instructions at https://scikit-sparse.readthedocs.io. '
'Anaconda users can install CHOLMOD with the command `conda install -c '
'conda-forge scikit-sparse`')
LOGGER.debug(CHOLMOD_MSG)
## Wrappers for low level LAPACK functions
#####################################################################
def _lu(A):
'''
Computes the LU factorization of `A` using the routine `dgetrf`
Parameters
----------
A : (n, n) float array
Returns
-------
(n, n) float array
LU factorization
(n,) int array
pivots
'''
# handle rank zero matrix
if A.shape == (0, 0):
return (np.zeros((0, 0), dtype=float),
np.zeros((0,), dtype=np.int32))
# get the LU factorization
fac, piv, info = dgetrf(A)
if info < 0:
raise ValueError('the %s-th argument had an illegal value' % -info)
elif info > 0:
raise np.linalg.LinAlgError(
'U(%s, %s) is exactly zero. The factorization has been completed, but '
'the factor U is exactly singular, and division by zero will occur if '
'it is used to solve a system of equations. ' % (info, info))
return fac, piv
def _solve_lu(fac, piv, b):
'''
Solves the system of equations `Ax = b` given the LU factorization of `A`.
Uses the `dgetrs` routine.
Parameters
----------
fac : (n, n) float array
piv : (n,) int array
b : (n, *) float array
Returns
-------
(n, *) float array
'''
# handle the case of an array with zero-length for an axis.
if any(i == 0 for i in b.shape):
return np.zeros(b.shape)
x, info = dgetrs(fac, piv, b)
if info != 0:
raise ValueError('the %s-th argument had an illegal value' % -info)
return x
def _cholesky(A, lower=True):
'''
Computes the Cholesky decomposition of `A` using the routine `dpotrf`.
Parameters
----------
A : (n, n) float array
lower : bool, optional
Returns
-------
(n, n) float array
'''
# handle rank zero matrix
if A.shape == (0, 0):
return np.zeros((0, 0), dtype=float)
L, info = dpotrf(A, lower=lower)
if info > 0:
raise np.linalg.LinAlgError(
'The leading minor of order %s is not positive definite, and the '
'factorization could not be completed. ' % info)
elif info < 0:
raise ValueError('The %s-th argument has an illegal value.' % -info)
return L
def _solve_cholesky(L, b, lower=True):
'''
Solves the system of equations `Ax = b` given the Cholesky decomposition of
`A`. Uses the routine `dpotrs`.
Parameters
----------
L : (n, n) float array
b : (n, *) float array
Returns
-------
(n, *) float array
'''
if any(i == 0 for i in b.shape):
return np.zeros(b.shape)
x, info = dpotrs(L, b, lower=lower)
if info < 0:
raise ValueError('The %s-th argument has an illegal value.' % -info)
return x
def _solve_triangular(L, b, lower=True):
'''
Solve the triangular system of equations `Lx = b` using `dtrtrs`.
Parameters
----------
L : (n, n) float array
b : (n, *) float array
Returns
-------
(n, *) float array
'''
if any(i == 0 for i in b.shape):
return np.zeros(b.shape)
x, info = dtrtrs(L, b, lower=lower)
if info < 0:
raise ValueError('The %s-th argument had an illegal value' % (-info))
elif info > 0:
raise np.linalg.LinAlgError(
'The %s-th diagonal element of A is zero, indicating that the matrix is '
'singular and the solutions X have not been computed.' % info)
return x
#####################################################################
def as_sparse_or_array(A, dtype=None, copy=False):
    '''
    Return `A` as a csc sparse matrix if it is scipy-sparse, otherwise as a
    numpy array.
    '''
    if not sp.issparse(A):
        # no copy is made when `A` is already an array with the requested
        # dtype and `copy` is false
        return np.array(A, dtype=dtype, copy=copy)

    # no copy is made when `A` is already csc with the requested dtype and
    # `copy` is false
    return sp.csc_matrix(A, dtype=dtype, copy=copy)
def as_array(A, dtype=None, copy=False):
    '''
    Return `A` as a numpy array, densifying it first if it is scipy-sparse.
    '''
    dense = A.toarray() if sp.issparse(A) else A
    return np.array(dense, dtype=dtype, copy=copy)
class _SparseSolver(object):
    '''
    LU factorization of the sparse matrix `A`, computed with SuperLU.
    '''
    def __init__(self, A):
        nrows, ncols = A.shape
        LOGGER.debug(
            'computing the LU decomposition of a %s by %s sparse matrix with %s '
            'nonzeros ' % (nrows, ncols, A.nnz))
        self.factor = spla.splu(A)

    def solve(self, b):
        '''
        solves `Ax = b` for `x`
        '''
        return self.factor.solve(b)
class _DenseSolver(object):
    '''
    LU factorization of the dense matrix `A`.
    '''
    def __init__(self, A):
        # keep the factorization and pivots for repeated solves
        self.fac, self.piv = _lu(A)

    def solve(self, b):
        '''
        solves `Ax = b` for `x`
        '''
        return _solve_lu(self.fac, self.piv, b)
class Solver(object):
    '''
    Computes an LU factorization of `A` and provides a method to solve
    `Ax = b` for `x`. `A` can be a scipy sparse matrix or a numpy array.

    Parameters
    ----------
    A : (n, n) array or scipy sparse matrix
    '''
    def __init__(self, A):
        mat = as_sparse_or_array(A, dtype=float)
        # pick the sparse or dense backend based on the input type
        if sp.issparse(mat):
            self._solver = _SparseSolver(mat)
        else:
            self._solver = _DenseSolver(mat)

    def solve(self, b):
        '''
        solves `Ax = b` for `x`

        Parameters
        ----------
        b : (n, *) array or sparse matrix

        Returns
        -------
        (n, *) array
        '''
        rhs = as_array(b, dtype=float)
        return self._solver.solve(rhs)
class _SparsePosDefSolver(object):
    '''
    Factors the sparse positive definite matrix `A` as `LL^T = A`. Note that
    `L` is NOT necessarily the lower triangular matrix from a Cholesky
    decomposition. Instead, it is structured to be maximally sparse. This
    class requires CHOLMOD.
    '''
    def __init__(self, A):
        nrows, ncols = A.shape
        LOGGER.debug(
            'computing the Cholesky decomposition of a %s by %s sparse matrix '
            'with %s nonzeros ' % (nrows, ncols, A.nnz))
        self.factor = cholmod.cholesky(
            A,
            use_long=False,
            ordering_method='default')
        # squared diagonal components of the Cholesky factorization
        self.d = self.factor.D()
        # permutation that makes the factorization of `A` maximally sparse
        self.p = self.factor.P()

    def solve(self, b):
        '''
        solves `Ax = b` for `x`
        '''
        return self.factor.solve_A(b)

    def solve_L(self, b):
        '''
        Solves `Lx = b` for `x`
        '''
        if b.ndim not in (1, 2):
            raise ValueError('`b` must be a one or two dimensional array')

        scale = 1.0/np.sqrt(self.d)
        if b.ndim == 2:
            # expand for broadcasting over the columns of `b`
            scale = scale[:, None]

        return scale*self.factor.solve_L(b[self.p])

    def L(self):
        '''Return the factorization `L`'''
        inverse_perm = np.argsort(self.p)
        return self.factor.L()[inverse_perm]

    def log_det(self):
        '''Returns the log determinant of `A`'''
        return np.log(self.d).sum()
class _DensePosDefSolver(object):
    '''
    Cholesky factorization of the dense positive definite matrix `A`, built
    on low level LAPACK functions.
    '''
    def __init__(self, A):
        self.chol = _cholesky(A, lower=True)

    def solve(self, b):
        '''
        Solves the equation `Ax = b` for `x`
        '''
        return _solve_cholesky(self.chol, b, lower=True)

    def solve_L(self, b):
        '''
        Solves the equation `Lx = b` for `x`, where `L` is the Cholesky
        decomposition.
        '''
        return _solve_triangular(self.chol, b, lower=True)

    def L(self):
        '''Returns the Cholesky decomposition of `A`'''
        return self.chol

    def log_det(self):
        '''Returns the log determinant of `A`'''
        return 2.0*np.log(np.diag(self.chol)).sum()
class PosDefSolver(object):
'''
Factors the positive definite matrix `A` as `LL^T = A` and provides an
efficient method for solving `Ax = b` for `x`. Additionally provides a method
to solve `Lx = b`, get the log determinant of `A`, and get `L`. `A` can be a
scipy sparse matrix or a numpy array.
Parameters
----------
A : (n, n) array or scipy sparse matrix
Positive definite matrix
'''
def __init__(self, A):
A = as_sparse_or_array(A, dtype=float)
if sp.issparse(A) & (not HAS_CHOLMOD):
warnings.warn(CHOLMOD_MSG)
A = A.toarray()
if sp.issparse(A):
self._solver = _SparsePosDefSolver(A)
else:
self._solver = _DensePosDefSolver(A)
def solve(self, b):
'''
solves `Ax = b` for `x`
Parameters
----------
b : (n, | |
by (jumps)
pointerBoost = [0,0]
#the currently selected character/instruction
currentChar = self.code[self.pointerY][self.pointerX]
#if it is a command or code character
if currentChar in validParts or currentChar in otherCommands:
#if the character is the invoke command
if currentChar == "I":
#get which command the code makes up
command = self.whichCommand(self.parts)
if command == 0:
#right 1 pot
#increase selected pot
self.currentPot = self.currentPot + 1
#if this exceeds where there are currently pots
if self.currentPot >= len(self.manaPots):
#add a new empty pot
self.manaPots.append(0)
self.manaOutput.addPot()
#select the correct pot
self.manaOutput.changeSelected(self.currentPot)
self.outputDebug("Moving right 1 pot")
elif command == 1:
#left 1 pot
#move to the left
self.currentPot = self.currentPot - 1
#if at the left wall
if self.currentPot < 0:
#stay on first pot
self.currentPot = 0
#select the correct pot
self.manaOutput.changeSelected(self.currentPot)
self.outputDebug("Moving left 1 pot")
elif command == 2:
#add 1 to current pot
#call for 1 mana to be added to the current pot
self.manaPots = self.addMana(self.manaPots, 1, self.currentPot)
self.outputDebug("Adding 1 to pot " + str(self.currentPot))
elif command == 3:
#remove 1 from current pot
#if there is mana in the pot
if self.manaPots[self.currentPot] > 0:
#take 1 mana from the pot
self.manaPots[self.currentPot] = self.manaPots[self.currentPot] - 1
self.outputDebug("Taking 1 from pot " + str(self.currentPot))
elif command == 4:
#output value of current pot
#output the number
self.outputDebug("Outputting pot " + str(self.currentPot) + " value")
self.outputInt(self.manaPots[self.currentPot])
elif command == 5:
#output character of current pot
#output character
self.outputDebug("Outputting pot " + str(self.currentPot) + " character")
self.outputChar(self.manaPots[self.currentPot])
#a new line is needed when a number is printed
self.needLine = True
self.needLineDebug = True
elif command == 6:
#Input and add to current pot
#get amount to add
self.inputNeeded = True
self.inputButton.config(state="normal")
#focus the input
self.inputEntry.focus_set()
self.outputDebug("Getting user input (store value or character value in pot " + str(self.currentPot)+ ")")
elif command == 7:
#draw as much as possible to fill phial
#get the maximum amount that can be taken
maximum = 511 - self.manaPhial
#if the maximum is greater than or equal to the amount in the current pot
if maximum >= self.manaPots[self.currentPot]:
#take all into phial
self.manaPhial = self.manaPhial + self.manaPots[self.currentPot]
#set pot to empty
self.manaPots[self.currentPot] = 0
else:
#fill phial
self.manaPhial = 511
#remove maximum from current pot
self.manaPots[self.currentPot] = self.manaPots[self.currentPot] - maximum
self.outputDebug("Drawing from " + str(self.currentPot) + " to fill phial")
elif command == 8:
#pour all from phial into pot
#add the amount in the phial to the current pot
self.manaPots = self.addMana(self.manaPots, self.manaPhial, self.currentPot)
#empty phial
self.manaPhial = 0
self.outputDebug("Pouring phial into pot " + str(self.currentPot))
elif command == 9:
#stop
self.programComplete = True
self.outputDebug("Program Stop")
elif currentChar == "R":
#revoke - jump if empty pot
#if the current pot is empty
if self.manaPots[self.currentPot] == 0:
#move the pointer 1 further in it's current direction
pointerBoost[0] = self.pointerChange[0]
pointerBoost[1] = self.pointerChange[1]
self.outputDebug("Pot " + str(self.currentPot) +" is empty, jumping over")
else:
self.outputDebug("Pot " + str(self.currentPot) +" is not empty ("+ str(self.manaPots[self.currentPot]) +"), no jump")
elif currentChar == ">":
#switch to move right
self.pointerChange = [1, 0]
self.outputDebug("Moving right")
elif currentChar == "<":
#switch to move left
self.pointerChange = [-1, 0]
self.outputDebug("Moving left")
elif currentChar == "V":
#switch to move down
self.pointerChange = [0, 1]
self.outputDebug("Moving down")
elif currentChar == "^":
#switch to move up
self.pointerChange = [0, -1]
self.outputDebug("Moving up")
elif currentChar in validParts:
#it is Q,W or E
#replace the next part to replace whith this character
self.parts[self.partToReplace] = currentChar
#move to replace on one
self.partToReplace = self.partToReplace + 1
#if it is beyond the end
if self.partToReplace > 2:
#move back to start
self.partToReplace = 0
else:
returnValue = 1
#adjust instruction pointer
if not self.inputNeeded and not self.programComplete:
self.pointerX = self.pointerX + self.pointerChange[0] + pointerBoost[0]
self.pointerY = self.pointerY + self.pointerChange[1] + pointerBoost[1]
self.movePointer()
#button colours for parts
quasColour = "#103be8"
wexColour = "#e810ae"
exortColour = "#dd8006"
colours = []
#fill list with colours based on parts
for i in self.parts:
if i == "Q":
colours.append(quasColour)
elif i == "W":
colours.append(wexColour)
elif i == "E":
colours.append(exortColour)
else:
colours.append(self.defaultColour)
#iterate for the parts
for i in range(0, 3):
#set the colours and text
self.partsOutput[i].config(text=self.parts[i], bg=colours[i])
#set the phical output
self.phialOutput.config(text=self.manaPhial)
#iterate for the pots
for i in range(0, len(self.manaPots)):
#set the pot values
self.manaOutput.changeValue(i, self.manaPots[i])
#get the command for the currently set parts
comm = self.whichCommand(self.parts)
#set the tooltip to be correct for the command
if comm == -1:
self.tip
self.tip.tooltip_text = ["No Command"]
elif comm == 0:
self.tip.tooltip_text = ["Move right one pot"]
elif comm == 1:
self.tip.tooltip_text = ["Move left one pot"]
elif comm == 2:
self.tip.tooltip_text = ["Add 1 mana to current pot"]
elif comm == 3:
self.tip.tooltip_text = ["Take 1 mana from current pot"]
elif comm == 4:
self.tip.tooltip_text = ["Output number in current pot"]
elif comm == 5:
self.tip.tooltip_text = ["Output character of current pot"]
elif comm == 6:
self.tip.tooltip_text = ["Input value and add that mana to current pot"]
elif comm == 7:
self.tip.tooltip_text = ["Draw as much mana as possible from current pot to fill phial"]
elif comm == 8:
self.tip.tooltip_text = ["Pour all of phial into current pot"]
elif comm == 9:
self.tip.tooltip_text = ["Stop program"]
try:
#attempt to check if there is a break point
if self.codeWindow.checkBreakPoint(self.pointerX, self.pointerY):
#pause if there is
self.running = False
self.pauseButton.config(state = "disabled")
self.runButton.config(state = "normal")
#debug output
self.outputDebug("Breakpoint reached, pausing")
except:
#failed - code window is closed - don't do anything
pass
#if the program finished
if self.programComplete:
#output the end message
self.endMessage()
#-1 - means program ended
return -1
#return the default return value 0 - normal character, other - blank
return returnValue
def openMenu (root):
'''Create and run the default menu - to input the file path'''
#set the title and geometry
root.title("Invoke: File Open")
root.geometry("300x30")
#setup the grid
root.grid_rowconfigure(0, minsize=300)
root.grid_columnconfigure(0, minsize=30)
#add a frame to hold the input
mainFrame = tkinter.Frame(root)
mainFrame.grid(row=0, column=0, sticky=(N,E,S,W))
#create a stringvar to hold the path
path = tkinter.StringVar()
def convertToValid (fileInput):
    '''Convert file data to a rectangular grid of valid characters only.

    The input is upper-cased, characters outside the instruction set become
    spaces, and every row is padded with spaces to the longest line.'''
    validChars = ["Q", "W", "E", "R", "I", ">", "<", "V", "^"]
    # work in upper case so lower-case source files are accepted
    lines = fileInput.upper().split("\n")
    # width of the widest line - every row is padded to this
    maxLength = max((len(line) for line in lines), default=-1)
    data = []
    for line in lines:
        # keep valid instruction characters, blank out everything else
        lineData = [char if char in validChars else " " for char in line]
        # pad with blanks so the grid is rectangular
        lineData.extend(" " * (maxLength - len(lineData)))
        data.append(lineData)
    return data
def load (pathString):
    '''Read and return the contents of an .inv file, or None on failure.'''
    # only files with the .inv extension are accepted
    if not pathString.endswith(".inv"):
        return None
    try:
        # read the whole file and make sure it gets closed again
        with open(pathString, "r") as invokeFile:
            return invokeFile.read()
    except:
        # the file could not be opened or read
        return None
def attemptLoad ():
    '''Load has been pressed so attempt a load'''
    # get the path typed into the entry widget
    pathData = path.get()
    # load the file's data (None if it could not be read or is not .inv)
    data = load(pathData)
    # if there is some data
    if data != None:
        # convert to valid grid
        data = convertToValid(data)
        # only proceed when the grid has at least one row and one column
        if len(data) > 0:
            if len(data[0]) > 0:
                # quit the path input menu
                mainFrame.quit()
                # open the main program
                openProgram(root, data)
                # once the program returns quit the main - prevents hanging
                root.quit()
#setup the frame grid - | |
Data adn writes them to file
Parameters
----------
SD : DataContainer
Star Data in question
iter : int
Total Fits to compute and use for Errorbar calculation
"""
FileToWriteTo = outputFile
if 'File' in kwargs.keys():
FileToWriteTo = kwargs['File']
if 'UseSGRA_Pos' in kwargs.keys():
useSGRA_Pos = kwargs['UseSGRA_Pos']
else:
useSGRA_Pos = False
#------------------------------------------------------------------------
# MAIN LOOP
# main loop, generating new data and fitting to get a list for every parameter
for curIt in range(iter):
# generate new points within 1 sigma of error
newRA = generateMCData(SD.RA, SD.eRA)
newDE = generateMCData(SD.DE, SD.eDE)
newVR = generateMCData(SD.VR, SD.eVR)
print(SD.RA)
print(newRA)
print(SD.DE)
print(newDE)
# create copy of Star Data and overrite the points
NewSD = SD.copy()
NewSD.RA = newRA
NewSD.DE = newDE
NewSD.VR = newVR
# generate new position of sgr a* if needed, use global bounds
if useSGRA_Pos:
#local best fit
NewSGRA_Pos = np.random.normal(0,1,3) * GLOB_SGRA_Pos
else:
NewSGRA_Pos = np.array([0,0,0])
# change the position of sgr a*
kwargs['varSGRA'] = NewSGRA_Pos
if DEBUG_MODE:
print("New Position of SGR A*: ", NewSGRA_Pos)
# Fit the new Star Data
print("\n")
print('-'*25 + "Starting new Fit (%s/%s)" % (curIt+1, iter))
newFD, _ = FitDataInner(NewSD, kwargs=kwargs)
_tParVec = newFD.returnParVec()
# write data to file
f = open(FileToWriteTo, "a")
for j in range(len(_tParVec)):
f.write(str(_tParVec[j]) + " ")
f.write("\n")
f.close()
print("\nDone!\n")
def genMCD_MP(SD:DataContainer, pid:int, kwargs:dict=None):
    """
    Draws one Monte Carlo realization of the Star Data (and optionally of the
    Sgr A* position), fits it, and appends the resulting parameter vector to
    the output file. Intended to be run as one multiprocessing task.

    Parameters
    ----------
    SD : DataContainer
        Star Data in question
    pid : int
        Task id, used only in the progress messages
    kwargs : dict, optional
        Fit options, passed on to `FitDataInner`. Recognized keys:
        'File' - output file path (defaults to module level `outputFile`);
        'UseSGRA_Pos' - if True, also vary the Sgr A* position.
        NOTE: kwargs['varSGRA'] is overwritten by this function.
    """
    # NOTE(fix): the default used to be a shared mutable `{}`; since this
    # function writes kwargs['varSGRA'], that leaked state between calls.
    if kwargs is None:
        kwargs = {}
    FileToWriteTo = kwargs.get('File', outputFile)
    useSGRA_Pos = kwargs.get('UseSGRA_Pos', False)
    #------------------------------------------------------------------------
    # MAIN LOOP
    # generate new points within 1 sigma of error
    newRA = generateMCData(SD.RA, SD.eRA)
    newDE = generateMCData(SD.DE, SD.eDE)
    newVR = generateMCData(SD.VR, SD.eVR)
    # create a copy of the Star Data and overwrite the points
    NewSD = SD.copy()
    NewSD.RA = newRA
    NewSD.DE = newDE
    NewSD.VR = newVR
    # generate a new position of Sgr A* if requested, using the global bounds
    if useSGRA_Pos:
        NewSGRA_Pos = np.random.normal(0, 1, 3) * GLOB_SGRA_Pos
        #NewSGRA_Pos = PosRadToReal(NewSGRA_Pos, _tDist)
    else:
        NewSGRA_Pos = np.array([0,0,0])
    # still in mas
    kwargs['varSGRA'] = NewSGRA_Pos
    # Fit the new Star Data
    print('-'*25 + "Starting new Fit (%s)" % (pid))
    newFD, _ = FitDataInner(NewSD, kwargs=kwargs)
    _tParVec = newFD.returnParVec()
    # append the fitted parameter vector to the output file; `with` makes
    # sure the handle is closed even if a write fails
    with open(FileToWriteTo, "a") as f:
        for j in range(len(_tParVec)):
            f.write(str(_tParVec[j]) + " ")
        f.write("\n")
    print("%s, Done!" % (pid))
def evaMCD(_fig, file:str):
    """
    Evaluates the Monte Carlo parameter vectors written to `file`: prints the
    mean and standard deviation of every derived orbital element and draws a
    corner plot (1d histograms on the diagonal, 2d histograms below it) into
    `_fig`.

    Parameters
    ----------
    _fig : matplotlib figure
        Figure the corner plot is drawn into; it is cleared first.
    file : str
        Path of a file with one whitespace separated parameter vector per
        line, as written by the genMCD functions.
    """
    print(file)
    # read every stored parameter vector
    # NOTE(review): `f` is never closed
    f = open(file, 'r')
    lines = f.readlines()
    h= []
    for i in range(len(lines)):
        # parse one whitespace separated line into a list of floats
        _t = lines[i].strip()
        _t = _t.split(" ")
        _t = [float(x) for x in _t]
        h.append(_t)
    mean = []
    std = []
    g = []
    histData = []
    # axis labels for the corner plot
    histName = [r'$M$ [$10^6 M_\odot$]', r'$R$ [kpc]', r'$e$ [1]', r'$a$ [$10^{-3}$pc]', r'$i$ [$^\circ$]', r'$T$ [yr]']
    kartPos = []
    # names of the printed quantities (same order as the columns of `g`)
    N = ["Mass", "R", "e", "a", "i", "LAN", "argPeri", "MeanM", "T", "True Anomaly"]
    print("-"*75)
    for i in range(len(h)):
        # derive the orbital elements from this parameter vector
        OE = getOrbitalElements(h[i])
        # Mass, Distance, e, a, i, Omega, omega, M, T, True Anomaly
        g.append( [ h[i][0], h[i][7], OE.Ecc, OE.MayAxis, OE.Incl, OE.LAN, OE.ArgPeri, OE.MeanM, OE.Period, OE.TAnom ] )
        # histogram data in plot units (mass in 1e6 M_sun, distance in kpc)
        histData.append([ h[i][0]/1E6, h[i][7]/1E3, OE.Ecc, OE.MayAxis, OE.Incl, OE.Period ])
        # position
        kartPos.append( [ h[i][1], h[i][2], h[i][3], h[i][4], h[i][5], h[i][6] ] )
    # print mean and std for every derived quantity
    for j in range(len(g[0])):
        ParDat = [g[i][j] for i in range(len(g))]
        mean.append( np.mean( ParDat ) )
        std.append( np.std( ParDat ) )
        print("MCD: ", N[j], ", mean= ", mean[j], "; std= ", std[j])
    print("Length of data: ", len(g))
    '''
    mean = []
    std = []
    N = ["x", "y", "z", "vx", "vy", "vz"]
    for j in range(len(kartPos[0])):
        ParDat = [kartPos[i][j] for i in range(len(kartPos))]
        mean.append( np.mean( ParDat ) )
        std.append( np.std( ParDat ) )
        print("MCD: ", N[j], ", mean= ", mean[j], "; std= ", std[j])
    '''
    print("-"*75)
    _fig.clf()
    # recompute mean/std in the plot units used by histData
    mean = []
    std = []
    for j in range(len(histData[0])):
        ParDat = [histData[i][j] for i in range(len(histData))]
        mean.append( np.mean( ParDat ) )
        std.append( np.std( ParDat ) )
    # rescale the semi-major axis column (index 3) for display
    mean[3] *= 1E3
    std[3] *= 1E3
    for l in range(len(histData)):
        histData[l][3] *= 1E3
    # corner plot: only the lower triangle of the 6x6 grid is drawn
    for x in range(6):
        for y in range(6):
            if y <= x:
                _tf = _fig.add_subplot(6,6, 6*x + y + 1)
                _tf.grid(False)
                _tf.set_aspect('auto')
                #_tf.set_xlabel(histName[y])
                #_tf.set_ylabel(histName[x])
                # only the leftmost column keeps y tick labels
                if y != 0 or x == 0:
                    plt.yticks([])
                else:
                    pass
                    plt.yticks( [4.23,4.275,8.35,8.40,0.881,0.884,4.993,5.013,44.3,44.8,16.03,16.11],
                                [4.23,4.275,8.35,8.40,0.881,0.884,4.993,5.013,44.3,44.8,16.03,16.11], size=8)
                    plt.yticks(size=8)
                # only the bottom row keeps x tick labels
                if x != 5 or y == 5:
                    plt.xticks([])
                else:
                    pass
                    plt.xticks( [4.23,4.275,8.35,8.40,0.881,0.884,4.993,5.013,44.3,44.8,16.03,16.11],
                                [4.23,4.275,8.35,8.40,0.881,0.884,4.993,5.013,44.3,44.8,16.03,16.11], rotation=90, size=8)
                    #plt.xticks(rotation=90, size=8)
                if y == x:
                    # diagonal: 1d histogram with the mean marked
                    _t = [histData[i][x] for i in range(len(histData))]
                    plt.xlim(mean[x]-3*std[x],mean[x]+3*std[x])
                    _tf.hist(_t, bins=50)
                    plt.axvline(mean[x], color='black', linestyle='dashed')
                    plt.figtext(0.165 + 0.8/6 * x, 0.91 - 0.8/6 * x, round(mean[x], 3), ha='center', size=11 )
                else:
                    # off-diagonal: 2d histogram of the parameter pair
                    _x = [histData[i][y] for i in range(len(histData))]
                    _y = [histData[i][x] for i in range(len(histData))]
                    _t = _tf.hist2d(_x, _y, bins=(20,20), cmap=cm.jet)
    #_fig.tight_layout()
    plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1, wspace=0, hspace=0)
    # shared axis labels around the outside of the grid
    for i in range(len(histName)):
        # horizontal at bottom
        plt.figtext(0.15 + 0.825/6 * i, 0.02, histName[i], {'ha':'center', 'size':9})
        # vertical at left
        plt.figtext(0.01, 0.15 + 0.825/6 * (5-i), histName[i], {'ha':'left', 'size':9})
def DrawChi2Slice(_fig, SD:DataContainer, parVec:list, bounds:list, IndexList:list, _dim:int=50, kwargs:dict={}):
    """
    Draws chi2 distribution for M and R.

    The chi2 grid itself is currently READ from a previously computed dump
    file ("SMBH/Data/dump/chi2Slice.txt"); the code that computed and wrote
    that grid is kept below as commented-out blocks.

    Parameters
    ----------
    SD : DataContainer
        Star Data
    parVec : list
        Current parameter vector, preferably from the minimum
    bounds : list
        Bounds for both varied parameters: [ (M_lo, M_hi), (R_lo, R_hi) ]
    IndexList : list
        Index of parameters to be varied, needs to be length 2
    _dim : int, optional
        Number of grid points per axis
    kwargs : dict, optional
        Passed on to `getOrbit` in the (commented-out) computation code
    """
    dim = _dim
    chi2 = np.zeros((dim, dim))
    # parameter values spanned by the grid axes
    Mvar = np.linspace(bounds[0][0], bounds[0][1], dim)
    Rvar = np.linspace(bounds[1][0], bounds[1][1], dim)
    iter = 0
    # load the previously computed chi2 grid from the dump file
    # NOTE(review): `_chi2File` is never closed
    _chi2File = open("SMBH/Data/dump/chi2Slice.txt", 'r')
    lines = _chi2File.readlines()
    h= []
    for i in range(len(lines)):
        _t = lines[i].strip()
        _t = _t.split(" ")
        _t = [float(x) for x in _t]
        h.append(_t)
    chi2 = np.array(h)
    # NOTE(review): the loops below iterate over a hard-coded 100 rows and
    # columns; presumably the dump was computed with dim=100 - confirm.
    # per-row minimum (M value minimizing chi2 for every R)
    miny_i = []
    for i in range(100):
        _l = np.amin(chi2[i])
        _m = np.argwhere(chi2[i] == _l).flatten()
        miny_i.append(Mvar[_m])
    # per-column minimum (R value minimizing chi2 for every M)
    minx_i = []
    for j in range(100):
        _m = np.amin(chi2[:,j])
        _n = np.argwhere(chi2[:,j] == _m).flatten()
        minx_i.append(Rvar[_n])
    # first M value within +1 of the per-row minimum
    midy_i = []
    for i in range(100):
        _l = np.amin(chi2[i])
        _m = np.argwhere(chi2[i] <= _l+1).flatten()[0]
        midy_i.append(Mvar[_m])
    # first R value within +1 of the per-column minimum
    midx_i = []
    for j in range(100):
        _m = np.amin(chi2[:,j])
        _n = np.argwhere(chi2[:,j] <= _m+1).flatten()[0]
        midx_i.append(Rvar[_n])
    '''
    # Distance
    for i in range(dim):
        # Mass
        for j in range(dim):
            iter += 1
            newPar = parVec.copy()
            newPar[0] = Mvar[j]*1E6 # mass
            newPar[-1] = Rvar[i]*1E3 # distance
            OrbEl = getOrbitalElements(newPar)
            if (OrbEl.Period > 0 and OrbEl.Ecc < 1 and OrbEl.Period <= 20):
                _FD = getOrbit(SD=SD, Orb=OrbEl, ParamVec=newPar, index=IndexList[0], max_iter=500000, stepsize=1E-8, kwargs=kwargs)
                x = returnCombinedError(SD, _FD, IndexList)
                chi2[i][j] = x
                ProgressBar(iter, dim**2, "chi2= " + str(x))
            else:
                ProgressBar(iter, dim**2, "chi2= 1E10")
                chi2[i][j] = 1E10
    print("\nDone!")
    '''
    '''
    _chi2File = open("SMBH/Data/dump/chi2Slice.txt", "w")
    for i in range(dim):
        for j in range(dim):
            _chi2File.write( str(chi2[i][j]) + " " )
        _chi2File.write("\n")
    _chi2File.close()
    '''
    # location of the global minimum of the grid
    _min = np.argwhere(chi2 == np.amin(chi2)).flatten()
    _minValue = [ Mvar[_min[1]], Rvar[_min[0]] ]
    maxval = np.amax(chi2)
    minval = np.amin(chi2)
    # logarithmically spaced contour levels
    levels = np.geomspace(minval,maxval, 25)
    _fig.clf()
    _tf = _fig.add_subplot(1,1,1)
    _tf.grid(False)
    _tf.set_aspect('auto')
    _tf.set_xlabel(r'$R_0$ [kpc]', fontdict={'size':13})
    _tf.set_ylabel(r'$M$ [$10^6 M_\odot$]', fontdict={'size':13})
    xgit,ygit = np.meshgrid(Rvar, Mvar)
    ax = _tf.contourf(xgit.T, ygit.T, chi2, cmap=cm.get_cmap('viridis'), levels=levels)
    _fig.colorbar(ax)
    # mark the global minimum and the per-row minimum line
    _label = r"Min: $M$ [$10^6 M_\odot$]="+str(np.round(_minValue[0],2)) + r", $R_0$ [kpc]=" + str(np.round(_minValue[1],2))
    _tf.scatter(_minValue[1], _minValue[0], label=_label, color='red', s=5)
    _tf.plot(Rvar,miny_i,color='blue', label='min line')
    print(_minValue)
    _tf.legend(loc='best')
def determineDeltaOmega(FD:FitContainer) -> list:
startPos = ParVecToParams(FD.returnParVec())[1]
StartEr = startPos / np.linalg.norm(startPos)
#dotList = np.abs( np.dot( FD.PositionArray / np.linalg.norm(FD.PositionArray), StartEr ) )
dotList = np.empty(len(FD.PositionArray))
for i in range(len(FD.PositionArray)):
dot = FD.PositionArray[i] / np.linalg.norm(FD.PositionArray[i])
dotList[i] = ( np.abs( np.dot(dot, StartEr) ) )
xmin = np.argwhere(dotList <= 1E-2).flatten()
print(xmin)
miniVal = 9999999999
miniIndex = -1
for i in range(len(xmin)):
x = FD.PositionArray[i]
#print(x)
if not np.array_equal(x,startPos):
if np.dot(x,startPos) <= miniVal:
miniVal = np.dot(x/np.linalg.norm(x),StartEr)
miniIndex = i
if miniIndex >= 0:
OriginOE = getOrbitalElements(FD.returnParVec())
newParVec = [FD.Mass, *FD.PositionArray[miniIndex], *FD.VelocityArray[miniIndex], FD.Distance]
NewOE = getOrbitalElements(newParVec)
GR_Const = 6*np.pi*GLOB_G*FD.Mass / GLOB_c**2
GR_Predict = GR_Const / (OriginOE.MayAxis * (1 - OriginOE.Ecc**2))
GR_Predict_Degree = GR_Predict * 180 / np.pi
DeltaSim = np.abs(OriginOE.ArgPeri - NewOE.ArgPeri)
print("GR predicted [degr]: ", GR_Predict_Degree)
print(" Simulation | |
* x)
R0 += 0.00000002371 * mu.cost(4.75067664712 + 103.09277421860 * x)
R0 += 0.00000002963 * mu.cost(0.23381699914 + 20597.24396304120 * x)
R0 += 0.00000002190 * mu.cost(6.18344448099 + 3346.13535100720 * x)
R0 += 0.00000002444 * mu.cost(1.92547995169 + 7799.98064550240 * x)
R0 += 0.00000002121 * mu.cost(4.87491216115 + 9830.38901398780 * x)
R0 += 0.00000002532 * mu.cost(5.39550087270 + 3863.18984479360 * x)
R0 += 0.00000002101 * mu.cost(2.84309138388 + 3415.39402526710 * x)
R0 += 0.00000002176 * mu.cost(0.58632570025 + 162.46663613220 * x)
R0 += 0.00000002106 * mu.cost(3.06229353931 + 19800.94595622480 * x)
R0 += 0.00000002347 * mu.cost(3.90795942709 + 3335.08950239240 * x)
R0 += 0.00000002031 * mu.cost(5.52057907797 + 10021.90459040220 * x)
R0 += 0.00000001997 * mu.cost(2.77243710569 + 13936.79450513400 * x)
R0 += 0.00000002139 * mu.cost(5.40620646615 + 266.60704172180 * x)
R0 += 0.00000002147 * mu.cost(0.08966987600 + 13358.92658845020 * x)
R0 += 0.00000001996 * mu.cost(2.62541669265 + 20.77539549240 * x)
R0 += 0.00000001961 * mu.cost(4.88521794174 + 3237.51965248120 * x)
R0 += 0.00000002216 * mu.cost(1.06829128652 + 3320.25710730100 * x)
R0 += 0.00000002131 * mu.cost(3.02112533027 + 5625.36604155940 * x)
R0 += 0.00000002124 * mu.cost(3.68620121537 + 5618.31980486140 * x)
R0 += 0.00000001938 * mu.cost(1.29006691721 + 17924.91069982040 * x)
R0 += 0.00000002555 * mu.cost(4.91826220321 + 6604.95878212400 * x)
R0 += 0.00000002561 * mu.cost(2.10055088914 + 7910.18696672180 * x)
R0 += 0.00000001820 * mu.cost(5.57528712663 + 3351.24909204960 * x)
R0 += 0.00000001786 * mu.cost(5.77310414452 + 3607.21946842160 * x)
R0 += 0.00000001780 * mu.cost(4.48010071981 + 10818.13528691580 * x)
R0 += 0.00000002106 * mu.cost(5.75526661975 + 13365.97282514820 * x)
R0 += 0.00000001987 * mu.cost(2.61151965233 + 3546.79797513700 * x)
R0 += 0.00000001799 * mu.cost(2.73192475257 + 3360.96774609859 * x)
R0 += 0.00000001715 * mu.cost(1.50805385053 + 1692.16566950240 * x)
R0 += 0.00000001752 * mu.cost(2.21455466761 + 13119.72110282519 * x)
R0 += 0.00000001913 * mu.cost(3.32230688971 + 6702.00024889200 * x)
R0 += 0.00000001724 * mu.cost(1.43449979531 + 4885.96640967860 * x)
R0 += 0.00000002050 * mu.cost(1.19293239093 + 6660.44945790720 * x)
R0 += 0.00000001774 * mu.cost(2.18404386388 + 6784.31762761820 * x)
R0 += 0.00000001722 * mu.cost(4.86031154305 + 10014.72373309860 * x)
R0 += 0.00000001773 * mu.cost(2.09448668554 + 3603.69635007260 * x)
R0 += 0.00000001606 * mu.cost(3.48105136801 + 23141.55838292460 * x)
R0 += 0.00000001621 * mu.cost(5.73820120882 + 4555.34744602040 * x)
R0 += 0.00000001579 * mu.cost(1.88769198841 + 6298.32832117640 * x)
R0 += 0.00000001530 * mu.cost(5.16381564230 + 76.26607127560 * x)
R0 += 0.00000001615 * mu.cost(3.24110713658 + 3657.00429635640 * x)
R0 += 0.00000001576 * mu.cost(3.52622401575 + 6688.33840040040 * x)
R0 += 0.00000002034 * mu.cost(2.63620520451 + 16460.33352952499 * x)
R0 += 0.00000002025 * mu.cost(5.92907541624 + 10021.76996979660 * x)
R0 += 0.00000001689 * mu.cost(4.41053057494 + 5729.50644714900 * x)
R0 += 0.00000001878 * mu.cost(4.53291044847 + 3329.97576135000 * x)
R0 += 0.00000001530 * mu.cost(4.76331644411 + 7895.95987272020 * x)
R0 += 0.00000001529 * mu.cost(1.35289110986 + 1581.95934828300 * x)
R0 += 0.00000001807 * mu.cost(1.86212004697 + 2693.60159338500 * x)
R0 += 0.00000001855 * mu.cost(2.38561742394 + 6843.69148953180 * x)
R0 += 0.00000001518 * mu.cost(3.98476157750 + 6546.15977336420 * x)
R0 += 0.00000001389 * mu.cost(1.82099537095 + 9779.10867612540 * x)
R0 += 0.00000001447 * mu.cost(2.35649936427 + 6034.21402008480 * x)
R0 += 0.00000001386 * mu.cost(5.55304113895 + 4775.76008845920 * x)
R0 += 0.00000001372 * mu.cost(1.07224580315 + 12722.55242048520 * x)
R0 += 0.00000001423 * mu.cost(4.46530428193 + 574.34479833480 * x)
R0 += 0.00000001424 * mu.cost(2.57162391016 + 3399.98628861340 * x)
R0 += 0.00000001380 * mu.cost(5.76156315252 + 16335.83780453660 * x)
R0 += 0.00000001338 * mu.cost(2.97604558638 + 6127.65545055720 * x)
R0 += 0.00000001479 * mu.cost(4.74310691166 + 12566.15169998280 * x)
R0 += 0.00000001706 * mu.cost(0.30579918494 + 10551.52824519400 * x)
R0 += 0.00000001281 * mu.cost(2.00285974432 + 6677.63442474780 * x)
R0 += 0.00000001350 * mu.cost(0.78892333409 + 853.19638175200 * x)
R0 += 0.00000001534 * mu.cost(4.33326399444 + 640.87760738220 * x)
R0 += 0.00000001247 * mu.cost(1.02503908468 + 3024.22055704320 * x)
R0 += 0.00000001289 * mu.cost(1.92786975543 + 3347.65866339780 * x)
R0 += 0.00000001243 * mu.cost(2.44217806237 + 6684.81528205140 * x)
R0 += 0.00000001453 * mu.cost(1.74218016403 + 3333.56619000180 * x)
R0 += 0.00000001675 * mu.cost(1.79693456330 + 1118.75579210280 * x)
R0 += 0.00000001491 * mu.cost(2.59386711806 + 2494.52959194860 * x)
R0 += 0.00000001293 * mu.cost(3.31710472549 + 3407.09983561420 * x)
R0 += 0.00000001188 * mu.cost(4.92989260576 + 22743.40937951640 * x)
R0 += 0.00000001329 * mu.cost(1.99426530402 + 1228.96211332220 * x)
R0 += 0.00000001373 * mu.cost(2.53354987340 + 5459.37628707820 * x)
R0 += 0.00000001183 * mu.cost(4.25338096667 + 3344.49376205780 * x)
R0 += 0.00000001231 * mu.cost(2.50206227837 + 4356.27544458400 * x)
R0 += 0.00000001243 * mu.cost(2.65176267860 + 74.78159856730 * x)
R0 += 0.00000001285 * mu.cost(4.34087881585 + 3326.38533269820 * x)
R0 += 0.00000001119 * mu.cost(1.91321862491 + 3281.23856478620 * x)
R0 += 0.00000001094 * mu.cost(5.50748655535 + 3017.10701004240 * x)
R0 += 0.00000001259 * mu.cost(3.77654662830 + 11236.57229942000 * x)
R0 += 0.00000001285 * mu.cost(1.38335267684 + 3077.52850332700 * x)
R0 += 0.00000001100 * mu.cost(1.17130732373 + 6606.44325483230 * x)
R0 += 0.00000001115 * mu.cost(5.81275569652 + 2675.85638156980 * x)
R0 += 0.00000001380 * mu.cost(5.70641426169 + 2807.39834325620 * x)
R0 += 0.00000001256 * mu.cost(3.35479933251 + 4039.88357492740 * x)
R0 += 0.00000001187 * mu.cost(2.41348693872 + 10596.18207843420 * x)
R0 += 0.00000001052 * mu.cost(3.33521939538 + 3304.58456002240 * x)
R0 += 0.00000001188 * mu.cost(5.84735836632 + 3336.73109134180 * x)
R0 += 0.00000001072 * mu.cost(2.78383184435 + 8270.29774868340 * x)
R0 += 0.00000001105 * mu.cost(3.03463252672 + 3929.67725370800 * x)
R0 += 0.00000001013 * mu.cost(3.52026711847 + 8013.27974094040 * x)
R0 += 0.00000001079 * mu.cost(0.51857999039 + 2814.44457995420 * x)
R0 += 0.00000000999 * mu.cost(4.72734008760 + 533.21408344360 * x)
R0 += 0.00000001131 * mu.cost(0.52584038526 + 6816.28993343500 * x)
R0 += 0.00000001191 * mu.cost(0.60874292520 + 2301.58581590939 * x)
R0 += 0.00000001313 * mu.cost(2.07273299121 + 23539.70738633280 * x)
R0 += 0.00000000996 * mu.cost(4.03971126547 + 16062.18452611680 * x)
R0 += 0.00000000954 * mu.cost(5.90340414098 + 20206.14119633100 * x)
R0 += 0.00000000993 * mu.cost(0.07132588892 + 24150.08005134500 * x)
R0 += 0.00000001051 * mu.cost(2.22096534870 + 3980.50971301380 * x)
R0 += 0.00000001089 * mu.cost(1.25512213569 + 5938.23479286700 * x)
R0 += 0.00000000912 * mu.cost(2.54221161167 + 433.71173787680 * x)
R0 += 0.00000001249 * mu.cost(0.60003625956 + 16173.37116840440 * x)
R0 += 0.00000001027 * mu.cost(4.95999945094 + 19676.45023123640 * x)
R0 += 0.00000001108 * mu.cost(4.34209448160 + 3339.12795399150 * x)
R0 += 0.00000001188 * mu.cost(6.21563747433 + 2679.37949991880 * x)
R0 += 0.00000000849 * mu.cost(0.82548606454 + 2597.62236616720 * x)
R0 += 0.00000001145 * mu.cost(4.48151980872 + 19402.79695281660 * x)
R0 += 0.00000000948 * mu.cost(1.30280088857 + 8273.82086703240 * x)
R0 += 0.00000001016 * mu.cost(5.14464815830 + 1596.18644228460 * x)
R0 += 0.00000000832 * mu.cost(5.60623652030 + 3340.19235060619 * x)
R0 += 0.00000001035 * mu.cost(4.71893106874 + 419.48464387520 * x)
R0 += 0.00000000903 * mu.cost(0.45419000582 + 12995.22537783680 * x)
R0 += 0.00000001089 * mu.cost(0.51294377637 + 11250.79939342160 * x)
R0 += 0.00000000840 * mu.cost(5.30858028008 + 26084.02180621620 * x)
R0 += 0.00000000990 * mu.cost(2.06776368865 + 7255.56965173440 * x)
R0 += 0.00000000808 * mu.cost(6.25630819993 + 15508.61512327440 * x)
R0 += 0.00000000806 * mu.cost(3.09007612135 + 5415.65737477320 * x)
R0 += 0.00000000782 * mu.cost(4.62274599734 + 2547.83753823240 * x)
R0 += 0.00000000963 * mu.cost(2.10680539916 + 6456.88005769770 * x)
R0 += 0.00000000778 * mu.cost(3.56602161857 + 12721.57209941700 * x)
R0 += 0.00000000873 * mu.cost(5.09097164784 + 2540.79130153440 * x)
R0 += 0.00000000772 * mu.cost(3.08101797047 + 11081.21921028860 * x)
R0 += 0.00000000965 * mu.cost(2.33106703115 + 18454.60166491500 * x)
R0 += 0.00000000859 * mu.cost(4.14788214122 + 6438.49624942560 * x)
R0 += 0.00000001012 * mu.cost(4.45011664540 + 3316.73398895200 * x)
R0 += 0.00000000906 * mu.cost(4.29336078401 + 3344.54457996290 * x)
R0 += 0.00000000741 * mu.cost(2.61446403580 + 2284.75361485960 * x)
R0 += 0.00000000790 * mu.cost(6.03436225041 + 12509.25332504720 * x)
R0 += 0.00000000738 * mu.cost(0.52092422137 + 18052.92954315780 * x)
R0 += 0.00000000737 * mu.cost(4.11165247543 + 3760.09707057500 * x)
R0 += 0.00000000727 * mu.cost(3.28066632751 + 3510.19260983280 * x)
R0 += 0.00000001006 * mu.cost(0.45037465289 + 27490.69247804480 * x)
R0 += 0.00000000923 * mu.cost(2.78717931388 + 1332.05488754080 * x)
R0 += 0.00000000756 * mu.cost(0.86881841787 + 1545.35398297880 * x)
R0 += 0.00000000774 * mu.cost(3.71535541900 + 6571.01853218020 * x)
R0 += 0.00000000900 * mu.cost(2.74944190055 + 316.39186965660 * x)
R0 += 0.00000000704 * mu.cost(1.89617185328 + 13362.43245314700 * x)
R0 += 0.00000000701 * mu.cost(2.21328293796 + 20995.39296644940 * x)
R0 += 0.00000000701 * mu.cost(3.92689438700 + 13362.46696045140 * x)
R0 += 0.00000000695 * mu.cost(5.52658147215 + 3364.49086444760 * x)
| |
+ m)), round(255 * (rgb_[1] + m)), round(255 * (rgb_[2] + m)))
def hsv_to_cmyk_nocheck(hue, saturation, value):
    """Convert an HSV colour to CMYK without validating input ranges.

    Args:
        hue: hue angle in degrees, expected in [0, 360).
        saturation: saturation percentage in [0, 100].
        value: value (brightness) percentage in [0, 100].

    Returns:
        Tuple (cyan, magenta, yellow, key), each rounded to an int in [0, 100].
    """
    saturation /= 100
    value /= 100
    # chroma and the intermediate (second-largest) component for the hue sector
    c = value * saturation
    x = c * (1 - abs((hue / 60) % 2 - 1))
    m = value - c
    if hue < 60:
        rgb_ = (c, x, 0)
    elif hue < 120:
        rgb_ = (x, c, 0)
    elif hue < 180:
        rgb_ = (0, c, x)
    elif hue < 240:
        rgb_ = (0, x, c)
    elif hue < 300:
        rgb_ = (x, 0, c)
    else:
        rgb_ = (c, 0, x)
    red = rgb_[0] + m
    green = rgb_[1] + m
    blue = rgb_[2] + m
    k = 1 - max(red, green, blue)
    k_inverse = 1 - k
    # Pure black: k_inverse == 0 would divide by zero below; by convention
    # black maps to (0, 0, 0, 100) in CMYK.
    if k_inverse == 0:
        return 0, 0, 0, 100
    return round(100 * (k_inverse - red) / k_inverse), \
        round(100 * (k_inverse - green) / k_inverse), \
        round(100 * (k_inverse - blue) / k_inverse), \
        round(100 * k)
def hsv_to_hsl_nocheck(hue, saturation, value):
    """Convert an HSV colour to HSL without validating input ranges.

    Args:
        hue: hue angle in degrees, expected in [0, 360).
        saturation: saturation percentage in [0, 100].
        value: value (brightness) percentage in [0, 100].

    Returns:
        Tuple (hue, saturation%, luminance%), percentages rounded to ints.
    """
    saturation /= 100
    value /= 100
    L = value - (0.5 * value * saturation)
    # At L == 0 (black) or L == 1 (white) saturation is 0 by definition;
    # L == 1 would otherwise divide by zero via min(L, 1 - L).
    saturation = 0 if L in (0, 1) else ((value - L) / min(L, 1 - L))
    return hue, round(100 * saturation), round(100 * L)
# HSL to other format
def hsl_to_rgb_nocheck(hue, saturation, luminance):
    """Convert an HSL colour to an (r, g, b) tuple without range checks."""
    saturation /= 100
    luminance /= 100
    # chroma, intermediate component, and lightness offset
    chroma = (1 - abs(2 * luminance - 1)) * saturation
    secondary = chroma * (1 - abs((hue / 60) % 2 - 1))
    offset = luminance - chroma / 2
    if hue < 60:
        r, g, b = chroma, secondary, 0
    elif hue < 120:
        r, g, b = secondary, chroma, 0
    elif hue < 180:
        r, g, b = 0, chroma, secondary
    elif hue < 240:
        r, g, b = 0, secondary, chroma
    elif hue < 300:
        r, g, b = secondary, 0, chroma
    else:
        r, g, b = chroma, 0, secondary
    return round(255 * (r + offset)), round(255 * (g + offset)), round(255 * (b + offset))
def hsl_to_hex_nocheck(hue, saturation, luminance):
    """Convert an HSL colour to a '#rrggbb' hex string without range checks."""
    saturation /= 100
    luminance /= 100
    chroma = (1 - abs(2 * luminance - 1)) * saturation
    secondary = chroma * (1 - abs((hue / 60) % 2 - 1))
    offset = luminance - chroma / 2
    if hue < 60:
        r, g, b = chroma, secondary, 0
    elif hue < 120:
        r, g, b = secondary, chroma, 0
    elif hue < 180:
        r, g, b = 0, chroma, secondary
    elif hue < 240:
        r, g, b = 0, secondary, chroma
    elif hue < 300:
        r, g, b = secondary, 0, chroma
    else:
        r, g, b = chroma, 0, secondary
    channels = (round(255 * (r + offset)), round(255 * (g + offset)), round(255 * (b + offset)))
    return "#%02x%02x%02x" % channels
def hsl_to_cmyk_nocheck(hue, saturation, luminance):
    """Convert an HSL colour to CMYK without validating input ranges.

    Args:
        hue: hue angle in degrees, expected in [0, 360).
        saturation: saturation percentage in [0, 100].
        luminance: luminance percentage in [0, 100].

    Returns:
        Tuple (cyan, magenta, yellow, key), each rounded to an int in [0, 100].
    """
    saturation /= 100
    luminance /= 100
    # chroma, lightness offset, and the intermediate hue-sector component
    c = saturation * (1 - abs(2 * luminance - 1))
    m = luminance - c / 2
    x = c * (1 - abs((hue / 60) % 2 - 1))
    if hue < 60:
        rgb_ = (c, x, 0)
    elif hue < 120:
        rgb_ = (x, c, 0)
    elif hue < 180:
        rgb_ = (0, c, x)
    elif hue < 240:
        rgb_ = (0, x, c)
    elif hue < 300:
        rgb_ = (x, 0, c)
    else:
        rgb_ = (c, 0, x)
    red = rgb_[0] + m
    green = rgb_[1] + m
    blue = rgb_[2] + m
    k = 1 - max(red, green, blue)
    k_inverse = 1 - k
    # Pure black: k_inverse == 0 would divide by zero below; by convention
    # black maps to (0, 0, 0, 100) in CMYK.
    if k_inverse == 0:
        return 0, 0, 0, 100
    return round(100 * (k_inverse - red) / k_inverse), \
        round(100 * (k_inverse - green) / k_inverse), \
        round(100 * (k_inverse - blue) / k_inverse), \
        round(100 * k)
def hsl_to_hsv_nocheck(hue, saturation, luminance):
    """Convert an HSL colour to HSV without range checks.

    Returns a (hue, saturation%, value%) tuple with percentages rounded.
    """
    s = saturation / 100
    lum = luminance / 100
    value = lum + s * min(lum, 1 - lum)
    # value == 0 only for black, where saturation is 0 by definition
    new_saturation = 0 if value == 0 else 2 - (2 * lum / value)
    return hue, round(100 * new_saturation), round(100 * value)
# -------------------------------------------------------------------------
# COLOUR DEFINITIONS
# Predefined ColourRGB palette constants. Within each family the constants
# run darkest -> lightest; the ABSOLUTE_* names are the pure channel values.
# The Blacks, Greys, and Whites
BLACK = ColourRGB(0, 0, 0)
DARKEST_GREY = ColourRGB(30, 30, 30)
DARKER_GREY = ColourRGB(40, 40, 40)
DARK_GREY = ColourRGB(45, 45, 45)
DARKISH_GREY = ColourRGB(60, 60, 60)
GREY = ColourRGB(100, 100, 100)
LIGHTISH_GREY = ColourRGB(130, 130, 130)
LIGHT_GREY = ColourRGB(160, 160, 160)
LIGHTER_GREY = ColourRGB(187, 187, 187)
LIGHTEST_GREY = ColourRGB(210, 210, 210)
DARK_WHITE = ColourRGB(240, 240, 240)
WHITE = ColourRGB(255, 255, 255)
# Blue-Greys
DARKEST_BLUE_GREY = ColourRGB(30, 32, 34)
DARKER_BLUE_GREY = ColourRGB(40, 42, 44)
DARK_BLUE_GREY = ColourRGB(49, 51, 53)
DARKISH_BLUE_GREY = ColourRGB(55, 57, 59)
BLUE_GREY = ColourRGB(63, 75, 86)
LIGHTISH_BLUE_GREY = ColourRGB(83, 95, 106)
LIGHT_BLUE_GREY = ColourRGB(103, 115, 126)
LIGHTER_BLUE_GREY = ColourRGB(133, 145, 156)
LIGHTEST_BLUE_GREY = ColourRGB(173, 185, 196)
# Warm Colours
DARKEST_RED = ColourRGB(48, 11, 8)
DARKER_RED = ColourRGB(64, 13, 9)
DARK_RED = ColourRGB(99, 18, 12)
DARKISH_RED = ColourRGB(143, 23, 12)
RED = ColourRGB(194, 22, 6)
LIGHTISH_RED = ColourRGB(224, 66, 52)
LIGHT_RED = ColourRGB(255, 94, 79)
PINK = ColourRGB(255, 122, 110)
LIGHTISH_PINK = ColourRGB(255, 133, 122)
LIGHT_PINK = ColourRGB(255, 161, 153)
LIGHTER_PINK = ColourRGB(255, 194, 189)
LIGHTEST_PINK = ColourRGB(255, 224, 222)
ABSOLUTE_RED = ColourRGB(255, 0, 0)
# Orange & Brown Shades from: https://graf1x.com/shades-of-orange-color-palette/
# ORANGES
MELON_ORANGE = ColourRGB(247, 152, 98)
SALAMANDER_ORANGE = ColourRGB(240, 94, 35)
SANDSTONE_ORANGE = ColourRGB(215, 144, 44)
GINGER_ORANGE = ColourRGB(190, 85, 4)
SQUASH_ORANGE = ColourRGB(203, 92, 13)
ORANGE = ColourRGB(252, 102, 0)
ROYAL_ORANGE = ColourRGB(249, 129, 42)
TIGER_ORANGE = ColourRGB(253, 106, 2)
APRICOT_ORANGE = ColourRGB(239, 130, 13)
OCHRE_ORANGE = ColourRGB(204, 119, 34)
FIRE_ORANGE = ColourRGB(253, 165, 15)
CARROT_ORANGE = ColourRGB(239, 114, 21)
PUMPKIN_ORANGE = ColourRGB(255, 116, 23)
HONEY_ORANGE = ColourRGB(235, 150, 5)
# BROWNS
DARK_AMBER_BROWN = ColourRGB(136, 48, 0)
BRONZE_BROWN = ColourRGB(177, 86, 15)
CLAY_BROWN = ColourRGB(129, 63, 11)
BURNT_BROWN = ColourRGB(150, 64, 0)
LIGHTEST_BROWN = ColourRGB(168, 145, 113)
LIGHTER_BROWN = ColourRGB(145, 119, 83)
LIGHT_BROWN = ColourRGB(122, 91, 47)
BROWN = ColourRGB(156, 91, 0)
DARKER_BROWN = ColourRGB(94, 62, 18)
DARKEST_BROWN = ColourRGB(64, 38, 3)
# YELLOWS
# From https://graf1x.com/shades-of-yellow-color-palette-chart/
GOLD = ColourRGB(249, 166, 2)
GOLDENROD_YELLOW = ColourRGB(218, 165, 32)
YELLOW = ColourRGB(252, 226, 5)
AMBER_YELLOW = ColourRGB(255, 191, 0)
ROYAL_YELLOW = ColourRGB(250, 218, 94)
MUSTARD_YELLOW = ColourRGB(254, 220, 86)
MELLOW_YELLOW = ColourRGB(248, 222, 126)
FLAX_YELLOW = ColourRGB(238, 220, 130)
CREAM_YELLOW = ColourRGB(255, 253, 208)
CHROME_YELLOW = ColourRGB(255, 204, 0)
TROMBONE_YELLOW = ColourRGB(210, 181, 91)
ABSOLUTE_YELLOW = ColourRGB(255, 255, 0)
# Greens
LIGHTEST_OLIVE_GREEN = ColourRGB(210, 225, 144)
LIGHTER_OLIVE_GREEN = ColourRGB(196, 216, 108)
LIGHT_OLIVE_GREEN = ColourRGB(181, 207, 73)
LIGHTISH_OLIVE_GREEN = ColourRGB(163, 191, 69)
OLIVE_GREEN = ColourRGB(129, 161, 64)
DARKISH_OLIVE_GREEN = ColourRGB(116, 146, 58)
DARK_OLIVE_GREEN = ColourRGB(97, 123, 48)
DARKER_OLIVE_GREEN = ColourRGB(66, 84, 31)
DARKEST_OLIVE_GREEN = ColourRGB(46, 64, 11)
LIGHTEST_GREEN = ColourRGB(200, 230, 201)
LIGHTER_GREEN = ColourRGB(165, 214, 167)
LIGHT_GREEN = ColourRGB(129, 199, 132)
LIGHTISH_GREEN = ColourRGB(102, 187, 106)
GREEN = ColourRGB(76, 175, 80)
DARKISH_GREEN = ColourRGB(67, 160, 71)
DARK_GREEN = ColourRGB(56, 142, 60)
DARKER_GREEN = ColourRGB(46, 125, 50)
DARKEST_GREEN = ColourRGB(27, 83, 32)
ABSOLUTE_GREEN = ColourRGB(0, 255, 0)
# Blues
DARKEST_NAVY_BLUE = ColourRGB(20, 27, 34)
DARKER_NAVY_BLUE = ColourRGB(30, 37, 44)
DARK_NAVY_BLUE = ColourRGB(38, 45, 56)
DARKISH_NAVY_BLUE = ColourRGB(45, 57, 68)
NAVY_BLUE = ColourRGB(40, 57, 81)
LIGHTISH_NAVY_BLUE = ColourRGB(35, 57, 93)
LIGHT_NAVY_BLUE = ColourRGB(45, 67, 103)
LIGHTER_NAVY_BLUE = ColourRGB(65, 87, 123)
LIGHTEST_NAVY_BLUE = ColourRGB(85, 107, 143)
LIGHTEST_BLUE = ColourRGB(227, 242, 253)
LIGHTER_BLUE = ColourRGB(187, 222, 251)
LIGHT_BLUE = ColourRGB(144, 202, 255)
LIGHTISH_BLUE = ColourRGB(100, 181, 246)
BLUE = ColourRGB(33, 165, 245)
DARKISH_BLUE = ColourRGB(30, 136, 229)
DARK_BLUE = ColourRGB(25, 118, 210)
DARKER_BLUE = ColourRGB(21, 101, 192)
DARKEST_BLUE = ColourRGB(12, 70, 160)
ABSOLUTE_BLUE = ColourRGB(0, 0, 255)
# Turquoises
DARKEST_TURQUOISE = ColourRGB(0, 56, 41)
DARKER_TURQUOISE = ColourRGB(0, 106, 78)
DARK_TURQUOISE = ColourRGB(46, 133, 110)
DARKISH_TURQUOISE = ColourRGB(69, 146, 126)
TURQUOISE = ColourRGB(92, 160, 142)
LIGHTISH_TURQUOISE = ColourRGB(115, 173, 158)
LIGHT_TURQUOISE = ColourRGB(138, 186, 174)
LIGHTER_TURQUOISE = ColourRGB(161, 200, 190)
LIGHTEST_TURQUOISE = ColourRGB(184, 213, 205)
ABSOLUTE_CYAN = ColourRGB(0, 255, 255)
# Purples & Violets
LIGHTEST_PURPLE = ColourRGB(225, 190, 231)
LIGHTER_PURPLE = ColourRGB(206, 147, 216)
LIGHT_PURPLE = ColourRGB(186, 104, 200)
LIGHTISH_PURPLE = ColourRGB(171, 71, 188)
PURPLE = ColourRGB(156, 39, 176)
DARKISH_PURPLE = ColourRGB(142, 36, 170)
DARK_PURPLE = ColourRGB(123, 31, 162)
DARKER_PURPLE = ColourRGB(106, 27, 154)
DARKEST_PURPLE = ColourRGB(74, 20, 140)
ABSOLUTE_PURPLE = ColourRGB(255, 0, 255)
LIGHTEST_VIOLET = ColourRGB(209, 196, 233)
LIGHTER_VIOLET = ColourRGB(179, 157, 219)
LIGHT_VIOLET = ColourRGB(149, 117, 205)
LIGHTISH_VIOLET = ColourRGB(126, 87, 194)
VIOLET = ColourRGB(103, 58, 183)
DARKISH_VIOLET = ColourRGB(94, 53, 177)
DARK_VIOLET = ColourRGB(81, 45, 168)
DARKER_VIOLET = ColourRGB(69, 39, 160)
DARKEST_VIOLET = ColourRGB(49, 27, 146)
# Convenience palettes grouping each colour family (ordered roughly
# dark -> light, matching the constant ordering above).
GREYS = [BLACK, DARKEST_GREY, DARKER_GREY, DARK_GREY, DARKISH_GREY, GREY, LIGHTISH_GREY, LIGHT_GREY, LIGHTER_GREY,
         LIGHTEST_GREY, DARK_WHITE, WHITE]
BLUE_GREYS = [DARKEST_BLUE_GREY, DARKER_BLUE_GREY, DARK_BLUE_GREY, DARKISH_BLUE_GREY, BLUE_GREY, LIGHTISH_BLUE_GREY,
              LIGHT_BLUE_GREY, LIGHTER_BLUE_GREY, LIGHTEST_BLUE_GREY]
REDS = [DARKEST_RED, DARKER_RED, DARK_RED, DARKISH_RED, RED, LIGHTISH_RED, LIGHT_RED, ABSOLUTE_RED]
PINKS = [PINK, LIGHTISH_PINK, LIGHT_PINK, LIGHTER_PINK, LIGHTEST_PINK]
ORANGES = [MELON_ORANGE, SALAMANDER_ORANGE, SANDSTONE_ORANGE, GINGER_ORANGE, SQUASH_ORANGE, ORANGE, ROYAL_ORANGE,
           TIGER_ORANGE, APRICOT_ORANGE, OCHRE_ORANGE, FIRE_ORANGE, CARROT_ORANGE, PUMPKIN_ORANGE, HONEY_ORANGE]
BROWNS = [DARK_AMBER_BROWN, BRONZE_BROWN, CLAY_BROWN, BURNT_BROWN, LIGHTEST_BROWN, LIGHTER_BROWN, LIGHT_BROWN, BROWN,
          DARKER_BROWN, DARKEST_BROWN]
YELLOWS = [GOLD, GOLDENROD_YELLOW, YELLOW, AMBER_YELLOW, ROYAL_YELLOW, MUSTARD_YELLOW, MELLOW_YELLOW, FLAX_YELLOW,
           CREAM_YELLOW, CHROME_YELLOW, TROMBONE_YELLOW, ABSOLUTE_YELLOW]
OLIVE_GREENS = [DARKEST_OLIVE_GREEN, DARKER_OLIVE_GREEN, DARK_OLIVE_GREEN, DARKISH_OLIVE_GREEN, OLIVE_GREEN,
                LIGHTISH_OLIVE_GREEN, LIGHTEST_OLIVE_GREEN, LIGHTER_OLIVE_GREEN, LIGHT_OLIVE_GREEN]
GREENS = [DARKEST_GREEN, DARKER_GREEN, DARK_GREEN, DARKISH_GREEN, GREEN, LIGHTISH_GREEN, LIGHT_GREEN, LIGHTER_GREEN,
          LIGHTEST_GREEN, ABSOLUTE_GREEN]
NAVY_BLUES = [DARKEST_NAVY_BLUE, DARKER_NAVY_BLUE, DARK_NAVY_BLUE, DARKISH_NAVY_BLUE, NAVY_BLUE, LIGHTISH_NAVY_BLUE,
| |
4
def process_doc_info(doc_info: DocInfo, success: List[str], fail: List[str], doc_infos: List[DocInfo], seen_docs: Dict[str, DocInfo]):
    """Route a processed doc into the success/fail accumulators.

    Empty files are silently dropped; docs with an error or a duplicate id go
    to `fail`; everything else is recorded in `doc_infos`, `success` and
    `seen_docs` (keyed by doc id, used for duplicate detection).
    """
    err = doc_info.error_msg
    if err == EMPTY_FILE_MSG:
        # ignore empty files
        return
    if err:
        fail.append(f'{doc_info.readme} ({err})')
        return
    if doc_info.id in seen_docs:
        fail.append(f'{doc_info.readme} (duplicate with {seen_docs[doc_info.id].readme})')
        return
    doc_infos.append(doc_info)
    success.append(doc_info.readme)
    seen_docs[doc_info.id] = doc_info
def create_docs(content_dir: str, target_dir: str, regex_list: List[str], prefix: str, private_pack_prefix: str):
    """Generate reference doc pages for every readme matching ``regex_list``.

    Args:
        content_dir: root of the content repo to scan for readme files.
        target_dir: directory the generated docs are written under.
        regex_list: filename regexes selecting which readme files to process.
        prefix: category sub-directory name (e.g. integrations/scripts/playbooks).
        private_pack_prefix: prefix used when pulling extra docs of private packs.

    Returns:
        The successfully processed DocInfo objects sorted by name.
        Calls sys.exit(2) when the number of failures exceeds MAX_FAILURES.
    """
    print(f'Using BRANCH: {BRANCH}')
    # Search for readme files
    readme_files = findfiles(regex_list, content_dir)
    print(f'Processing: {len(readme_files)} {prefix} files ...')
    if MAX_FILES > 0:
        # preview builds process only a random sample to keep the build fast
        print(f'PREVIEW MODE. Truncating file list to: {MAX_FILES}')
        random.shuffle(readme_files)
        readme_files = readme_files[:MAX_FILES]
    if FILE_REGEX:
        print(f'PREVIEW MODE. Matching only files which match: {FILE_REGEX}')
        regex = re.compile(FILE_REGEX)
        readme_files = list(filter(regex.search, readme_files))
    target_sub_dir = f'{target_dir}/{prefix}'
    if not os.path.exists(target_sub_dir):
        os.makedirs(target_sub_dir)
    # images are written once and referenced relatively from the generated docs
    relative_imgs_dir = "../../../docs/doc_imgs/reference/relative"
    imgs_dir = os.path.abspath(f'{target_sub_dir}/{relative_imgs_dir}')
    if not os.path.exists(imgs_dir):
        os.makedirs(imgs_dir)
    doc_infos: List[DocInfo] = []
    success: List[str] = []
    fail: List[str] = []
    # flush before starting multi process
    sys.stdout.flush()
    sys.stderr.flush()
    seen_docs: Dict[str, DocInfo] = {}
    with Pool(processes=POOL_SIZE) as pool:
        # convert readmes in parallel; results are accumulated sequentially so
        # the shared lists/dict are only touched by this process
        for doc_info in pool.map(partial(process_readme_doc, target_sub_dir, content_dir, prefix, imgs_dir, relative_imgs_dir), readme_files):
            process_doc_info(doc_info, success, fail, doc_infos, seen_docs)
        for doc_info in process_extra_docs(target_sub_dir, prefix):
            process_doc_info(doc_info, success, fail, doc_infos, seen_docs)
        for private_doc_info in process_extra_docs(target_sub_dir, prefix, private_packs=True,
                                                   private_packs_prefix=private_pack_prefix):
            process_doc_info(private_doc_info, success, fail, doc_infos, seen_docs)
    org_print(f'\n===========================================\nSuccess {prefix} docs ({len(success)}):')
    for r in sorted(success):
        print(r)
    org_print(f'\n===========================================\nFailed {prefix} docs ({len(fail)}):')
    for r in sorted(fail):
        print(r)
    org_print("\n===========================================\n")
    if len(fail) > MAX_FAILURES:
        print(f'MAX_FAILURES of {len(fail)} exceeded limit: {MAX_FAILURES}. Aborting!!')
        sys.exit(2)
    return sorted(doc_infos, key=lambda d: d.name.lower())  # sort by name
def create_releases(target_dir: str):
    """Generate release-notes doc pages from the bundled extra-docs markdown files.

    Args:
        target_dir: directory the generated release docs are written under.

    Returns:
        DocInfo objects sorted newest-first by the version part of the doc
        name (names are parsed via partition on 'content release ').
        Calls sys.exit(3) on any failure.
    """
    releases_dir = f'{os.path.dirname(os.path.abspath(__file__))}/extra-docs/{RELEASES_PREFIX}'
    target_sub_dir = f'{target_dir}/{RELEASES_PREFIX}'
    if not os.path.exists(target_sub_dir):
        os.makedirs(target_sub_dir)
    release_files = glob.glob(f'{releases_dir}/*.md')
    doc_infos: List[DocInfo] = []
    success = []
    fail = []
    # flush before starting multi process
    sys.stdout.flush()
    sys.stderr.flush()
    with Pool(processes=POOL_SIZE) as pool:
        for doc_info in pool.map(partial(process_release_doc, target_sub_dir), release_files):
            if not doc_info:  # case that we skip a release doc as it is too old
                continue
            if doc_info.error_msg:
                fail.append(f'{doc_info.readme} ({doc_info.error_msg})')
            else:
                doc_infos.append(doc_info)
                success.append(doc_info.readme)
    org_print(f'\n===========================================\nSuccess release docs ({len(success)}):')
    for r in sorted(success):
        print(r)
    org_print(f'\n===========================================\nFailed release docs ({len(fail)}):')
    for r in sorted(fail):
        print(r)
    org_print("\n===========================================\n")
    if fail:
        print(f'{len(fail)} failed releases. Aborting!!')
        sys.exit(3)
    return sorted(doc_infos, key=lambda d: StrictVersion(d.name.lower().partition('content release ')[2]), reverse=True)
def create_articles(target_dir: str):
    """Generate article doc pages and return their DocInfo objects sorted by name.

    Raises ValueError when an article lacks a description; calls sys.exit(2)
    when any article fails to process.
    """
    articles_dir = f'{target_dir}/{ARTICLES_PREFIX}'
    if not os.path.exists(articles_dir):
        os.makedirs(articles_dir)
    collected: List[DocInfo] = []
    ok: List[str] = []
    failed: List[str] = []
    seen: Dict[str, DocInfo] = {}
    for article in process_extra_docs(articles_dir, ARTICLES_PREFIX):
        # fail the build if no description for an article
        if not article.description:
            raise ValueError(f'Missing description for article: {article.id} ({article.name})')
        process_doc_info(article, ok, failed, collected, seen)
    org_print(f'\n===========================================\nSuccess {ARTICLES_PREFIX} docs ({len(ok)}):')
    for readme in sorted(ok):
        print(readme)
    org_print(f'\n===========================================\nFailed {ARTICLES_PREFIX} docs ({len(failed)}):')
    for readme in sorted(failed):
        print(readme)
    org_print("\n===========================================\n")
    if failed:
        print(f'{len(failed)} failed articles. Aborting!!')
        sys.exit(2)
    return sorted(collected, key=lambda info: info.name.lower())
def insert_approved_tags_and_usecases():
    """Embed the approved pack use-case and tag lists into pack-docs.md.

    Reads the 'approved_list' arrays from approved_usecases.json and
    approved_tags.json and appends a collapsible <details> block after the
    '***Use-case***' and '***Tags***' lines of the pack documentation page.
    """
    with open('approved_usecases.json', 'r') as f:
        approved_usecases = json.loads(f.read()).get('approved_list')
    approved_usecases_string = '\n'.join(approved_usecases)
    with open('approved_tags.json', 'r') as f:
        approved_tags = json.loads(f.read()).get('approved_list')
    approved_tags_string = '\n'.join(approved_tags)
    with open("../docs/documentation/pack-docs.md", "r+") as f:
        pack_docs = f.readlines()
        f.seek(0)
        # NOTE(review): the file is rewritten in place without f.truncate();
        # this is safe only because every line is written back unshortened
        # (the <details> blocks only add text) — confirm if that ever changes.
        for line in pack_docs:
            if '***Use-case***' in line:
                line += f"""
<details>
<summary>Pack Use-cases</summary>
{approved_usecases_string}
</details>
"""
            if '***Tags***' in line:
                line += f"""
<details>
<summary>Pack Tags</summary>
{approved_tags_string}
</details>
"""
            f.write(line)
def is_xsoar_supported_pack(pack_dir: str):
    """Return True if the pack's pack_metadata.json declares xsoar support."""
    metadata_path = f'{pack_dir}/pack_metadata.json'
    with open(metadata_path, 'r') as metadata_file:
        return json.load(metadata_file).get('support') == 'xsoar'
def get_blame_date(content_dir: str, file: str, line: int):
    """Return the author commit time (UTC) of one line of a file via `git blame`.

    Raises ValueError if the porcelain blame output has no author-time field.
    """
    rel_path = os.path.relpath(file, content_dir)
    blame_cmd = ['git', 'blame', '-p', '-L', f'{line},+1', rel_path]
    blame_out = subprocess.check_output(blame_cmd, text=True, cwd=content_dir)
    match = re.search(r'^author-time\s+(\d+)', blame_out, re.MULTILINE)
    if match is None:
        raise ValueError(f'author-date not found for blame output of file: [{file}]: {blame_out}')
    return datetime.utcfromtimestamp(int(match.group(1)))
def get_deprecated_display_dates(dep_date: datetime) -> Tuple[str, str]:
    """Get the deprecation start date. The 1st of the following month.

    Args:
        dep_date (datetime): The raw dep date

    Returns:
        tuple of start deprecation and end deprecation
    """
    date_format = "%b %d, %Y"
    # maintenance starts on the 1st of the month after the deprecation commit
    maintenance_start = datetime(day=1, month=dep_date.month, year=dep_date.year) + relativedelta(months=+1)
    eol = maintenance_start + relativedelta(months=+6)
    return maintenance_start.strftime(date_format), eol.strftime(date_format)
def find_deprecated_integrations(content_dir: str):
    """Scan content Packs for xsoar-supported integrations marked deprecated.

    Args:
        content_dir: root of the content repo to scan.

    Returns:
        List of DeprecatedInfo entries with display dates derived from the
        git blame date of each `deprecated: true` line.
    """
    files = glob.glob(content_dir + '/Packs/*/Integrations/*.yml')
    files.extend(glob.glob(content_dir + '/Packs/*/Integrations/*/*.yml'))
    res: List[DeprecatedInfo] = []
    # go over each file and check if contains deprecated: true
    for f in files:
        with open(f, 'r') as fr:
            content = fr.read()
        if dep_search := re.search(r'^deprecated:\s*true', content, re.MULTILINE):
            pack_dir = re.match(r'.+/Packs/.+?(?=/)', f)
            if is_xsoar_supported_pack(pack_dir.group(0)):  # type: ignore[union-attr]
                yml_data = yaml.safe_load(content)
                integration_id = yml_data.get('commonfields', {}).get('id') or yml_data['name']
                name: str = yml_data.get('display') or yml_data['name']
                desc = yml_data.get('description')
                # blame the `deprecated:` line itself: its 1-based line number
                # is 1 + the number of newlines before the match start
                blame_line = 1 + content.count('\n', 0, dep_search.start())
                dep_date = get_blame_date(content_dir, f, blame_line)
                maintenance_start, eol_start = get_deprecated_display_dates(dep_date)
                dep_suffix = "(Deprecated)"
                if name.endswith(dep_suffix):
                    name = name.replace(dep_suffix, "").strip()
                info = DeprecatedInfo(id=integration_id, name=name, description=desc, note=get_extracted_deprecated_note(desc),
                                      maintenance_start=maintenance_start, eol_start=eol_start)
                print(f'Adding deprecated integration: [{name}]. Deprecated date: {dep_date}. From file: {f}')
                res.append(info)
            else:
                print(f'Skipping deprecated integration: {f} which is not supported by xsoar')
    return res
def merge_deprecated_info(deprecated_list: List[DeprecatedInfo], deperecated_info_file: str):
    """Overlay static deprecation info from a json file onto the detected list.

    Entries sharing an id are merged (the static file's fields win); static
    entries with no detected counterpart are appended at the end.
    """
    with open(deperecated_info_file, "rt") as f:
        static_infos: List[DeprecatedInfo] = json.load(f)['integrations']
    static_by_id = {info['id']: info for info in static_infos}
    merged_list: List[DeprecatedInfo] = []
    for detected in deprecated_list:
        if detected['id'] in static_by_id:
            detected = {**detected, **static_by_id[detected['id']]}  # type: ignore[misc]
        merged_list.append(detected)
    merged_ids = {info['id'] for info in merged_list}
    merged_list.extend(info for info_id, info in static_by_id.items() if info_id not in merged_ids)
    return merged_list
def add_deprected_integrations_info(content_dir: str, deperecated_article: str, deperecated_info_file: str, assets_dir: str):
    """Will append the deprecated integrations info to the deprecated article

    Args:
        content_dir (str): content dir to search for deprecated integrations
        deperecated_article (str): deprecated article (md file) to add to
        deperecated_info_file (str): json file with static deprecated info to merge
        assets_dir (str): static assets dir to write the machine readable json files to
    """
    deprecated_infos = merge_deprecated_info(find_deprecated_integrations(content_dir), deperecated_info_file)
    deprecated_infos = sorted(deprecated_infos, key=lambda d: d['name'].lower() if 'name' in d else d['id'].lower())  # sort by name
    # machine readable variant is written next to the site's static assets
    deperecated_json_file = f'{assets_dir}/{os.path.basename(deperecated_article.replace(".md", ".json"))}'
    with open(deperecated_json_file, 'w') as f:
        json.dump({
            'description': 'Generated machine readable doc of deprecated integrations',
            'integrations': deprecated_infos
        }, f, indent=2)
    # second json holds only the entries still missing a deprecation note
    deperecated_infos_no_note = [i for i in deprecated_infos if not i['note']]
    deperecated_json_file_no_note = deperecated_json_file.replace('.json', '.no_note.json')
    with open(deperecated_json_file_no_note, 'w') as f:
        json.dump({
            'description': 'Generated doc of deprecated integrations which do not contain a note about replacement or deprecation reason',
            'integrations': deperecated_infos_no_note
        }, f, indent=2)
    # append one markdown section per deprecated integration to the article
    with open(deperecated_article, "at") as f:
        for d in deprecated_infos:
            f.write(f'\n## {d["name"] if d.get("name") else d["id"]}\n')
            if d.get("maintenance_start"):
                f.write(f'* **Maintenance Mode Start Date:** {d["maintenance_start"]}\n')
            if d.get("eol_start"):
                f.write(f'* **End-of-Life Date:** {d["eol_start"]}\n')
            if d.get("note"):
                f.write(f'* **Note:** {d["note"]}\n')
        f.write('\n\n----\nA machine readable version of this file'
                f' is available [here](pathname:///assets/{os.path.basename(deperecated_json_file)}).\n')
    org_print("\n===========================================\n")
def main():
parser = argparse.ArgumentParser(description='''Generate Content Docs. You should probably not call this script directly.
See: https://github.com/demisto/content-docs/#generating-reference-docs''',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-t", "--target", help="Target dir to generate docs at.", required=True)
parser.add_argument("-d", "--dir", help="Content repo dir.", required=True)
args = parser.parse_args()
print(f'Using multiprocess pool size: {POOL_SIZE}')
print('Starting MDX server...')
start_mdx_server()
prefix = os.path.basename(args.target)
integrations_full_prefix = f'{prefix}/{INTEGRATIONS_PREFIX}'
scripts_full_prefix = f'{prefix}/{SCRIPTS_PREFIX}'
playbooks_full_prefix = f'{prefix}/{PLAYBOOKS_PREFIX}'
releases_full_prefix = f'{prefix}/{RELEASES_PREFIX}'
articles_full_prefix = f'{prefix}/{ARTICLES_PREFIX}'
integration_doc_infos = create_docs(args.dir, args.target, INTEGRATION_DOCS_MATCH, INTEGRATIONS_PREFIX,
private_pack_prefix=PRIVATE_PACKS_INTEGRATIONS_PREFIX)
playbooks_doc_infos = create_docs(args.dir, args.target, PLAYBOOKS_DOCS_MATCH, PLAYBOOKS_PREFIX,
private_pack_prefix=PRIVATE_PACKS_PLAYBOOKS_PREFIX)
script_doc_infos = create_docs(args.dir, args.target, SCRIPTS_DOCS_MATCH, SCRIPTS_PREFIX,
private_pack_prefix=PRIVATE_PACKS_SCRIPTS_PREFIX)
release_doc_infos = create_releases(args.target)
article_doc_infos = create_articles(args.target)
if os.getenv('SKIP_DEPRECATED') not in ('true', 'yes', '1'):
add_deprected_integrations_info(args.dir, f'{args.target}/{ARTICLES_PREFIX}/deprecated.md', DEPRECATED_INFO_FILE,
f'{args.target}/../../static/assets')
index_base = f'{os.path.dirname(os.path.abspath(__file__))}/reference-index.md'
index_target = args.target + '/index.md'
articles_index_target = args.target + '/articles-index.md'
articles_index_base = f'{os.path.dirname(os.path.abspath(__file__))}/articles-index.md'
shutil.copy(index_base, index_target)
shutil.copy(articles_index_base, articles_index_target)
with open(index_target, 'a', encoding='utf-8') as f:
if MAX_FILES > 0:
f.write(f'\n\n# =====<br/>BUILD PREVIEW only {MAX_FILES} files from each category! <br/>=====\n\n')
f.write("\n\n## Integrations\n\n")
f.write(index_doc_infos(integration_doc_infos, INTEGRATIONS_PREFIX))
f.write("\n\n## Playbooks\n\n")
f.write(index_doc_infos(playbooks_doc_infos, PLAYBOOKS_PREFIX))
f.write("\n\n## Scripts\n\n")
f.write(index_doc_infos(script_doc_infos, SCRIPTS_PREFIX))
f.write("\n\n## API Reference\n\n")
api_docs: List[DocInfo] = [
DocInfo('demisto-class', 'Demisto Class',
'The object exposes a series of API methods which are used to retrieve and send data to the Cortex XSOAR Server.', ''),
DocInfo('common-server-python', 'Common Server Python',
'Common functions that will be appended to the code of each integration/script before being executed.', ''),
]
f.write(index_doc_infos(api_docs, 'api'))
f.write("\n\n## Content Release Notes\n\n")
f.write(index_doc_infos(release_doc_infos, RELEASES_PREFIX, headers=('Name', 'Date')))
f.write("\n\nAdditional archived release notes are available"
" [here](https://github.com/demisto/content-docs/tree/master/content-repo/extra-docs/releases).")
with open(articles_index_target, 'a', encoding='utf-8') as f:
if MAX_FILES > 0:
f.write(f'\n\n# =====<br/>BUILD PREVIEW only {MAX_FILES} files from each category! <br/>=====\n\n')
f.write(index_doc_infos(article_doc_infos, ARTICLES_PREFIX))
integration_items = [f'{integrations_full_prefix}/{d.id}' for d in integration_doc_infos]
playbook_items = [f'{playbooks_full_prefix}/{d.id}' for d in playbooks_doc_infos]
script_items = [f'{scripts_full_prefix}/{d.id}' for d in script_doc_infos]
article_items = [f'{articles_full_prefix}/{d.id}' for d in article_doc_infos]
article_items.insert(0, f'{prefix}/articles-index')
release_items = [f'{releases_full_prefix}/{d.id}' for d in release_doc_infos]
sidebar = [
{
"type": "doc",
"id": f'{prefix}/index'
},
{
"type": "category",
"label": "Integrations",
"items": integration_items
},
{
"type": "category",
"label": "Playbooks",
"items": playbook_items
},
{
"type": "category",
"label": "Scripts",
"items": script_items
},
{
"type": "category",
"label": "Content | |
is_master_gpu:
filtered_cost_volume_tower = RegNetUS0({'data': cost_volume}, is_training=True, reuse=tf.AUTO_REUSE)
else:
filtered_cost_volume_tower = RegNetUS0({'data': cost_volume}, is_training=True, reuse=True)
filtered_cost_volume = tf.squeeze(filtered_cost_volume_tower.get_output(), axis=-1)
# depth map by softArgmin
with tf.name_scope('soft_arg_min'):
# probability volume by soft max
probability_volume = tf.nn.softmax(tf.scalar_mul(-1, filtered_cost_volume),
axis=1, name='prob_volume')
# depth image by soft argmin
volume_shape = tf.shape(probability_volume)
soft_2d = []
for i in range(FLAGS.batch_size):
soft_1d = tf.linspace(depth_start[i], depth_end[i], tf.cast(depth_num, tf.int32))
soft_2d.append(soft_1d)
soft_2d = tf.reshape(tf.stack(soft_2d, axis=0), [volume_shape[0], volume_shape[1], 1, 1])
soft_4d = tf.tile(soft_2d, [1, 1, volume_shape[2], volume_shape[3]])
estimated_depth_map = tf.reduce_sum(soft_4d * probability_volume, axis=1)
estimated_depth_map = tf.expand_dims(estimated_depth_map, axis=3)
# probability map
prob_map = get_propability_map(probability_volume, estimated_depth_map, depth_start, depth_interval)
# filtered_depth_map = tf.cast(tf.greater_equal(prob_map, 0.8), dtype='float32') * estimated_depth_map
return estimated_depth_map, prob_map
def depth_refine(init_depth_map, image, depth_num, depth_start, depth_interval, is_master_gpu=True):
    """Refine an initial depth map using the reference image (MVSNet refinement step).

    Args:
        init_depth_map: initial depth estimate, NHWC tensor with one channel.
        image: reference color image used as guidance; resized below to the
            depth map's spatial size.
        depth_num: number of depth hypotheses; used to derive the depth range.
        depth_start: per-sample first depth value (batch-sized, per the reshape below).
        depth_interval: per-sample spacing between depth hypotheses.
        is_master_gpu: True on the first tower so network variables are created
            (reuse=tf.AUTO_REUSE); other towers reuse existing variables.

    Returns:
        Refined depth map with the same shape as init_depth_map.
    """
    # normalization parameters
    depth_shape = tf.shape(init_depth_map)
    # last hypothesis value: start + (num - 1) * interval
    depth_end = depth_start + (tf.cast(depth_num, tf.float32) - 1) * depth_interval
    # broadcast the per-sample start/end scalars to full [N, H, W, 1] maps
    depth_start_mat = tf.tile(tf.reshape(
        depth_start, [depth_shape[0], 1, 1, 1]), [1, depth_shape[1], depth_shape[2], 1])
    depth_end_mat = tf.tile(tf.reshape(
        depth_end, [depth_shape[0], 1, 1, 1]), [1, depth_shape[1], depth_shape[2], 1])
    depth_scale_mat = depth_end_mat - depth_start_mat
    # normalize depth map (to 0~1) so the network input is range-independent
    init_norm_depth_map = tf.div(init_depth_map - depth_start_mat, depth_scale_mat)
    # resize normalized image to the same size of depth image
    resized_image = tf.image.resize_bilinear(image, [depth_shape[1], depth_shape[2]])
    # refinement network: predicts a refined depth map in normalized [0, 1] units
    if is_master_gpu:
        norm_depth_tower = RefineNet({'color_image': resized_image, 'depth_image': init_norm_depth_map},
                                     is_training=True, reuse=tf.AUTO_REUSE)
    else:
        norm_depth_tower = RefineNet({'color_image': resized_image, 'depth_image': init_norm_depth_map},
                                     is_training=True, reuse=True)
    norm_depth_map = norm_depth_tower.get_output()
    # denormalize depth map back to the metric depth range
    refined_depth_map = tf.multiply(norm_depth_map, depth_scale_mat) + depth_start_mat
    return refined_depth_map
def non_zero_mean_absolute_diff(y_true, y_pred, interval):
    """ non zero mean absolute loss for one batch """
    # Mean absolute depth error restricted to pixels with valid (non-zero)
    # ground truth, expressed in units of each sample's depth interval.
    with tf.name_scope('MAE'):
        shape = tf.shape(y_pred)
        interval = tf.reshape(interval, [shape[0]])  # one interval per sample
        # 1.0 where ground truth is present, 0.0 where it is missing
        mask_true = tf.cast(tf.not_equal(y_true, 0.0), dtype='float32')
        # per-sample count of valid pixels; epsilon avoids division by zero
        denom = tf.reduce_sum(mask_true, axis=[1, 2, 3]) + 1e-7
        masked_abs_error = tf.abs(mask_true * (y_true - y_pred))      # 4D
        masked_mae = tf.reduce_sum(masked_abs_error, axis=[1, 2, 3])  # 1D
        # normalize by interval and valid-pixel count, then reduce over the batch.
        # NOTE(review): this is a batch *sum*, not a batch mean -- confirm
        # callers expect that before changing.
        masked_mae = tf.reduce_sum((masked_mae / interval) / denom)   # 1
    return masked_mae
def less_one_percentage(y_true, y_pred, interval):
    """ less one accuracy for one batch """
    # Fraction of valid ground-truth pixels whose absolute depth error is at
    # most one depth interval.
    with tf.name_scope('less_one_error'):
        shape = tf.shape(y_pred)
        mask_true = tf.cast(tf.not_equal(y_true, 0.0), dtype='float32')  # valid-pixel mask
        denom = tf.reduce_sum(mask_true) + 1e-7  # total valid pixels; eps avoids /0
        # broadcast the per-sample interval to a full [N, H, W, 1] map
        interval_image = tf.tile(tf.reshape(interval, [shape[0], 1, 1, 1]), [1, shape[1], shape[2], 1])
        abs_diff_image = tf.abs(y_true - y_pred) / interval_image  # error in interval units
        less_one_image = mask_true * tf.cast(tf.less_equal(abs_diff_image, 1.0), dtype='float32')
    return tf.reduce_sum(less_one_image) / denom
def less_three_percentage(y_true, y_pred, interval):
    """ less three accuracy for one batch """
    # Fraction of valid ground-truth pixels whose absolute depth error is at
    # most three depth intervals (same structure as less_one_percentage).
    with tf.name_scope('less_three_error'):
        shape = tf.shape(y_pred)
        mask_true = tf.cast(tf.not_equal(y_true, 0.0), dtype='float32')  # valid-pixel mask
        denom = tf.reduce_sum(mask_true) + 1e-7  # total valid pixels; eps avoids /0
        # broadcast the per-sample interval to a full [N, H, W, 1] map
        interval_image = tf.tile(tf.reshape(interval, [shape[0], 1, 1, 1]), [1, shape[1], shape[2], 1])
        abs_diff_image = tf.abs(y_true - y_pred) / interval_image  # error in interval units
        less_three_image = mask_true * tf.cast(tf.less_equal(abs_diff_image, 3.0), dtype='float32')
    return tf.reduce_sum(less_three_image) / denom
def mvsnet_loss(estimated_disp_image, disp_image, depth_interval):
    """Compute the training loss and the two accuracy metrics for one batch.

    Returns a tuple (loss, <1-interval accuracy, <3-interval accuracy), where
    the loss is the masked mean absolute error in depth-interval units.
    """
    loss = non_zero_mean_absolute_diff(disp_image, estimated_disp_image, depth_interval)
    accuracy_1 = less_one_percentage(disp_image, estimated_disp_image, depth_interval)
    accuracy_3 = less_three_percentage(disp_image, estimated_disp_image, depth_interval)
    return loss, accuracy_1, accuracy_3
def inverse_warping(img,left_cam, right_cam, depth):
    """Warp `img` (the right view) into the left view using per-pixel depth.

    Camera tensors pack extrinsics and intrinsics: slice index 0 holds the
    3x4 [R|t] extrinsic and slice index 1 holds the 3x3 intrinsic K, as read
    by the tf.slice calls below.

    Returns:
        warped_right: `img` bilinearly resampled at the projected coordinates.
        mask: float map, 1.0 where the sample location fell inside the image.
    """
    with tf.name_scope('inverse_warping'):
        # cameras (K, R, t)
        R_left = tf.slice(left_cam, [0, 0, 0, 0], [-1, 1, 3, 3])
        R_right = tf.slice(right_cam, [0, 0, 0, 0], [-1, 1, 3, 3])
        t_left = tf.slice(left_cam, [0, 0, 0, 3], [-1, 1, 3, 1])
        t_right = tf.slice(right_cam, [0, 0, 0, 3], [-1, 1, 3, 1])
        K_left = tf.slice(left_cam, [0, 1, 0, 0], [-1, 1, 3, 3])
        # NOTE(review): K_right is extracted but never used below; the
        # projection uses K_left only -- confirm both cameras share intrinsics.
        K_right = tf.slice(right_cam, [0, 1, 0, 0], [-1, 1, 3, 3])
        K_left = tf.squeeze(K_left,axis=1)
        K_left_inv = tf.matrix_inverse(K_left)
        # transposed rotations (inverse for orthonormal R)
        R_left_trans = tf.transpose(tf.squeeze(R_left, axis=1), perm=[0, 2, 1])
        R_right_trans = tf.transpose(tf.squeeze(R_right, axis=1), perm=[0, 2, 1])
        R_left =tf.squeeze(R_left, axis=1)
        t_left =tf.squeeze(t_left, axis=1)
        R_right = tf.squeeze(R_right, axis=1)
        t_right = tf.squeeze(t_right, axis=1)
        ## estimate egomotion by inverse composing R1,R2 and t1,t2
        R_rel = tf.matmul(R_right,R_left_trans)
        t_rel = tf.subtract(t_right, tf.matmul(R_rel,t_left))
        ## now convert R and t to transform mat, as in SFMlearner
        # NOTE(review): batch_size here is a *static* dimension from R_left.shape;
        # it is rebound to a dynamic tf.shape value a few lines below.
        batch_size = R_left.shape[0]
        filler = tf.constant([0.0, 0.0, 0.0, 1.0], shape=[1, 1, 4])
        filler = tf.tile(filler, [batch_size, 1, 1])
        # 4x4 homogeneous relative transform [R_rel | t_rel; 0 0 0 1]
        transform_mat = tf.concat([R_rel, t_rel], axis=2)
        transform_mat = tf.concat([transform_mat, filler], axis=1)
        dims = tf.shape(img)
        batch_size, img_height, img_width = dims[0], dims[1], dims[2]
        depth = tf.reshape(depth, [batch_size, 1, img_height * img_width])
        # absolute pixel grid [3, H*W], tiled per batch element
        grid = _meshgrid_abs(img_height, img_width)
        grid = tf.tile(tf.expand_dims(grid, 0), [batch_size, 1, 1])
        # back-project pixels to left-camera 3D coordinates
        cam_coords = _pixel2cam(depth, grid, K_left_inv)
        ones = tf.ones([batch_size, 1, img_height * img_width])
        cam_coords_hom = tf.concat([cam_coords, ones], axis=1)
        # Get projection matrix for target camera frame to source pixel frame
        hom_filler = tf.constant([0.0, 0.0, 0.0, 1.0], shape=[1, 1, 4])
        hom_filler = tf.tile(hom_filler, [batch_size, 1, 1])
        intrinsic_mat_hom = tf.concat(
            [K_left, tf.zeros([batch_size, 3, 1])], axis=2)
        intrinsic_mat_hom = tf.concat([intrinsic_mat_hom, hom_filler], axis=1)
        proj_target_cam_to_source_pixel = tf.matmul(intrinsic_mat_hom, transform_mat)
        source_pixel_coords = _cam2pixel(cam_coords_hom,
                                         proj_target_cam_to_source_pixel)
        # reshape [N, 2, H*W] -> [N, H, W, 2] for the sampler
        source_pixel_coords = tf.reshape(source_pixel_coords,
                                         [batch_size, 2, img_height, img_width])
        source_pixel_coords = tf.transpose(source_pixel_coords, perm=[0, 2, 3, 1])
        warped_right, mask = _spatial_transformer(img, source_pixel_coords)
        return warped_right,mask
def _cam2pixel(cam_coords, proj_c2p):
    """Transform coordinates in the camera frame to the pixel frame."""
    # Project homogeneous camera coordinates, then dehomogenize by depth.
    projected = tf.matmul(proj_c2p, cam_coords)
    x_h = tf.slice(projected, [0, 0, 0], [-1, 1, -1])
    y_h = tf.slice(projected, [0, 1, 0], [-1, 1, -1])
    z_h = tf.slice(projected, [0, 2, 0], [-1, 1, -1])
    # The small epsilon guards against division by zero at degenerate depths.
    return tf.concat([x_h / (z_h + 1e-10), y_h / (z_h + 1e-10)], axis=1)
def _pixel2cam(depth, pixel_coords, intrinsic_mat_inv):
    """Transform coordinates in the pixel frame to the camera frame."""
    # Back-project homogeneous pixel coordinates through K^-1, scaled by depth.
    return tf.matmul(intrinsic_mat_inv, pixel_coords) * depth
def _meshgrid_abs(height, width):
    """Meshgrid in the absolute coordinates."""
    # Build [-1, 1] ramps replicated across rows/columns via matmul with ones.
    ones_col = tf.ones(shape=tf.stack([height, 1]))
    ones_row = tf.ones(shape=tf.stack([1, width]))
    xs = tf.matmul(
        ones_col,
        tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
    ys = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1), ones_row)
    # Rescale from [-1, 1] to absolute pixel coordinates [0, size - 1].
    xs = (xs + 1.0) * 0.5 * tf.cast(width - 1, tf.float32)
    ys = (ys + 1.0) * 0.5 * tf.cast(height - 1, tf.float32)
    # Stack flattened x, y plus a homogeneous row of ones -> [3, H*W].
    flat_x = tf.reshape(xs, (1, -1))
    flat_y = tf.reshape(ys, (1, -1))
    return tf.concat([flat_x, flat_y, tf.ones_like(flat_x)], axis=0)
def _spatial_transformer(img, coords):
    """A wrapper over _bilinear_sampler(), taking absolute coords as input."""
    height_f = tf.cast(tf.shape(img)[1], tf.float32)
    width_f = tf.cast(tf.shape(img)[2], tf.float32)
    # Split absolute (x, y) coordinates and normalize each axis to [-1, 1],
    # the range expected by _bilinear_sampler.
    x_abs = coords[:, :, :, :1]
    y_abs = coords[:, :, :, 1:]
    x_norm = x_abs / (width_f - 1) * 2.0 - 1.0
    y_norm = y_abs / (height_f - 1) * 2.0 - 1.0
    return _bilinear_sampler(img, x_norm, y_norm)
def _bilinear_sampler(im, x, y, name='blinear_sampler'):
"""Perform bilinear sampling on im given list of x, y coordinates.
Implements the differentiable sampling mechanism with bilinear kernel
in https://arxiv.org/abs/1506.02025.
x,y are tensors specifying normalized coordinates [-1, 1] to be sampled on im.
For example, (-1, -1) in (x, y) corresponds to pixel location (0, 0) in im,
and (1, 1) in (x, y) corresponds to the bottom right pixel in im.
Args:
im: Batch of images with shape [B, h, w, channels].
x: Tensor of normalized x coordinates in [-1, 1], with shape [B, h, w, 1].
y: Tensor of normalized y coordinates in [-1, 1], with shape [B, h, w, 1].
name: Name scope for ops.
Returns:
Sampled image with shape [B, h, w, channels].
Principled mask with shape [B, h, w, 1], dtype:float32. A value of 1.0
in the mask indicates that the corresponding coordinate in the sampled
image is valid.
"""
with tf.variable_scope(name):
x = tf.reshape(x, [-1])
y = tf.reshape(y, [-1])
# Constants.
batch_size = tf.shape(im)[0]
_, height, width, channels = im.get_shape().as_list()
x = tf.to_float(x)
y = tf.to_float(y)
height_f = tf.cast(height, 'float32')
width_f = tf.cast(width, 'float32')
zero = tf.constant(0, dtype=tf.int32)
max_y = tf.cast(tf.shape(im)[1] - 1, 'int32')
max_x = tf.cast(tf.shape(im)[2] - 1, 'int32')
# Scale indices from [-1, 1] to [0, width - 1] or [0, height - 1].
x = (x + 1.0) * (width_f - 1.0) / 2.0
y = (y + 1.0) * (height_f - 1.0) / 2.0
# Compute the coordinates of the 4 pixels to sample from.
x0 = tf.cast(tf.floor(x), 'int32')
x1 = x0 + 1
y0 = tf.cast(tf.floor(y), 'int32')
y1 = y0 + 1
mask = tf.logical_and(
tf.logical_and(x0 >= zero, x1 <= max_x),
tf.logical_and(y0 >= zero, y1 <= max_y))
mask = tf.to_float(mask)
x0 = tf.clip_by_value(x0, zero, max_x)
x1 = tf.clip_by_value(x1, zero, max_x)
y0 = tf.clip_by_value(y0, zero, max_y)
y1 = tf.clip_by_value(y1, zero, max_y)
dim2 = width
dim1 = width * height
# Create base index.
base = tf.range(batch_size) * dim1
base = | |
#**************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#**************************************************************
import sys
import pyuno
try:
import __builtin__ as builtins
except:
import builtins
# Python 2/3 compatibility shims: on Python 2 the name `unicode` exists, so
# alias the byte types to `str`; on Python 3 the NameError branch instead
# aliases `unicode` to `str`.
try:
    unicode
    bytes = str
    bytearray = str
except NameError:
    unicode = str
import socket # since on Windows sal3.dll no longer calls WSAStartup
# all functions and variables starting with a underscore (_) must be considered private
# and can be changed at any time. Don't use them
_g_ctx = pyuno.getComponentContext( )  # UNO component context, created at import time
_g_delegatee = builtins.__dict__["__import__"]  # original __import__, delegated to by _uno_import
def getComponentContext():
    """Return the UNO component context used to initialize the Python runtime."""
    return _g_ctx
def getConstantByName(constant):
    """Look up the value of an IDL constant given its fully qualified name."""
    return pyuno.getConstantByName(constant)
def getTypeByName(typeName):
    """Return a uno.Type instance for *typeName*.

    A com.sun.star.uno.RuntimeException is raised when the type does not exist.
    """
    return pyuno.getTypeByName(typeName)
def createUnoStruct(typeName, *args):
    """Create a UNO struct or exception named *typeName*.

    *args* may be:
      1) empty -- a default-constructed instance is returned
         (e.g. createUnoStruct("com.sun.star.uno.Exception"));
      2) exactly one instance of *typeName* -- a copy-constructed instance is
         returned (e.g. createUnoStruct("com.sun.star.uno.Exception", e));
      3) one value per struct member, in declaration order -- each value must
         match the corresponding member's type, otherwise an exception is
         thrown (e.g. createUnoStruct("com.sun.star.uno.Exception",
         "foo error", self)).
    """
    return getClass(typeName)(*args)
def getClass(typeName):
    """Return the class of a concrete UNO exception, struct, or interface."""
    return pyuno.getClass(typeName)
def isInterface(obj):
    """Return True when *obj* is a class of a UNO interface."""
    return pyuno.isInterface(obj)
def generateUuid():
    """Return a 16-byte sequence containing a newly generated uuid/guid (see rtl/uuid.h)."""
    return pyuno.generateUuid()
def systemPathToFileUrl(systemPath):
    """Return a file URL for the given system path."""
    return pyuno.systemPathToFileUrl(systemPath)
def fileUrlToSystemPath(url):
    """Return a system path for *url*, as determined by the running platform."""
    return pyuno.fileUrlToSystemPath(url)
def absolutize(path, relativeUrl):
    """Return an absolute file URL built from the given URLs."""
    return pyuno.absolutize(path, relativeUrl)
def getCurrentContext():
    """Return the currently valid UNO current context.

    See http://udk.openoffice.org/common/man/concept/uno_contexts.html#current_context
    for an explanation of the current-context concept.
    """
    return pyuno.getCurrentContext()
def setCurrentContext(newContext):
    """Install *newContext* as the new UNO current context.

    *newContext* must implement the XCurrentContext interface. The
    implementation should handle the properties it knows about and delegate
    unknown properties to the previous context; be sure to restore the old
    context when leaving your stack.
    See http://udk.openoffice.org/common/man/concept/uno_contexts.html#current_context
    """
    return pyuno.setCurrentContext(newContext)
class Enum:
    """Represents a UNO IDL enum value; use an instance to pass an enum explicitly to UNO.

    The original docstring said "boolean" -- a copy/paste slip; this class
    models enum members, not booleans.

    # typeName: the fully qualified name of the enum type, as a string
    # value: the name of the enum member, as a string
    """
    def __init__(self, typeName, value):
        self.typeName = typeName
        self.value = value
        # Raises if (typeName, value) does not name a known enum member.
        pyuno.checkEnum(self)

    def __repr__(self):
        return "<uno.Enum %s (%r)>" % (self.typeName, self.value)

    def __eq__(self, that):
        if not isinstance(that, Enum):
            return False
        return (self.typeName == that.typeName) and (self.value == that.value)

    def __ne__(self, that):
        # Python 2 does not derive != from __eq__, so define it explicitly.
        return not self.__eq__(that)
class Type:
    """Represents a UNO type; use an instance to pass a type explicitly to UNO.

    The original docstring said "boolean" -- a copy/paste slip; this class
    models UNO types.

    # typeName: name of the UNO type
    # typeClass: python Enum of TypeClass, see com/sun/star/uno/TypeClass.idl
    """
    def __init__(self, typeName, typeClass):
        self.typeName = typeName
        self.typeClass = typeClass
        # Raises if the (name, class) pair does not describe a valid UNO type.
        pyuno.checkType(self)

    def __repr__(self):
        return "<Type instance %s (%r)>" % (self.typeName, self.typeClass)

    def __eq__(self, that):
        if not isinstance(that, Type):
            return False
        return self.typeClass == that.typeClass and self.typeName == that.typeName

    def __ne__(self, that):
        # Python 2 does not derive != from __eq__, so define it explicitly.
        return not self.__eq__(that)

    def __hash__(self):
        # Hash on the name only; typeClass is determined by the name.
        return self.typeName.__hash__()
class Bool(object):
    """Deprecated coercion helper that turns a value into a plain Python bool.

    The strings "true" and "false" map to True/False; any other value is
    converted by ordinary truthiness. Use Python's True and False directly
    instead.
    """
    def __new__(cls, value):
        if isinstance(value, (str, unicode)):
            if value == "true":
                return True
            if value == "false":
                return False
        return bool(value)
class Char:
    """Represents a UNO char; use an instance to pass a single character explicitly to UNO."""
    # @param value pass a Unicode string with length 1
    def __init__(self,value):
        assert isinstance(value, unicode)
        assert len(value) == 1
        self.value=value

    def __repr__(self):
        return "<Char instance %s>" % (self.value, )

    def __eq__(self, that):
        if isinstance(that, (str, unicode)):
            # BUG FIX: the old check was `len(that) > 1`, so comparing against
            # an empty string fell through to `that[0]` and raised IndexError.
            # Only a one-character string can compare equal.
            if len(that) != 1:
                return False
            return self.value == that[0]
        if isinstance(that, Char):
            return self.value == that.value
        return False

    def __ne__(self, that):
        # Python 2 does not derive != from __eq__, so define it explicitly.
        return not self.__eq__(that)
class ByteSequence:
    """Wraps bytes/bytearray so a byte sequence can be passed explicitly to UNO."""
    def __init__(self, value):
        # Accept raw bytes/bytearray, or unwrap another ByteSequence.
        if isinstance(value, (bytes, bytearray)):
            self.value = value
        elif isinstance(value, ByteSequence):
            self.value = value.value
        else:
            raise TypeError("expected string or bytesequence")

    def __repr__(self):
        return "<ByteSequence instance '%s'>" % (self.value, )

    def __eq__(self, that):
        # Equal to another ByteSequence or to a raw bytes/bytearray payload.
        if isinstance( that, ByteSequence):
            return self.value == that.value
        elif isinstance(that, (bytes, bytearray)):
            return self.value == that
        return False

    def __ne__(self, that):
        # Python 2 does not derive != from __eq__, so define it explicitly.
        return not self.__eq__(that)

    def __len__(self):
        return len(self.value)

    def __getitem__(self, index):
        return self.value[index]

    def __iter__(self):
        return self.value.__iter__()

    def __add__(self, b):
        # Concatenation returns a new ByteSequence; operands may be raw bytes.
        if isinstance(b, (bytes, bytearray)):
            return ByteSequence(self.value + b)
        elif isinstance(b, ByteSequence):
            return ByteSequence(self.value + b.value)
        raise TypeError( "expected string or ByteSequence as operand" )

    def __hash__(self):
        # BUG FIX: bytes objects have no .hash() method -- the old
        # `self.value.hash()` raised AttributeError on every hash() call.
        return hash(self.value)
class Any:
    """Explicitly typed any; only meaningful in connection with uno.invoke()."""
    def __init__(self, type, value):
        # Accept either a ready-made Type instance or a type name to resolve.
        self.type = type if isinstance(type, Type) else getTypeByName(type)
        self.value = value
def invoke(object, methodname, argTuple):
    """Call *methodname* on *object*, passing exactly typed anys (uno.Any) through."""
    return pyuno.invoke(object, methodname, argTuple)
#---------------------------------------------------------------------------------------
# don't use any functions beyond this point, private section, likely to change
#---------------------------------------------------------------------------------------
#def _uno_import( name, globals={}, locals={}, fromlist=[], level=-1 ):
def _uno_import( name, *optargs, **kwargs ):
    """Import hook: fall back to resolving UNO entities when a normal import fails.

    Delegates to the original __import__ first. On ImportError, each name in
    fromlist is looked up as a UNO entity -- a type (``typeOf...``), a
    struct/exception/interface class, an enum member, or a constant -- and
    inserted into a synthesized module chain for the dotted *name*.
    """
    try:
        # print "optargs = " + repr(optargs)
        return _g_delegatee( name, *optargs, **kwargs )
    except ImportError:
        # process optargs: recover globals/locals/fromlist whether they were
        # passed positionally or as keywords, defaulting the rest
        globals, locals, fromlist = list(optargs)[:3] + [kwargs.get('globals',{}), kwargs.get('locals',{}), kwargs.get('fromlist',[])][len(optargs):]
        # a plain `import a.b` with no fromlist cannot be a UNO lookup
        if not fromlist:
            raise
    # walk/extend sys.modules along the dotted name, creating dummy modules
    # for the missing components
    modnames = name.split( "." )
    mod = None
    d = sys.modules
    for x in modnames:
        if x in d:
            mod = d[x]
        else:
            mod = pyuno.__class__(x) # How to create a module ??
        d = mod.__dict__
    RuntimeException = pyuno.getClass( "com.sun.star.uno.RuntimeException" )
    for x in fromlist:
        if x not in d:
            if x.startswith( "typeOf" ):
                # "typeOfX" resolves to the uno Type object of X
                try:
                    d[x] = pyuno.getTypeByName( name + "." + x[6:len(x)] )
                except RuntimeException as e:
                    raise ImportError( "type " + name + "." + x[6:len(x)] +" is unknown" )
            else:
                try:
                    # check for structs, exceptions or interfaces
                    d[x] = pyuno.getClass( name + "." + x )
                except RuntimeException as e:
                    # check for enums
                    try:
                        d[x] = Enum( name , x )
                    except RuntimeException as e2:
                        # check for constants
                        try:
                            d[x] = getConstantByName( name + "." + x )
                        except RuntimeException as e3:
                            # no known uno type !
                            raise ImportError( "type "+ name + "." +x + " is unknown" )
    return mod
# hook into the __import__ chain
builtins.__dict__["__import__"] = _uno_import
# private, referenced from the pyuno shared library
def _uno_struct__init__(self,*args):
if len(args) == 1 and hasattr(args[0], "__class__") and args[0].__class__ == self.__class__ :
self.__dict__["value"] = args[0]
else:
self.__dict__["value"] = pyuno._createUnoStructHelper(self.__class__.__pyunostruct__,args)
# private, referenced from the pyuno shared library
def _uno_struct__getattr__(self,name):
return getattr(self.__dict__["value"],name)
# private, referenced from the pyuno shared library
def _uno_struct__setattr__(self,name,value):
return setattr(self.__dict__["value"],name,value)
# private, referenced from the pyuno shared library
def _uno_struct__repr__(self):
return repr(self.__dict__["value"])
def _uno_struct__str__(self):
return str(self.__dict__["value"])
# private, referenced from the pyuno | |
<filename>desktop/core/ext-py/Twisted/twisted/trial/reporter.py<gh_stars>10-100
# -*- test-case-name: twisted.trial.test.test_reporter -*-
#
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
#
# Maintainer: <NAME>
"""
Defines classes that handle the results of tests.
"""
import sys, os
import time
import warnings
from twisted.python.compat import set
from twisted.python import reflect, log
from twisted.python.components import proxyForInterface
from twisted.python.failure import Failure
from twisted.python.util import untilConcludes
from twisted.trial import itrial, util
from zope.interface import implements
pyunit = __import__('unittest')
class BrokenTestCaseWarning(Warning):
    """Warning emitted when an exception occurs in one of setUp, tearDown,
    setUpClass, or tearDownClass."""
class SafeStream(object):
    """
    Wraps a stream object so that every C{write} call goes through
    L{untilConcludes}; all other attribute access falls through to the
    wrapped stream unchanged.
    """

    def __init__(self, original):
        self.original = original

    def __getattr__(self, name):
        # Anything not defined on the wrapper is looked up on the stream.
        return getattr(self.original, name)

    def write(self, *a, **kw):
        return untilConcludes(self.original.write, *a, **kw)
class TestResult(pyunit.TestResult, object):
    """
    Accumulates the results of several L{twisted.trial.unittest.TestCase}s.

    @ivar successes: count the number of successes achieved by the test run.
    @type successes: C{int}
    """
    implements(itrial.IReporter)

    def __init__(self):
        super(TestResult, self).__init__()
        # Trial-specific result buckets, in addition to pyunit's
        # errors/failures lists.
        self.skips = []
        self.expectedFailures = []
        self.unexpectedSuccesses = []
        self.successes = 0
        self._timings = []

    def __repr__(self):
        # NOTE(review): the "dones" field is filled from self.skips and the
        # "skips" field from self.unexpectedSuccesses -- the label/value
        # pairing looks suspect; confirm against upstream before changing.
        return ('<%s run=%d errors=%d failures=%d todos=%d dones=%d skips=%d>'
                % (reflect.qual(self.__class__), self.testsRun,
                   len(self.errors), len(self.failures),
                   len(self.expectedFailures), len(self.skips),
                   len(self.unexpectedSuccesses)))

    def _getTime(self):
        # Clock seam: overridable so tests can control timing.
        return time.time()

    def _getFailure(self, error):
        """
        Convert a C{sys.exc_info()}-style tuple to a L{Failure}, if necessary.
        """
        if isinstance(error, tuple):
            return Failure(error[1], error[0], error[2])
        return error

    def startTest(self, test):
        """
        This must be called before the given test is commenced.

        @type test: L{pyunit.TestCase}
        """
        super(TestResult, self).startTest(test)
        # Remember when this test started so stopTest can compute duration.
        self._testStarted = self._getTime()

    def stopTest(self, test):
        """
        This must be called after the given test is completed.

        @type test: L{pyunit.TestCase}
        """
        super(TestResult, self).stopTest(test)
        # Wall-clock duration of the test that just finished.
        self._lastTime = self._getTime() - self._testStarted

    def addFailure(self, test, fail):
        """
        Report a failed assertion for the given test.

        @type test: L{pyunit.TestCase}
        @type fail: L{Failure} or L{tuple}
        """
        self.failures.append((test, self._getFailure(fail)))

    def addError(self, test, error):
        """
        Report an error that occurred while running the given test.

        @type test: L{pyunit.TestCase}
        @type error: L{Failure} or L{tuple}
        """
        self.errors.append((test, self._getFailure(error)))

    def addSkip(self, test, reason):
        """
        Report that the given test was skipped.

        In Trial, tests can be 'skipped'. Tests are skipped mostly because there
        is some platform or configuration issue that prevents them from being
        run correctly.

        @type test: L{pyunit.TestCase}
        @type reason: L{str}
        """
        self.skips.append((test, reason))

    def addUnexpectedSuccess(self, test, todo):
        """Report that the given test succeeded against expectations.

        In Trial, tests can be marked 'todo'. That is, they are expected to fail.
        When a test that is expected to fail instead succeeds, it should call
        this method to report the unexpected success.

        @type test: L{pyunit.TestCase}
        @type todo: L{unittest.Todo}
        """
        # XXX - 'todo' should just be a string
        self.unexpectedSuccesses.append((test, todo))

    def addExpectedFailure(self, test, error, todo):
        """Report that the given test failed, and was expected to do so.

        In Trial, tests can be marked 'todo'. That is, they are expected to fail.

        @type test: L{pyunit.TestCase}
        @type error: L{Failure}
        @type todo: L{unittest.Todo}
        """
        # XXX - 'todo' should just be a string
        self.expectedFailures.append((test, error, todo))

    def addSuccess(self, test):
        """Report that the given test succeeded.

        @type test: L{pyunit.TestCase}
        """
        self.successes += 1

    def upDownError(self, method, error, warn, printStatus):
        # Deprecated hook; kept so old callers keep working, but only warns.
        warnings.warn("upDownError is deprecated in Twisted 8.0.",
                      category=DeprecationWarning, stacklevel=3)

    def cleanupErrors(self, errs):
        """Report an error that occurred during the cleanup between tests.
        """
        # Deprecated: cleanup errors should go through addError instead.
        warnings.warn("Cleanup errors are actual errors. Use addError. "
                      "Deprecated in Twisted 8.0",
                      category=DeprecationWarning, stacklevel=2)

    def startSuite(self, name):
        # Deprecated no-op retained for backward compatibility.
        warnings.warn("startSuite deprecated in Twisted 8.0",
                      category=DeprecationWarning, stacklevel=2)

    def endSuite(self, name):
        # Deprecated no-op retained for backward compatibility.
        warnings.warn("endSuite deprecated in Twisted 8.0",
                      category=DeprecationWarning, stacklevel=2)

    def done(self):
        """
        The test suite has finished running.
        """
class TestResultDecorator(proxyForInterface(itrial.IReporter,
                                            "_originalReporter")):
    """
    Base class for TestResult decorators.

    Every L{itrial.IReporter} method is proxied to C{_originalReporter} by
    L{proxyForInterface}; subclasses override only what they need to change.

    @ivar _originalReporter: The wrapped instance of reporter.
    @type _originalReporter: A provider of L{itrial.IReporter}
    """
    implements(itrial.IReporter)
class UncleanWarningsReporterWrapper(TestResultDecorator):
    """
    A wrapper for a reporter that converts L{util.DirtyReactorError}s
    to warnings.
    """
    implements(itrial.IReporter)

    def addError(self, test, error):
        """
        If the error is a L{util.DirtyReactorError}, instead of
        reporting it as a normal error, throw a warning.
        """
        # Only dirty-reactor aggregate errors are downgraded; everything else
        # is forwarded to the wrapped reporter unchanged.
        if (isinstance(error, Failure)
            and error.check(util.DirtyReactorAggregateError)):
            warnings.warn(error.getErrorMessage())
        else:
            self._originalReporter.addError(test, error)
class _AdaptedReporter(TestResultDecorator):
    """
    TestResult decorator that adapts every test with a supplied adapter
    before delegating to the wrapped reporter.
    """

    def __init__(self, original, testAdapter):
        """
        Construct an L{_AdaptedReporter}.

        @param original: An {itrial.IReporter}.
        @param testAdapter: A callable that returns an L{itrial.ITestCase}.
        """
        TestResultDecorator.__init__(self, original)
        self.testAdapter = testAdapter

    def addError(self, test, error):
        """
        See L{itrial.IReporter}.
        """
        return self._originalReporter.addError(self.testAdapter(test), error)

    def addExpectedFailure(self, test, failure, todo):
        """
        See L{itrial.IReporter}.
        """
        adapted = self.testAdapter(test)
        return self._originalReporter.addExpectedFailure(adapted, failure, todo)

    def addFailure(self, test, failure):
        """
        See L{itrial.IReporter}.
        """
        return self._originalReporter.addFailure(self.testAdapter(test), failure)

    def addSkip(self, test, skip):
        """
        See L{itrial.IReporter}.
        """
        return self._originalReporter.addSkip(self.testAdapter(test), skip)

    def addUnexpectedSuccess(self, test, todo):
        """
        See L{itrial.IReporter}.
        """
        return self._originalReporter.addUnexpectedSuccess(self.testAdapter(test), todo)

    def startTest(self, test):
        """
        See L{itrial.IReporter}.
        """
        return self._originalReporter.startTest(self.testAdapter(test))

    def stopTest(self, test):
        """
        See L{itrial.IReporter}.
        """
        return self._originalReporter.stopTest(self.testAdapter(test))
class Reporter(TestResult):
"""
A basic L{TestResult} with support for writing to a stream.
@ivar _startTime: The time when the first test was started. It defaults to
C{None}, which means that no test was actually launched.
@type _startTime: C{float} or C{NoneType}
@ivar _warningCache: A C{set} of tuples of warning message (file, line,
text, category) which have already been written to the output stream
during the currently executing test. This is used to avoid writing
duplicates of the same warning to the output stream.
@type _warningCache: C{set}
@ivar _publisher: The log publisher which will be observed for warning
events.
@type _publisher: L{LogPublisher} (or another type sufficiently similar)
"""
implements(itrial.IReporter)
_separator = '-' * 79
_doubleSeparator = '=' * 79
    def __init__(self, stream=sys.stdout, tbformat='default', realtime=False,
                 publisher=None):
        """
        @param stream: destination for report output; wrapped in L{SafeStream}.
        @param tbformat: traceback format name used when formatting failures.
        @param realtime: when true, errors/failures are written as they occur.
        @param publisher: optional log publisher observed for warning events.
        """
        super(Reporter, self).__init__()
        self._stream = SafeStream(stream)
        self.tbformat = tbformat
        self.realtime = realtime
        # Set on the first startTest call; None means no test has run yet.
        self._startTime = None
        self._warningCache = set()
        # Start observing log events so as to be able to report warnings.
        self._publisher = publisher
        if publisher is not None:
            publisher.addObserver(self._observeWarnings)
    def _observeWarnings(self, event):
        """
        Observe warning events and write them to C{self._stream}.

        This method is a log observer which will be registered with
        C{self._publisher.addObserver}.

        @param event: A C{dict} from the logging system. If it has a
            C{'warning'} key, a logged warning will be extracted from it and
            possibly written to C{self.stream}.
        """
        if 'warning' in event:
            key = (event['filename'], event['lineno'],
                   event['category'].split('.')[-1],
                   str(event['warning']))
            # De-duplicate: each distinct warning is printed at most once per
            # test (the cache is reset in startTest).
            if key not in self._warningCache:
                self._warningCache.add(key)
                self._stream.write('%s:%s: %s: %s\n' % key)
    def stream(self):
        # Deprecated read-only accessor for the wrapped output stream.
        warnings.warn("stream is deprecated in Twisted 8.0.",
                      category=DeprecationWarning, stacklevel=2)
        return self._stream
    stream = property(stream)  # expose the deprecated accessor as a property
    def separator(self):
        # Deprecated read-only accessor for the separator line.
        warnings.warn("separator is deprecated in Twisted 8.0.",
                      category=DeprecationWarning, stacklevel=2)
        return self._separator
    separator = property(separator)  # expose the deprecated accessor as a property
    def startTest(self, test):
        """
        Called when a test begins to run. Records the time when it was first
        called and resets the warning cache.

        @param test: L{ITestCase}
        """
        super(Reporter, self).startTest(test)
        # Remember when the very first test of the run started.
        if self._startTime is None:
            self._startTime = self._getTime()
        # Warnings are de-duplicated per test, so start each test fresh.
        self._warningCache = set()
    def addFailure(self, test, fail):
        """
        Called when a test fails. If L{realtime} is set, then it prints the
        error to the stream.

        @param test: L{ITestCase} that failed.
        @param fail: L{failure.Failure} containing the error.
        """
        super(Reporter, self).addFailure(test, fail)
        if self.realtime:
            # Re-read from self.failures: the base class normalized the value.
            fail = self.failures[-1][1] # guarantee it's a Failure
            self._write(self._formatFailureTraceback(fail))
    def addError(self, test, error):
        """
        Called when a test raises an error. If L{realtime} is set, then it
        prints the error to the stream.

        @param test: L{ITestCase} that raised the error.
        @param error: L{failure.Failure} containing the error.
        """
        # Normalize exc_info-style tuples to a Failure before recording.
        error = self._getFailure(error)
        super(Reporter, self).addError(test, error)
        if self.realtime:
            error = self.errors[-1][1] # guarantee it's a Failure
            self._write(self._formatFailureTraceback(error))
    def write(self, format, *args):
        # Deprecated public alias for _write().
        warnings.warn("write is deprecated in Twisted 8.0.",
                      category=DeprecationWarning, stacklevel=2)
        self._write(format, *args)
    def _write(self, format, *args):
        """
        Safely write to the reporter's stream.

        @param format: A format string to write.
        @param *args: The arguments for the format string.
        """
        s = str(format)
        assert isinstance(s, type(''))
        if args:
            self._stream.write(s % args)
        else:
            # No args: write the string literally, with no %-interpolation.
            self._stream.write(s)
        # Flush through untilConcludes so the flush call is guarded.
        untilConcludes(self._stream.flush)
    def writeln(self, format, *args):
        # Deprecated public alias for _writeln().
        warnings.warn("writeln is deprecated in Twisted 8.0.",
                      category=DeprecationWarning, stacklevel=2)
        self._writeln(format, *args)
    def _writeln(self, format, *args):
        """
        Safely write a line to the reporter's stream. Newline is appended to
        the format string.

        @param format: A format string to write.
        @param *args: The arguments for the format string.
        """
        # Two writes: the formatted text, then a literal newline (so the
        # newline is never %-interpolated).
        self._write(format, *args)
        self._write('\n')
def upDownError(self, method, error, warn, printStatus):
super(Reporter, self).upDownError(method, error, warn, printStatus)
if warn:
tbStr = | |
in range(int(self._quad_cells[index1][1].y), int(self._quad_cells[index1][2].y)):
if y_left in range(self._quad_cells[index2][0].y, self._quad_cells[index2][3].y):
same_boundary.append(index2)
break
temp = self._quad_cells[index1][0:4]
centroid_vertex = centroid(temp)
place = centroid_vertex.find_point(graph_vertices)
if (place == -1):
graph_vertices.append(centroid_vertex)
if (len(same_boundary) == 1):
temp_edge_middle = centroid([self._quad_cells[index1][1], self._quad_cells[index1][2]])
graph_vertices.append(temp_edge_middle)
n = len(graph_vertices) - 1
if (place != -1):
graph_edges.append([place, n])
graph_vertices_cell_connection[place] = index1
else:
graph_edges.append([n - 1, n])
graph_vertices_cell_connection[n - 1] = index1
temp = self._quad_cells[same_boundary[0]][0:4]
curr_centroid_vertex = centroid(temp)
place2 = curr_centroid_vertex.find_point(graph_vertices)
if (place2 == -1):
graph_vertices.append(curr_centroid_vertex)
graph_edges.append([n, n + 1])
else:
graph_edges.append([n, place2])
elif (len(same_boundary) > 1):
n = len(graph_vertices) - 1
if (place != -1):
use = place
else:
use = n
for index, i in enumerate(same_boundary):
temp = self._quad_cells[i][0:4]
curr_centroid_vertex = centroid(temp)
temp_edge_middle = centroid([self._quad_cells[i][0], self._quad_cells[i][3]])
graph_vertices.append(temp_edge_middle)
pl1 = len(graph_vertices) - 1
hmmm = curr_centroid_vertex.find_point(graph_vertices)
if (hmmm == -1):
graph_vertices.append(curr_centroid_vertex)
pl2 = len(graph_vertices) - 1
else:
pl2 = hmmm
graph_edges.append([use, pl1])
graph_edges.append([pl1, pl2])
graph_vertices_cell_connection[use] = index1
else:
graph_vertices_cell_connection[place] = index1
# Convert graph in adjacency list format
graph = []
for j in range(len(graph_vertices)):
graph.append([])
for i in graph_edges:
if (i[0] == j):
graph[j].append(i[1])
elif (i[1] == j):
graph[j].append(i[0])
path = bfs(graph, len(graph_vertices) - 2, len(graph_vertices) - 1)
if (path is None):
print("No path found. Sorry")
else:
print("Path found.")
# plt.plot(i.x,i.y, marker="x")
graph_edges_without_bounds = []
for index, i in enumerate(graph_edges):
if index % 2 == 0:
graph_edges_without_bounds.append([i[0], graph_edges[index + 1][1]])
return graph_edges_without_bounds, graph_vertices, graph_vertices_cell_connection
def find_cell_lines(self):
    """
    Find vertical lines and obstacle intersections of each line.

    For every x that holds at least one obstacle vertex, a vertical sweep
    line is examined and, for each vertex on it, the free segment around
    that vertex is recorded as {"start": lower bound, "point": the vertex
    (or a midpoint for derived segments), "end": upper bound}.

    NOTE(review): membership tests use range(), so coordinates are assumed
    to be integers — confirm against the input-file format.

    :return: {x: {obstacle_id: OrderedDict{vertex_id: segment dict}}},
        each per-obstacle dict ordered by the y of its "point".
    """
    lines_and_obstacles = {}
    # Sweep the vertical lines left to right.
    for current_x in sorted(self._all_vertices):
        lines_and_obstacles[current_x] = {}
        previous_vertice_id = []
        # Visit the vertices on this sweep line bottom-to-top.
        for vertice_id, vertice in enumerate(sorted(self._all_vertices[current_x], key=lambda x: x.y)):
            obstacle_id = vertice.obstacle
            # Create the (still unbounded) segment record for this vertex.
            if not obstacle_id in lines_and_obstacles[current_x]:
                lines_and_obstacles[current_x][obstacle_id] = {
                    vertice_id: {"point": vertice, "start": None, "end": None}}
            else:
                lines_and_obstacles[current_x][obstacle_id][vertice_id] = {"point": vertice, "start": None,
                                                                           "end": None}
            # Intersections of the sweep line with this vertex's OWN obstacle,
            # split into hits below ("bottom") and above ("top") the vertex.
            self_intersection = {"bottom": [], "top": []}
            for edge in self._obstacle_edges[obstacle_id]:
                if current_x in range(edge[0][0] - 1, edge[1][0] + 1):
                    res_bottom = segment_intersection(point(current_x, 0), vertice, point(edge[0][0], edge[0][1]),
                                                      point(edge[1][0], edge[1][1]))
                    res_top = segment_intersection(vertice, point(current_x, self._boundary[2].y),
                                                   point(edge[0][0], edge[0][1]),
                                                   point(edge[1][0], edge[1][1]))
                    # segment_intersection appears to return -1 on a miss.
                    if res_bottom != -1:
                        if res_bottom.y in range(-1, vertice.y):
                            if res_bottom.y != vertice.y:
                                self_intersection["bottom"].append(res_bottom.y)
                                self_intersection["bottom"].sort()
                    if res_top != -1:
                        if res_top.y in range(vertice.y, self._boundary[2].y + 1):
                            if res_top.y != vertice.y:
                                self_intersection["top"].append(res_top.y)
                                self_intersection["top"].sort()
            two_vertice_on_same_line = False
            if len(self_intersection["bottom"]) > 0:
                # Own obstacle lies below this vertex.
                if [[current_x, vertice.y, obstacle_id],
                    [current_x, self_intersection["bottom"][-1], obstacle_id]] in \
                        self._obstacle_edges[obstacle_id]:
                    # Nearest hit below is joined to this vertex by a vertical
                    # edge of the same obstacle: two vertices share this line.
                    previous_vertice_id = [index for index, x in
                                           enumerate(sorted(self._all_vertices[current_x], key=lambda x: x.y)) if
                                           x.equals(point(current_x, self_intersection["bottom"][-1], obstacle_id))]
                    if self._obstacle_limits[obstacle_id]["x_low"] == vertice.x or \
                            self._obstacle_limits[obstacle_id][
                                "x_high"] == vertice.x:
                        # Vertical edge at the obstacle's extreme x.
                        lines_and_obstacles[current_x][obstacle_id][vertice_id]["start"] = vertice
                        two_vertice_on_same_line = True
                    elif previous_vertice_id[0] < vertice_id:
                        # Reuse the lower vertex's already-computed start.
                        lines_and_obstacles[current_x][obstacle_id][vertice_id]["start"] = \
                            lines_and_obstacles[current_x][obstacle_id][previous_vertice_id[0]]["start"]
                        previous_vertice_id = []
                elif not Polygon([x[:2] for x in self._obstacles[obstacle_id]]).contains(
                        Point(current_x, (vertice.y + self_intersection["bottom"][-1]) / 2)):
                    # Free space between the vertex and the hit below it: the
                    # segment starts at that intersection point.
                    obstacle_index_for_point = -1
                    if [current_x, self_intersection["bottom"][-1], obstacle_id] in self._obstacles[obstacle_id]:
                        obstacle_index_for_point = obstacle_id
                    lines_and_obstacles[current_x][obstacle_id][vertice_id]["start"] = point(current_x,
                                                                                             self_intersection[
                                                                                                 "bottom"][
                                                                                                 -1],
                                                                                             obstacle_index_for_point)
                else:
                    # The obstacle itself fills the gap below: segment starts
                    # at the vertex.
                    lines_and_obstacles[current_x][obstacle_id][vertice_id]["start"] = vertice
            if len(self_intersection["top"]) > 0:
                # Own obstacle directly above: segment ends at the vertex.
                lines_and_obstacles[current_x][obstacle_id][vertice_id]["end"] = vertice
            # Candidate bounds seeded with the environment floor/ceiling
            # (obstacle id -1 marks "boundary, not an obstacle").
            intersections = [[0, -1], [self._boundary[2].y, -1]]
            intersections_only_y = [0, self._boundary[2].y]
            if lines_and_obstacles[current_x][obstacle_id][vertice_id]["end"] is None or \
                    lines_and_obstacles[current_x][obstacle_id][vertice_id]["start"] is None:
                # Missing bound(s): intersect the full sweep line with every
                # OTHER obstacle and take the nearest hits around the vertex.
                for inter_obstacle_id in self._obstacle_edges:
                    if inter_obstacle_id != obstacle_id:
                        for edge in self._obstacle_edges[inter_obstacle_id]:
                            if current_x in range(edge[0][0] - 1, edge[1][0] + 1):
                                res_all = segment_intersection(point(current_x, 0),
                                                               point(current_x, self._boundary[2].y),
                                                               point(edge[0][0], edge[0][1]),
                                                               point(edge[1][0], edge[1][1]))
                                if res_all != -1:
                                    intersections.append([res_all.y, inter_obstacle_id])
                                    intersections_only_y.append(res_all.y)
                intersections.sort()
                if lines_and_obstacles[current_x][obstacle_id][vertice_id]["end"] is None:
                    # Closest hit at or above the vertex.
                    near_point = [y for y in intersections if y[0] >= vertice.y][0]
                    lines_and_obstacles[current_x][obstacle_id][vertice_id]["end"] = point(current_x, near_point[0],
                                                                                           near_point[1])
                if lines_and_obstacles[current_x][obstacle_id][vertice_id]["start"] is None:
                    # Closest hit at or below the vertex.
                    near_point = [y for y in intersections if y[0] <= vertice.y][-1]
                    lines_and_obstacles[current_x][obstacle_id][vertice_id]["start"] = point(current_x, near_point[0],
                                                                                             near_point[1])
            if two_vertice_on_same_line:
                # Two vertices of this obstacle share the sweep line: add a
                # synthetic segment under key (vertice_id + 10) spanning from
                # the lower vertex's start to this vertex's end.
                first_previous_vertice_id = [index for index, x in
                                             enumerate(sorted(self._all_vertices[current_x], key=lambda x: x.y)) if
                                             x.equals(
                                                 point(current_x, self_intersection["bottom"][0], obstacle_id))]
                top_bound = lines_and_obstacles[current_x][obstacle_id][vertice_id]["end"].y
                # NOTE(review): previous_vertice_id was computed over the
                # y-sorted vertex list but indexes the unsorted
                # self._all_vertices[current_x] here — confirm orders agree.
                lines_and_obstacles[current_x][obstacle_id][vertice_id + 10] = {
                    "start": lines_and_obstacles[current_x][obstacle_id][first_previous_vertice_id[0]]["start"],
                    "point": point(current_x,
                                   (vertice.y + self._all_vertices[current_x][previous_vertice_id[0]].y) / 2,
                                   obstacle_id),
                    "end": point(current_x, top_bound, obstacle_id)
                }
                lines_and_obstacles[current_x][obstacle_id][vertice_id + 10]["from-obstacle"] = obstacle_id
                previous_vertice_id = []
            if len(previous_vertice_id) > 0 and previous_vertice_id[0] == vertice_id:
                # Propagate this vertex's start down to the previous record.
                lines_and_obstacles[current_x][obstacle_id][vertice_id - 1]["start"] = \
                    lines_and_obstacles[current_x][obstacle_id][vertice_id]["start"]
                previous_vertice_id = []
            if lines_and_obstacles[current_x][obstacle_id][vertice_id]["start"].y in intersections_only_y and \
                    lines_and_obstacles[current_x][obstacle_id][vertice_id]["end"].y in intersections_only_y and \
                    lines_and_obstacles[current_x][obstacle_id][vertice_id]["start"].y != \
                    lines_and_obstacles[current_x][obstacle_id][vertice_id]["end"].y:
                # Both bounds came from other obstacles / the boundary: split
                # into below-vertex (vertice_id), above-vertex (vertice_id + 1)
                # and whole-line (vertice_id + 2) segments.
                upper_y = deepcopy(lines_and_obstacles[current_x][obstacle_id][vertice_id]["end"].y)
                upper_y_obstacle = deepcopy(lines_and_obstacles[current_x][obstacle_id][vertice_id]["end"].obstacle)
                lower_y = deepcopy(lines_and_obstacles[current_x][obstacle_id][vertice_id]["start"].y)
                lower_y_obstacle = deepcopy(
                    lines_and_obstacles[current_x][obstacle_id][vertice_id]["start"].obstacle)
                if (vertice_id + 10) in lines_and_obstacles[current_x][obstacle_id]:
                    # Prefer the synthetic two-vertex segment's bounds.
                    upper_y = deepcopy(lines_and_obstacles[current_x][obstacle_id][vertice_id + 10]["end"].y)
                    upper_y_obstacle = deepcopy(
                        lines_and_obstacles[current_x][obstacle_id][vertice_id + 10]["end"].obstacle)
                    lower_y = deepcopy(lines_and_obstacles[current_x][obstacle_id][vertice_id + 10]["start"].y)
                    lower_y_obstacle = deepcopy(
                        lines_and_obstacles[current_x][obstacle_id][vertice_id + 10]["start"].obstacle)
                lines_and_obstacles[current_x][obstacle_id][vertice_id + 1] = {
                    "start": deepcopy(vertice),
                    "point": deepcopy(vertice),
                    "end": point(current_x, upper_y, upper_y_obstacle)
                }
                lines_and_obstacles[current_x][obstacle_id][vertice_id + 1]["point"].y = (vertice.y + upper_y) / 2
                if lines_and_obstacles[current_x][obstacle_id][vertice_id + 1][
                        "point"].obstacle != upper_y_obstacle:
                    # Midpoint sits between two different obstacles: mark -1.
                    lines_and_obstacles[current_x][obstacle_id][vertice_id + 1]["point"].obstacle = -1
                lines_and_obstacles[current_x][obstacle_id][vertice_id + 1]["from-obstacle"] = obstacle_id
                lines_and_obstacles[current_x][obstacle_id][vertice_id] = {
                    "start": point(current_x, lower_y, lower_y_obstacle),
                    "point": deepcopy(vertice),
                    "end": deepcopy(vertice)
                }
                lines_and_obstacles[current_x][obstacle_id][vertice_id]["point"].y = lower_y + (
                    vertice.y - lower_y) / 2
                lines_and_obstacles[current_x][obstacle_id][vertice_id + 2] = {
                    "start": point(current_x, lower_y, lower_y_obstacle),
                    "point": point(current_x, upper_y, upper_y_obstacle),
                    "end": point(current_x, upper_y, upper_y_obstacle)
                }
                lines_and_obstacles[current_x][obstacle_id][vertice_id + 2]["all-line"] = True
                lines_and_obstacles[current_x][obstacle_id][vertice_id + 2]["from-obstacle"] = obstacle_id
        # Order each obstacle's segments bottom-to-top by their "point" y.
        for obstacle_index in lines_and_obstacles[current_x]:
            lines_and_obstacles[current_x][obstacle_index] = OrderedDict(
                sorted(lines_and_obstacles[current_x][obstacle_index].items(), key=lambda x: x[1]["point"].y))
    return lines_and_obstacles
def process_input_file(self, file_name, boust):
"""
Get map information from given file and calculate vertical cell decomposition for given map.
:param file_name:
:return:
"""
# dpi level of saved graph - Higher for high resolution output--
# True - euler tour graph will rendered to file
print_graph = False
new_path = "./prints/" + "input_file_6" + "/bottle_locations.txt"
if os.path.exists(new_path):
file_handler = codecs.open(new_path, "r", encoding="utf-8")
raw_data = file_handler.read()
raw_data = raw_data.split("\n")
for line in raw_data:
bottle_data = line.split(",")
self._input_file = file_name
raw_data = self.load_raw_data()
new_path = "./prints/" + self._input_file
if not os.path.exists(new_path):
os.makedirs(new_path)
# Extract vertices----------------------------------------------
temp = self.parse_input_line(raw_data[0])
self._boundary = [point(i[0], i[1]) for i in temp]
# Extract obstacles
for i in raw_data[1:len(raw_data) - 1]:
self._obstacles.append(self.parse_input_line(i))
obstacle_vertices_as_points = []
# sort by x-values
for index, i in enumerate(self._obstacles):
for j in i:
j.append(index)
temp = point(j[0], j[1], j[2])
obstacle_vertices_as_points.append(temp)
# Draw the problem
self.draw_problem()
# -----------------------------------------------------------
# Find and set obstacle vertices and obstacle edges
for index, i in enumerate(self._obstacles):
self._obstacle_edges[index] = []
self._simple_obstacle_edges[index] = []
self._obstacle_limits[index] = {"x_low": 1000000, "x_high": -1}
for vertex_count, j in enumerate(i):
if j[0] in self._all_vertices:
self._all_vertices[j[0]].append(point(j[0], j[1], index))
else:
self._all_vertices[j[0]] = [point(j[0], j[1], index)]
# set obstacle edges
vertex = deepcopy(j)
if vertex_count > 0:
edge_start = deepcopy(i[vertex_count - 1])
self._obstacle_edges[index].append([edge_start, vertex])
self._obstacle_edges[index].append([vertex, edge_start])
self._simple_obstacle_edges[index].append(
[Point(edge_start[0], edge_start[1]), Point(vertex[0], vertex[1])])
if vertex_count == len(i) - 1:
edge_start = deepcopy(i[0])
self._obstacle_edges[index].append([edge_start, vertex])
self._obstacle_edges[index].append([vertex, edge_start])
self._simple_obstacle_edges[index].append(
[Point(edge_start[0], edge_start[1]), Point(vertex[0], vertex[1])])
if self._obstacle_limits[index]["x_low"] > j[0]:
self._obstacle_limits[index]["x_low"] = j[0]
if self._obstacle_limits[index]["x_high"] < j[0]:
self._obstacle_limits[index]["x_high"] = j[0]
# -----------------------------------------------------------
# find and set obstacle vertices and obstacle edges
lines_and_obstacles = self.find_cell_lines()
# -----------------------------------------------------------
# Find vertical limits of env
y_limit_lower = self._boundary[0].y
y_limit_upper = self._boundary[2].y
# ------------------------------------------------------
# Find Polygon cells naively.
lines_and_obstacles = OrderedDict(sorted(lines_and_obstacles.items()))
cells = self.find_cells(lines_and_obstacles)
cells = self.sort_cells(cells)
if boust:
cells[1] = [
cells[1][0],
cells[2][0],
cells[3][0],
cells[4][0],
cells[5][0],
cells[5][1],
cells[5][2],
cells[5][3],
cells[4][3],
cells[3][3],
cells[2][3],
cells[1][2],
cells[1][3],
]
cells.pop(2)
cells.pop(2)
cells.pop(2)
cells.pop(2)
cells[6] = [
cells[6][0],
cells[7][1],
cells[7][2],
cells[7][3],
cells[6][2],
cells[6][3]
]
cells.pop(7)
# -------------------------------------------------------
# Merge overlapping Polygons
self._quad_cells = cells
# ------------------------------------------------------
# Add boundary lines
if self._boundary[0].x != obstacle_vertices_as_points[0].x:
self._quad_cells.append(
[self._boundary[0], point(obstacle_vertices_as_points[0].x, y_limit_lower),
point(obstacle_vertices_as_points[0].x, y_limit_upper),
self._boundary[3]])
if self._boundary[1].x != obstacle_vertices_as_points[-1].x:
self._quad_cells.append(
[point(sorted(obstacle_vertices_as_points, key=lambda x: x.x)[-1].x, y_limit_lower),
self._boundary[1],
self._boundary[2],
point(sorted(obstacle_vertices_as_points, key=lambda x: x.x)[-1].x, y_limit_upper)])
# -------------------------------------------------------
# Plot final cells
self._quad_cells = self.sort_cells(self._quad_cells)
# print cells
self.draw_cells()
plt.savefig("./prints/" + self._input_file + "/map_with_cells.png", dpi=self.dpi)
# ----------------------------------------------------------------------
# Calculate and draw graph
if os.path.exists("./maps/" + self._input_file + ".png"):
fig, ax = plt.subplots()
self.draw_cells()
img = plt.imread("./maps/" + self._input_file + ".png")
ax.imshow(img, origin='lower')
plt.savefig("./prints/" + self._input_file + "/map_with_image.png", dpi=self.dpi)
graph_edges, graph_vertices, graph_vertices_cell_connection = self.calculate_graph(plt)
tour = find_euler_tour(graph_edges)
visited_cells = {}
last_visited = 0
empty_connection = False
text_file = open("./prints/" + self._input_file + "/tour_explanation.txt", "w")
self._quad_cells = []
for index, i in enumerate(cells):
i.append(i[0])
self._quad_cells.append(i)
text_file.write("cell {0}: {1} {2} {3} {4}".format(index, i[0], i[1], i[2], i[3]))
text_file.write("\n")
if print_graph:
# Draw everything--------------
for index, i in enumerate(graph_vertices):
plt.annotate(str(index), xy=(i.x, i.y), xytext=(i.x + 2, i.y - 2))
# plt.plot(i.x,i.y, marker="x")
for index, i in enumerate(graph_edges):
temp_x = [graph_vertices[i[0]].x, graph_vertices[i[1]].x]
temp_y = [graph_vertices[i[0]].y, graph_vertices[i[1]].y]
plt.plot(temp_x, temp_y)
# print | |
<reponame>smchartrand/MarkovProcess_Bedload<filename>BeRCM/model/logic.py<gh_stars>1-10
from __future__ import division
import math
import random
import numpy as np
import sympy as sy
import copy
#TODO: Refactor functions so that they don't reach into the paramters file
from collections import defaultdict
from codetiming import Timer
import logging
# TODO: Consider refactoring from class into simple struct
class Subregion():
    """ Named subregion of the stream bed.

    Holds the subregion's name and its left/right boundaries, all fixed
    at instantiation time; the accessors below return them unchanged.
    """
    def __init__(self, name, left_boundary, right_boundary):
        self.name = name
        self.left_boundary = left_boundary
        self.right_boundary = right_boundary

    # Accessor methods are kept (rather than direct attribute access)
    # for compatibility with existing callers.
    def leftBoundary(self):
        return self.left_boundary

    def rightBoundary(self):
        return self.right_boundary

    def getName(self):
        return self.name
#%% Bed-related functionss
# TODO: consider if Bed (and Model) functions should be refactored into classes
# @Timer("Get_event_particles", text="get_event_particles call: {:.3f} seconds", logger=None)
def get_event_particles(e_events, subregions, model_particles, level_limit, height_dependant=False):
    """ Find and return list of particles to be entrained

    Keyword arguments:
    e_events -- Number of events requested per subregion
    subregions -- List of Subregion objects
    model_particles -- The model's model_particles array
    level_limit -- maximum number of stream elevations considered when
        height_dependant entrainment is enabled
    height_dependant -- entrain tip particles of the top level first

    Returns:
    event_particles -- array of ids of particles to be entrained
    """
    if e_events == 0:
        e_events = 1  # ???
    event_particles = []
    for subregion in subregions:
        # Keep only particles inside this subregion that are in-stream
        # (x != -1) and active (state flag != 0).
        in_region = model_particles[
            (model_particles[:, 0] >= subregion.leftBoundary())
            & (model_particles[:, 0] <= subregion.rightBoundary())]
        in_stream = in_region[in_region[:, 0] != -1]
        active = in_stream[in_stream[:, 4] != 0]
        chosen_ids = []
        if height_dependant:
            # TODO: better approach to identify the level/elevation relationship. This is messy
            levels = elevation_list(in_region[:, 2], desc=False)
            tip_particles = []
            # or anything greater. TODO: THIS IS BAD -- active particles != stream elevations
            if len(levels) == level_limit:
                tip_particles = active[active[:, 2] == levels[level_limit - 1]]
            for particle in tip_particles:
                chosen_ids.append(particle[3])
                active = active[active[:, 2] != particle[2]]
        # Sample without replacement; never ask for more than exist.
        sample_size = min(e_events, len(active))
        for index in random.sample(range(len(active)), sample_size):
            # NOTE: this only works because index=id in the model_particle array
            chosen_ids.append(int(active[index][3]))
        # Ghost particles (x == -1) are reset to x=0 and always entrained.
        ghost_particles = np.where(model_particles[:, 0] == -1)[0]
        for index in ghost_particles:
            model_particles[index][0] = 0
            chosen_ids.append(index)
        if e_events != len(chosen_ids):
            logging.warning(
                f'Requested {e_events} events in {subregion.getName()} '
                f'but {len(chosen_ids)} are occuring')
        event_particles = event_particles + chosen_ids
    return np.array(event_particles, dtype=np.intp)
# @Timer("define_subregions", text="define_subregions call: {:.3f} seconds", logger=None)
def define_subregions(bed_length, num_subregions):
    """ Define subregion list for model stream.

    Keyword arguments:
    bed_length -- The length of the model bed.
    num_subregions -- The number of subregions to create.

    Returns:
    subregions_arr -- The list of Subregion objects, left to right.
    """
    # TODO: Catch failure of assertion
    assert(math.remainder(bed_length, num_subregions) == 0)
    step = bed_length / num_subregions
    subregions_arr = []
    lower = 0
    for region in range(num_subregions):
        # Each subregion's right edge becomes the next one's left edge.
        upper = lower + step
        subregions_arr.append(Subregion(f'subregion_{region}', lower, upper))
        lower = upper
    return subregions_arr
# TODO: This does not need to be an independant function
# Should merge model and bed particle array builders into one.
# This could aid maintainace/change of the array structure.
def add_bed_particle(diam, bed_particles, particle_id, pack_idx):
    """ Add 'particle' to the bed particle list.

    Writes row ``particle_id`` of ``bed_particles`` (in place) as
    [center, diameter, elevation, pack_idx, state, age, loop age],
    where elevation, state and both age counters start at zero, then
    advances the packing index and id for the next iteration.

    Keyword arguments:
    diam -- diameter of the particle
    bed_particles -- current array of bed particles (mutated in place)
    particle_id -- index into bed_particles list
    pack_idx -- left-most extent of particle

    Returns:
    (particle_id, pack_idx) updated for the next particle.
    """
    centre = pack_idx + diam / 2
    # elevation, state, age and loop-age all start at zero
    bed_particles[particle_id] = [centre, diam, 0, pack_idx, 0, 0, 0]
    # update build parameters
    return particle_id + 1, pack_idx + diam
# @Timer("build_streambed", text="build_streambed call: {:.5f} seconds", logger=None)
def build_streambed(x_max, set_diam):
    """ Build the bed particle list.

    Repeatedly calls add_bed_particle until bed_complete reports the
    packing has reached x_max, then reconciles x_max with the actual
    packed extent (packing can overshoot when x_max is not a multiple
    of set_diam).

    Note: the updates to x-extent are only required
    when variable particle diameter is being used.

    Keyword arguments:
    x_max -- requested x-extent of the stream bed (must be > 0)
    set_diam -- (uniform) particle diameter used for packing

    Return values:
    bed_particles -- array of bed particles (all-zero rows stripped)
    x_max -- possibly-updated x-extent matching the actual packing
    """
    max_particles = int(math.ceil(x_max / set_diam))
    bed_particles = np.zeros([max_particles, 7], dtype=float)
    running_id = 0
    running_pack_idx = 0
    # Loop condition replaces the old `while True`/`break` pair.
    # This probably doesn't need to be a loop. NumPy!
    while not bed_complete(running_pack_idx, x_max):
        running_id, running_pack_idx = add_bed_particle(set_diam,
                                                        bed_particles,
                                                        running_id,
                                                        running_pack_idx)
    # Bed packing does not always match x_max. Adjust if off.
    # bed_max = last particle's pack_idx + its diameter.
    bed_max = int(math.ceil(bed_particles[running_id - 1][1]
                            + bed_particles[running_id - 1][3]))
    if x_max != bed_max:
        msg = (
            f'Bed packing could not match x_max parameter... Updating '
            f'x_max to match packing extent: {bed_max}....'
        )
        logging.warning(msg)
        x_max = bed_max
    # strip zero element particles tuples from the original array
    valid = ((bed_particles == 0).all(axis=(1)))
    bed_particles = bed_particles[~valid]
    return bed_particles, x_max
def bed_complete(pack_idx, x_max):
    """Return 1 once the packing extent has reached x_max, else 0."""
    # similarly, if np.count_nonzero(bed_space) == x_max
    return 1 if pack_idx >= x_max else 0
# End bed-related function definitions
#%% Entrainment and model particle related functions
def determine_num_particles(pack_frac, num_vertices):
    """Return the number of model particles to be used, based on
    the packing fraction (rounded up to a whole particle)."""
    return int(math.ceil(num_vertices * pack_frac))
# @Timer("place_particle", text="place_particle call: {:.5f} seconds", logger=None)
# Second answer: https://math.stackexchange.com/questions/2293201/
def place_particle(particle, model_particles, bed_particles, h):
    """ Calculate new X and Y of particle based on location in stream.

    Provided a particle's location in the stream, search for the two
    supporting particles it would come into contact with there, and
    derive its resting elevation from the left support's elevation
    plus the offset ``h``. The X position is unchanged.

    Keyword arguments:
    particle -- the particle being placed (row of model_particles;
        index 0 is its x coordinate)
    model_particles -- model particle list
    bed_particles -- bed particle list
    h -- vertical offset added to the support's elevation
         # NOTE(review): assumed placement height — confirm with callers

    Returns:
    (x, y) resting coordinates, each rounded to 2 decimal places.
    """
    left_support, right_support = find_supports(particle, model_particles,
                                                bed_particles, already_placed=False)
    # The exact two-circle (SymPy) solution formerly sketched here was
    # abandoned in favour of this approximation: rest at the particle's
    # own x, at the left support's elevation plus h.
    return round(particle[0], 2), round(np.add(h, left_support[2]), 2)
# @Timer("update_states", text="update_particle_states call: {:.5f} seconds", logger=None)
def update_particle_states(model_particles, bed_particles):
""" Set each model particle's current 'active' state.
If any model particle (pX) has a particle
resting on it in the stream then pX must
be set to Inactive indicated by a boolean 0.
If pX does not have any particles resting
on top of it then it is considered Active
indicated by a boolean 1.
Note: bed particles are always considered
Inactive.
Keyword arguments:
model_particles -- model particle list
bed_particles -- bed particle list
"""
# Set all model particles to active
model_particles[:,4] = 1
in_stream_particles = model_particles[model_particles[:,0] != -1]
for particle in in_stream_particles:
left_neighbour, right_neighbour | |
<filename>lib/googlecloudsdk/third_party/apis/apikeys/v2/apikeys_v2_messages.py
"""Generated message classes for apikeys version v2.
Manages the API keys associated with developer projects.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'apikeys'
class ApikeysKeysLookupKeyRequest(_messages.Message):
    r"""A ApikeysKeysLookupKeyRequest object.

    Fields:
      keyString: Required. Finds the project that owns the key string value.
    """

    # Autogenerated protorpc message (see file header); field numbers are
    # wire tags — do not edit by hand.
    keyString = _messages.StringField(1)
class ApikeysOperationsGetRequest(_messages.Message):
    r"""A ApikeysOperationsGetRequest object.

    Fields:
      name: The name of the operation resource.
    """

    # Autogenerated protorpc message (see file header); do not edit by hand.
    name = _messages.StringField(1, required=True)
class ApikeysProjectsLocationsKeysCloneRequest(_messages.Message):
    r"""A ApikeysProjectsLocationsKeysCloneRequest object.

    Fields:
      name: Required. The resource name of the API key to be cloned in the same
        project.
      v2CloneKeyRequest: A V2CloneKeyRequest resource to be passed as the
        request body.
    """

    # Autogenerated protorpc message (see file header); do not edit by hand.
    name = _messages.StringField(1, required=True)
    v2CloneKeyRequest = _messages.MessageField('V2CloneKeyRequest', 2)
class ApikeysProjectsLocationsKeysCreateRequest(_messages.Message):
    r"""A ApikeysProjectsLocationsKeysCreateRequest object.

    Fields:
      keyId: User specified key id (optional). If specified, it will become the
        final component of the key resource name. The id must be unique within
        the project, must conform with RFC-1034, is restricted to lower-cased
        letters, and has a maximum length of 63 characters. In another word, the
        id must match the regular expression: `[a-z]([a-z0-9-]{0,61}[a-z0-9])?`.
        The id must NOT be a UUID-like string.
      parent: Required. The project in which the API key is created.
      v2Key: A V2Key resource to be passed as the request body.
    """

    # Autogenerated protorpc message (see file header); do not edit by hand.
    keyId = _messages.StringField(1)
    parent = _messages.StringField(2, required=True)
    v2Key = _messages.MessageField('V2Key', 3)
class ApikeysProjectsLocationsKeysDeleteRequest(_messages.Message):
    r"""A ApikeysProjectsLocationsKeysDeleteRequest object.

    Fields:
      etag: Optional. The etag known to the client for the expected state of the
        key. This is to be used for optimistic concurrency.
      name: Required. The resource name of the API key to be deleted.
    """

    # Autogenerated protorpc message (see file header); do not edit by hand.
    etag = _messages.StringField(1)
    name = _messages.StringField(2, required=True)
class ApikeysProjectsLocationsKeysGetKeyStringRequest(_messages.Message):
    r"""A ApikeysProjectsLocationsKeysGetKeyStringRequest object.

    Fields:
      name: Required. The resource name of the API key to be retrieved.
    """

    # Autogenerated protorpc message (see file header); do not edit by hand.
    name = _messages.StringField(1, required=True)
class ApikeysProjectsLocationsKeysGetRequest(_messages.Message):
    r"""A ApikeysProjectsLocationsKeysGetRequest object.

    Fields:
      name: Required. The resource name of the API key to get.
    """

    # Autogenerated protorpc message (see file header); do not edit by hand.
    name = _messages.StringField(1, required=True)
class ApikeysProjectsLocationsKeysListRequest(_messages.Message):
    r"""A ApikeysProjectsLocationsKeysListRequest object.

    Fields:
      filter: Optional. Only list keys that conform to the specified filter. The
        allowed filter strings are `state:ACTIVE` and `state:DELETED`. By
        default, ListKeys returns only active keys.
      pageSize: Optional. Specifies the maximum number of results to be returned
        at a time.
      pageToken: Optional. Requests a specific page of results.
      parent: Required. Lists all API keys associated with this project.
    """

    # Autogenerated protorpc message (see file header); do not edit by hand.
    filter = _messages.StringField(1)
    pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
    pageToken = _messages.StringField(3)
    parent = _messages.StringField(4, required=True)
class ApikeysProjectsLocationsKeysPatchRequest(_messages.Message):
    r"""A ApikeysProjectsLocationsKeysPatchRequest object.

    Fields:
      name: Output only. The resource name of the key. The `name` has the form:
        `projects//locations/global/keys/`. For example: `projects/123456867718/
        locations/global/keys/b7ff1f9f-8275-410a-94dd-3855ee9b5dd2` NOTE: Key is
        a global resource; hence the only supported value for location is
        `global`.
      updateMask: The field mask specifies which fields to be updated as part of
        this request. All other fields are ignored. Mutable fields are:
        `display_name` and `restrictions`. If an update mask is not provided,
        the service treats it as an implied mask equivalent to all allowed
        fields that are set on the wire. If the field mask has a special value
        "*", the service treats it equivalent to replace all allowed mutable
        fields.
      v2Key: A V2Key resource to be passed as the request body.
    """

    # Autogenerated protorpc message (see file header); do not edit by hand.
    name = _messages.StringField(1, required=True)
    updateMask = _messages.StringField(2)
    v2Key = _messages.MessageField('V2Key', 3)
class ApikeysProjectsLocationsKeysUndeleteRequest(_messages.Message):
    r"""A ApikeysProjectsLocationsKeysUndeleteRequest object.

    Fields:
      name: Required. The resource name of the API key to be undeleted.
      v2UndeleteKeyRequest: A V2UndeleteKeyRequest resource to be passed as the
        request body.
    """

    # Autogenerated protorpc message (see file header); do not edit by hand.
    name = _messages.StringField(1, required=True)
    v2UndeleteKeyRequest = _messages.MessageField('V2UndeleteKeyRequest', 2)
class Operation(_messages.Message):
r"""This resource represents a long-running operation that is the result of
a network API call.
Messages:
MetadataValue: Service-specific metadata associated with the operation. It
typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
ResponseValue: The normal response of the operation in case of success. If
the original method returns no data on success, such as `Delete`, the
response is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
Fields:
done: If the value is `false`, it means the operation is still in
progress. If `true`, the operation is completed, and either `error` or
`response` is available.
error: The error result of the operation in case of failure or
cancellation.
metadata: Service-specific metadata associated with the operation. It
typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
name: The server-assigned name, which is only unique within the same
service that originally returns it. If you use the default HTTP mapping,
the `name` should be a resource name ending with
`operations/{unique_id}`.
response: The normal response of the operation in case of success. If the
original method returns no data on success, such as `Delete`, the
response is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
"""
  @encoding.MapUnrecognizedFields('additionalProperties')
  class MetadataValue(_messages.Message):
    r"""Service-specific metadata associated with the operation. It typically
    contains progress information and common metadata such as create time.
    Some services might not provide such metadata. Any method that returns a
    long-running operation should document the metadata type, if any.
    Messages:
      AdditionalProperty: An additional property for a MetadataValue object.
    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """
    class AdditionalProperty(_messages.Message):
      r"""An additional property for a MetadataValue object.
      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)  # JSON object key
      value = _messages.MessageField('extra_types.JsonValue', 2)  # arbitrary JSON value
    # Unrecognized JSON keys are captured here via MapUnrecognizedFields above.
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  @encoding.MapUnrecognizedFields('additionalProperties')
  class ResponseValue(_messages.Message):
    r"""The normal response of the operation in case of success. If the
    original method returns no data on success, such as `Delete`, the response
    is `google.protobuf.Empty`. If the original method is standard
    `Get`/`Create`/`Update`, the response should be the resource. For other
    methods, the response should have the type `XxxResponse`, where `Xxx` is
    the original method name. For example, if the original method name is
    `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
    Messages:
      AdditionalProperty: An additional property for a ResponseValue object.
    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """
    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ResponseValue object.
      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)  # JSON object key
      value = _messages.MessageField('extra_types.JsonValue', 2)  # arbitrary JSON value
    # Unrecognized JSON keys are captured here via MapUnrecognizedFields above.
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
done = _messages.BooleanField(1)
error = _messages.MessageField('Status', 2)
metadata = _messages.MessageField('MetadataValue', 3)
name = _messages.StringField(4)
response = _messages.MessageField('ResponseValue', 5)
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download | |
<reponame>lserraga/labUGR<gh_stars>1-10
from __future__ import division, print_function, absolute_import
import warnings
from distutils.version import LooseVersion
from numpy.testing import suppress_warnings
import numpy as np
from numpy.testing import (assert_array_almost_equal,
assert_array_equal, assert_array_less,
assert_equal, assert_,
assert_allclose, assert_warns)
from numpy.testing import assert_almost_equal
import pytest
from pytest import raises as assert_raises
from labugr.filters.filters import (normalize, tf2zpk, zpk2tf,
BadCoefficients, lfilter, lfilter_zi, filtfilt,
_filtfilt_gust)
from labugr.filters.iir_filters import butter
from decimal import Decimal
class TestTf2zpk(object):
    """Tests for the transfer-function -> zero/pole/gain conversion."""
    def test_simple(self):
        # Known zeros/poles, sorted so the comparison is order-independent.
        expected_zeros = np.sort(np.array([0.5, -0.5]))
        expected_poles = np.sort(np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)]))
        # Build polynomial coefficients from the known roots, then round-trip.
        num = np.poly(expected_zeros)
        den = np.poly(expected_poles)
        zeros, poles, gain = tf2zpk(num, den)
        assert_array_almost_equal(np.sort(zeros), expected_zeros)
        assert_array_almost_equal(np.sort(poles), expected_poles)
    def test_bad_filter(self):
        # Regression test for #651: badly conditioned coefficients must raise
        # BadCoefficients instead of silently returning garbage.
        with suppress_warnings():
            warnings.simplefilter("error", BadCoefficients)
            assert_raises(BadCoefficients, tf2zpk, [1e-15], [1.0, 1.0])
class TestZpk2Tf(object):
    def test_identity(self):
        """Identity transfer function: no zeros, no poles, unit gain."""
        num, den = zpk2tf([], [], 1.)
        expected = np.array([1.])  # desired result for both polynomials
        # The isinstance checks are a regression test for ticket #1095: with
        # p=[], zpk2tf used to return the scalar 1.0 instead of array([1.0]).
        assert_array_equal(num, expected)
        assert_(isinstance(num, np.ndarray))
        assert_array_equal(den, expected)
        assert_(isinstance(den, np.ndarray))
class TestNormalize(object):
    """Tests for normalize(): numerical fidelity against MATLAB and input validation."""
    def test_allclose(self):
        """Test for false positive on allclose in normalize() in
        filter_design.py"""
        # Test to make sure the allclose call within signal.normalize does not
        # choose false positives. Then check against a known output from MATLAB
        # to make sure the fix doesn't break anything.
        # These are the coefficients returned from
        # `[b,a] = cheby1(8, 0.5, 0.048)'
        # in MATLAB. There are at least 15 significant figures in each
        # coefficient, so it makes sense to test for errors on the order of
        # 1e-13 (this can always be relaxed if different platforms have
        # different rounding errors)
        b_matlab = np.array([2.150733144728282e-11, 1.720586515782626e-10,
                             6.022052805239190e-10, 1.204410561047838e-09,
                             1.505513201309798e-09, 1.204410561047838e-09,
                             6.022052805239190e-10, 1.720586515782626e-10,
                             2.150733144728282e-11])
        a_matlab = np.array([1.000000000000000e+00, -7.782402035027959e+00,
                             2.654354569747454e+01, -5.182182531666387e+01,
                             6.334127355102684e+01, -4.963358186631157e+01,
                             2.434862182949389e+01, -6.836925348604676e+00,
                             8.412934944449140e-01])
        # This is the input to signal.normalize after passing through the
        # equivalent steps in signal.iirfilter as was done for MATLAB
        b_norm_in = np.array([1.5543135865293012e-06, 1.2434508692234413e-05,
                              4.3520780422820447e-05, 8.7041560845640893e-05,
                              1.0880195105705122e-04, 8.7041560845640975e-05,
                              4.3520780422820447e-05, 1.2434508692234413e-05,
                              1.5543135865293012e-06])
        a_norm_in = np.array([7.2269025909127173e+04, -5.6242661430467968e+05,
                              1.9182761917308895e+06, -3.7451128364682454e+06,
                              4.5776121393762771e+06, -3.5869706138592605e+06,
                              1.7596511818472347e+06, -4.9409793515707983e+05,
                              6.0799461347219651e+04])
        b_output, a_output = normalize(b_norm_in, a_norm_in)
        # The test on b works for decimal=14 but the one for a does not. For
        # the sake of consistency, both of these are decimal=13. If something
        # breaks on another platform, it is probably fine to relax this lower.
        assert_array_almost_equal(b_matlab, b_output, decimal=13)
        assert_array_almost_equal(a_matlab, a_output, decimal=13)
    def test_errors(self):
        """Test the error cases."""
        # all zero denominator
        assert_raises(ValueError, normalize, [1, 2], 0)
        # denominator not 1 dimensional
        assert_raises(ValueError, normalize, [1, 2], [[1]])
        # numerator too many dimensions
        assert_raises(ValueError, normalize, [[[1, 2]]], 1)
class _TestLinearFilter(object):
def generate(self, shape):
x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
return self.convert_dtype(x)
def convert_dtype(self, arr):
if self.dtype == np.dtype('O'):
arr = np.asarray(arr)
out = np.empty(arr.shape, self.dtype)
iter = np.nditer([arr, out], ['refs_ok','zerosize_ok'],
[['readonly'],['writeonly']])
for x, y in iter:
y[...] = self.type(x[()])
return out
else:
return np.array(arr, self.dtype, copy=False)
    def test_rank_1_IIR(self):
        # First-order IIR (b=[1,-1], a=[0.5,-0.5]) applied to the ramp 0..5;
        # expected output precomputed by hand.
        x = self.generate((6,))
        b = self.convert_dtype([1, -1])
        a = self.convert_dtype([0.5, -0.5])
        y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.])
        assert_array_almost_equal(lfilter(b, a, x), y_r)
    def test_rank_1_FIR(self):
        # Pure FIR case (a=[1]): moving sum of adjacent samples of the ramp.
        x = self.generate((6,))
        b = self.convert_dtype([1, 1])
        a = self.convert_dtype([1])
        y_r = self.convert_dtype([0, 1, 3, 5, 7, 9.])
        assert_array_almost_equal(lfilter(b, a, x), y_r)
    def test_rank_1_IIR_init_cond(self):
        # IIR with explicit initial conditions zi; checks both the output y
        # and the returned final filter state zf.
        x = self.generate((6,))
        b = self.convert_dtype([1, 0, -1])
        a = self.convert_dtype([0.5, -0.5])
        zi = self.convert_dtype([1, 2])
        y_r = self.convert_dtype([1, 5, 9, 13, 17, 21])
        zf_r = self.convert_dtype([13, -10])
        y, zf = lfilter(b, a, x, zi=zi)
        assert_array_almost_equal(y, y_r)
        assert_array_almost_equal(zf, zf_r)
    def test_rank_1_FIR_init_cond(self):
        # FIR with explicit initial conditions; y and final state zf checked.
        x = self.generate((6,))
        b = self.convert_dtype([1, 1, 1])
        a = self.convert_dtype([1])
        zi = self.convert_dtype([1, 1])
        y_r = self.convert_dtype([1, 2, 3, 6, 9, 12.])
        zf_r = self.convert_dtype([9, 5])
        y, zf = lfilter(b, a, x, zi=zi)
        assert_array_almost_equal(y, y_r)
        assert_array_almost_equal(zf, zf_r)
    def test_rank_2_IIR_axis_0(self):
        # Filter a 4x3 array down its columns (axis=0).
        x = self.generate((4, 3))
        b = self.convert_dtype([1, -1])
        a = self.convert_dtype([0.5, 0.5])
        y_r2_a0 = self.convert_dtype([[0, 2, 4], [6, 4, 2], [0, 2, 4],
                                      [6, 4, 2]])
        y = lfilter(b, a, x, axis=0)
        assert_array_almost_equal(y_r2_a0, y)
    def test_rank_2_IIR_axis_1(self):
        # Filter a 4x3 array along its rows (axis=1).
        x = self.generate((4, 3))
        b = self.convert_dtype([1, -1])
        a = self.convert_dtype([0.5, 0.5])
        y_r2_a1 = self.convert_dtype([[0, 2, 0], [6, -4, 6], [12, -10, 12],
                                      [18, -16, 18]])
        y = lfilter(b, a, x, axis=1)
        assert_array_almost_equal(y_r2_a1, y)
    def test_rank_2_IIR_axis_0_init_cond(self):
        # NOTE(review): despite the name, this filters along axis=1 (and the
        # companion test below uses axis=0) — the names appear swapped.
        # Renaming would change the public test IDs, so only noting it here.
        x = self.generate((4, 3))
        b = self.convert_dtype([1, -1])
        a = self.convert_dtype([0.5, 0.5])
        zi = self.convert_dtype(np.ones((4,1)))
        y_r2_a0_1 = self.convert_dtype([[1, 1, 1], [7, -5, 7], [13, -11, 13],
                                        [19, -17, 19]])
        zf_r = self.convert_dtype([-5, -17, -29, -41])[:, np.newaxis]
        y, zf = lfilter(b, a, x, axis=1, zi=zi)
        assert_array_almost_equal(y_r2_a0_1, y)
        assert_array_almost_equal(zf, zf_r)
    def test_rank_2_IIR_axis_1_init_cond(self):
        # NOTE(review): counterpart of the test above; filters along axis=0
        # with a (1,3) zi shared across columns (see naming note above... kept
        # as-is to preserve test IDs).
        x = self.generate((4,3))
        b = self.convert_dtype([1, -1])
        a = self.convert_dtype([0.5, 0.5])
        zi = self.convert_dtype(np.ones((1,3)))
        y_r2_a0_0 = self.convert_dtype([[1, 3, 5], [5, 3, 1],
                                        [1, 3, 5], [5, 3, 1]])
        zf_r = self.convert_dtype([[-23, -23, -23]])
        y, zf = lfilter(b, a, x, axis=0, zi=zi)
        assert_array_almost_equal(y_r2_a0_0, y)
        assert_array_almost_equal(zf, zf_r)
    def test_rank_3_IIR(self):
        # Rank-3 input: output along each axis must match applying the 1-D
        # filter along that axis via apply_along_axis.
        x = self.generate((4, 3, 2))
        b = self.convert_dtype([1, -1])
        a = self.convert_dtype([0.5, 0.5])
        for axis in range(x.ndim):
            y = lfilter(b, a, x, axis)
            y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x)
            assert_array_almost_equal(y, y_r)
    def test_rank_3_IIR_init_cond(self):
        # Same as test_rank_3_IIR but with unit initial conditions; both the
        # output y and final state zf are checked against per-axis 1-D runs.
        x = self.generate((4, 3, 2))
        b = self.convert_dtype([1, -1])
        a = self.convert_dtype([0.5, 0.5])
        for axis in range(x.ndim):
            zi_shape = list(x.shape)
            zi_shape[axis] = 1
            zi = self.convert_dtype(np.ones(zi_shape))
            zi1 = self.convert_dtype([1])
            y, zf = lfilter(b, a, x, axis, zi)
            lf0 = lambda w: lfilter(b, a, w, zi=zi1)[0]
            lf1 = lambda w: lfilter(b, a, w, zi=zi1)[1]
            y_r = np.apply_along_axis(lf0, axis, x)
            zf_r = np.apply_along_axis(lf1, axis, x)
            assert_array_almost_equal(y, y_r)
            assert_array_almost_equal(zf, zf_r)
    def test_rank_3_FIR(self):
        # FIR analogue of test_rank_3_IIR (a=[1]): per-axis reference results.
        x = self.generate((4, 3, 2))
        b = self.convert_dtype([1, 0, -1])
        a = self.convert_dtype([1])
        for axis in range(x.ndim):
            y = lfilter(b, a, x, axis)
            y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x)
            assert_array_almost_equal(y, y_r)
    def test_rank_3_FIR_init_cond(self):
        # FIR analogue of test_rank_3_IIR_init_cond; zi has length 2 along the
        # filtered axis (order of b minus one).
        x = self.generate((4, 3, 2))
        b = self.convert_dtype([1, 0, -1])
        a = self.convert_dtype([1])
        for axis in range(x.ndim):
            zi_shape = list(x.shape)
            zi_shape[axis] = 2
            zi = self.convert_dtype(np.ones(zi_shape))
            zi1 = self.convert_dtype([1, 1])
            y, zf = lfilter(b, a, x, axis, zi)
            lf0 = lambda w: lfilter(b, a, w, zi=zi1)[0]
            lf1 = lambda w: lfilter(b, a, w, zi=zi1)[1]
            y_r = np.apply_along_axis(lf0, axis, x)
            zf_r = np.apply_along_axis(lf1, axis, x)
            assert_array_almost_equal(y, y_r)
            assert_array_almost_equal(zf, zf_r)
    def test_zi_pseudobroadcast(self):
        # zi may carry length-1 dimensions that expand against x, but lfilter
        # never *prepends* dimensions the way true broadcasting would.
        x = self.generate((4, 5, 20))
        b,a = butter(8, 0.2, output='ba')
        b = self.convert_dtype(b)
        a = self.convert_dtype(a)
        zi_size = b.shape[0] - 1
        # lfilter requires x.ndim == zi.ndim exactly. However, zi can have
        # length 1 dimensions.
        zi_full = self.convert_dtype(np.ones((4, 5, zi_size)))
        zi_sing = self.convert_dtype(np.ones((1, 1, zi_size)))
        y_full, zf_full = lfilter(b, a, x, zi=zi_full)
        y_sing, zf_sing = lfilter(b, a, x, zi=zi_sing)
        assert_array_almost_equal(y_sing, y_full)
        assert_array_almost_equal(zf_full, zf_sing)
        # lfilter does not prepend ones
        assert_raises(ValueError, lfilter, b, a, x, -1, np.ones(zi_size))
    def test_scalar_a(self):
        # a can be a scalar.
        # Passing a[0] (a single element) must behave like the length-1 array.
        x = self.generate(6)
        b = self.convert_dtype([1, 0, -1])
        a = self.convert_dtype([1])
        y_r = self.convert_dtype([0, 1, 2, 2, 2, 2])
        y = lfilter(b, a[0], x)
        assert_array_almost_equal(y, y_r)
    def test_zi_some_singleton_dims(self):
        # lfilter doesn't really broadcast (no prepending of 1's). But does
        # do singleton expansion if x and zi have the same ndim. This was
        # broken only if a subset of the axes were singletons (gh-4681).
        x = self.convert_dtype(np.zeros((3,2,5), 'l'))
        b = self.convert_dtype(np.ones(5, 'l'))
        a = self.convert_dtype(np.array([1,0,0]))
        # distinct zi per leading index (1x, 2x, 3x) so a faulty expansion
        # would produce visibly wrong outputs below
        zi = np.ones((3,1,4), 'l')
        zi[1,:,:] *= 2
        zi[2,:,:] *= 3
        zi = self.convert_dtype(zi)
        zf_expected = self.convert_dtype(np.zeros((3,2,4), 'l'))
        y_expected = np.zeros((3,2,5), 'l')
        y_expected[:,:,:4] = [[[1]], [[2]], [[3]]]
        y_expected = self.convert_dtype(y_expected)
        # IIR
        y_iir, zf_iir = lfilter(b, a, x, -1, zi)
        assert_array_almost_equal(y_iir, y_expected)
        assert_array_almost_equal(zf_iir, zf_expected)
        # FIR
        y_fir, zf_fir = lfilter(b, a[0], x, -1, zi)
        assert_array_almost_equal(y_fir, y_expected)
        assert_array_almost_equal(zf_fir, zf_expected)
    def base_bad_size_zi(self, b, a, x, axis, zi):
        # Helper: every mis-shaped zi passed here must raise ValueError.
        b = self.convert_dtype(b)
        a = self.convert_dtype(a)
        x = self.convert_dtype(x)
        zi = self.convert_dtype(zi)
        assert_raises(ValueError, lfilter, b, a, x, axis, zi)
def test_bad_size_zi(self):
# rank 1
x1 = np.arange(6)
self.base_bad_size_zi([1], [1], x1, -1, [1])
self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1])
self.base_bad_size_zi([1, 1], [1], | |
Error.multi_context() as errors:
stdlib = StdLib.Base(self.effective_wdl_version)
# Pass through input & postinput declarations again, typecheck their
# right-hand side expressions against the type environment.
for decl in (self.inputs or []) + self.postinputs:
errors.try1(
lambda: decl.typecheck(
type_env, stdlib, check_quant=check_quant, struct_types=struct_types
)
)
# Typecheck the command (string)
errors.try1(
lambda: self.command.infer_type(
type_env, stdlib, check_quant=check_quant, struct_types=struct_types
).typecheck(Type.String())
)
for b in self.available_inputs:
errors.try1(lambda: _check_serializable_map_keys(b.value.type, b.name, b.value))
# Typecheck runtime expressions
for _, runtime_expr in self.runtime.items():
errors.try1(
lambda runtime_expr=runtime_expr: runtime_expr.infer_type(
type_env, stdlib, check_quant=check_quant, struct_types=struct_types
)
) # .typecheck()
# (At this stage we don't care about the overall expression type, just that it
# typechecks internally.)
# Add output declarations to type environment
for decl in self.outputs:
type_env2 = errors.try1(
lambda decl=decl: decl.add_to_type_env(struct_types, type_env)
)
if type_env2:
type_env = type_env2
errors.maybe_raise()
# Typecheck the output expressions
stdlib = StdLib.TaskOutputs(self.effective_wdl_version)
for decl in self.outputs:
errors.try1(
lambda: decl.typecheck(type_env, stdlib, struct_types, check_quant=check_quant)
)
errors.try1(lambda: _check_serializable_map_keys(decl.type, decl.name, decl))
# check for cyclic dependencies among decls
_detect_cycles(
# pyre-ignore
_decl_dependency_matrix([ch for ch in self.children if isinstance(ch, Decl)])
)
_digest: str = ""
    @property
    def digest(self) -> str:
        """
        Content digest of the task, for use e.g. as a cache key. The digest is an opaque string of
        a few dozen alphanumeric characters, sensitive to the task's source code (with best effort
        to exclude comments and whitespace).
        """
        if self._digest:
            return self._digest
        sha256 = hashlib.sha256(self._digest_source().encode("utf-8")).digest()
        # 20 bytes of SHA-256 -> exactly 32 lowercase base32 chars (no '=' padding)
        self._digest = base64.b32encode(sha256[:20]).decode().lower()
        return self._digest
    def _digest_source(self) -> str:
        """Return the canonicalized source text that the `digest` property hashes."""
        doc = getattr(self, "parent", None)
        assert isinstance(doc, Document)
        # For now we just excerpt the task's source code, minus comments and blank lines, plus
        # annotations for the WDL version and struct types.
        source_lines = []
        if doc.wdl_version:
            source_lines.append("version " + doc.wdl_version)
        # Insert comments describing struct types used in the task.
        structs = _describe_struct_types(self)
        for struct_name in sorted(structs.keys()):
            source_lines.append(f"# {struct_name} :: {structs[struct_name]}")
        # excerpt task{} from document
        # Possible future improvements:
        # excise the meta & parameter_meta sections
        # normalize order of declarations
        # normalize whitespace within lines (not leading/trailing)
        source_lines += _source_excerpt(doc, self.pos, [self.command.pos])
        return "\n".join(source_lines).strip()
class Call(WorkflowNode):
    """A call (within a workflow) to a task or sub-workflow"""
    callee_id: List[str]
    """
    :type: List[str]
    The called task; either one string naming a task in the current document, or an import
    namespace and task name.
    """
    name: str
    """:type: string
    Call name, defaults to task/workflow name"""
    after: List[str]
    """:type: string
    Call names on which this call depends (even if none of their outputs are used in this call's
    inputs)
    """
    # Workflow node IDs resolved from `after`; populated in typecheck_input()
    _after_node_ids: Set[str]
    inputs: Dict[str, Expr.Base]
    """
    :type: Dict[str,WDL.Expr.Base]
    Call inputs provided"""
    callee: Optional[Union[Task, "Workflow"]]
    """
    :type: Union[WDL.Tree.Task, WDL.Tree.Workflow]
    Refers to the ``Task`` or imported ``Workflow`` object to be called (after AST typechecking)"""
    def __init__(
        self,
        pos: SourcePosition,
        callee_id: List[str],
        alias: Optional[str],
        inputs: Dict[str, Expr.Base],
        after: Optional[List[str]] = None,
    ) -> None:
        assert callee_id
        self.callee_id = callee_id
        self.name = alias if alias is not None else self.callee_id[-1]
        # workflow node ID is "call-<name>"
        super().__init__("call-" + self.name, pos)
        self.inputs = inputs
        self.callee = None
        self.after = after if after is not None else list()
        self._after_node_ids = set()
    @property
    def children(self) -> Iterable[SourceNode]:
        """"""
        # only the input expressions are AST children of a call
        for _, ex in self.inputs.items():
            yield ex
    def resolve(self, doc: "Document") -> None:
        # Set self.callee to the Task/Workflow being called. Use exactly once
        # prior to add_to_type_env() or typecheck_input()
        if self.callee:
            return
        callee_doc = None
        if len(self.callee_id) == 1:
            callee_doc = doc
        elif len(self.callee_id) == 2:
            for imp in doc.imports:
                if imp.namespace == self.callee_id[0]:
                    callee_doc = imp.doc
        if callee_doc:
            assert isinstance(callee_doc, Document)
            wf = callee_doc.workflow
            if isinstance(wf, Workflow) and wf.name == self.callee_id[-1]:
                if callee_doc is doc:
                    raise Error.CircularDependencies(self)
                if not wf.complete_calls or (wf.outputs is None and wf.effective_outputs):
                    raise Error.UncallableWorkflow(self, ".".join(self.callee_id))
                self.callee = wf
            else:
                for task in callee_doc.tasks:
                    if task.name == self.callee_id[-1]:
                        self.callee = task
        if self.callee is None:
            raise Error.NoSuchTask(self, ".".join(self.callee_id))
        assert doc.workflow
        if self.name == doc.workflow.name:
            raise Error.MultipleDefinitions(
                self, "Call's name may not equal the containing workflow's"
            )
        assert isinstance(self.callee, (Task, Workflow))
    def add_to_type_env(
        self, struct_types: Env.Bindings[Dict[str, Type.Base]], type_env: Env.Bindings[Type.Base]
    ) -> Env.Bindings[Type.Base]:
        # Add the call's outputs to the type environment under the appropriate
        # namespace, after checking for namespace collisions.
        assert self.callee
        if self.name in type_env:
            raise Error.MultipleDefinitions(self, "Value/call name collision on " + self.name)
        if type_env.has_namespace(self.name):
            raise Error.MultipleDefinitions(
                self,
                "Workflow has multiple calls named {}; give calls distinct names using `call {} as NAME ...`".format(
                    self.name, self.callee.name
                ),
            )
        # add a dummy _present binding to ensure the namespace exists even if callee has no outputs
        return Env.merge(
            self.effective_outputs, type_env.bind(self.name + "." + "_present", Type.Any(), self)
        )
    def typecheck_input(
        self,
        struct_types: Env.Bindings[Dict[str, Type.Base]],
        type_env: Env.Bindings[Type.Base],
        stdlib: StdLib.Base,
        check_quant: bool,
    ) -> bool:
        # Check the input expressions against the callee's inputs. One-time use.
        # Returns True if the call supplies all required inputs, False otherwise.
        assert self.callee
        # first resolve each self.after to a node ID (possibly a Gather node)
        for call_after in self.after:
            try:
                self._after_node_ids.add(
                    type_env.resolve_binding(call_after + "._present").info.workflow_node_id
                )
            except KeyError:
                raise Error.NoSuchCall(self, call_after)
        # Make a set of the input names which are required for this call
        required_inputs = set(decl.name for decl in self.callee.required_inputs)
        # typecheck call inputs against task/workflow input declarations
        with Error.multi_context() as errors:
            for name, expr in self.inputs.items():
                try:
                    decl = self.callee.available_inputs[name]
                    # treat input with default as optional, with or without the ? type quantifier
                    decltype = decl.type.copy(optional=True) if decl.expr else decl.type
                    errors.try1(
                        lambda expr=expr, decltype=decltype: expr.infer_type(
                            type_env, stdlib, check_quant=check_quant, struct_types=struct_types
                        ).typecheck(decltype)
                    )
                except KeyError:
                    errors.append(Error.NoSuchInput(expr, name))
                if name in required_inputs:
                    required_inputs.remove(name)
        assert (not required_inputs) == (not list(self.required_inputs))
        return not required_inputs
    @property
    def available_inputs(self) -> Env.Bindings[Decl]:
        """:type: WDL.Env.Bindings[WDL.Tree.Decl]
        Yields the task/workflow inputs which are *not* supplied in the call
        ``inputs:``, and thus may be supplied at workflow launch; in namespaces
        according to the call names.
        """
        assert self.callee
        supplied_inputs = set(self.inputs.keys())
        return self.callee.available_inputs.filter(
            lambda b: b.name not in supplied_inputs
        ).wrap_namespace(self.name)
    @property
    def required_inputs(self) -> Env.Bindings[Decl]:
        """:type: WDL.Env.Bindings[WDL.Tree.Decl]
        Yields the required task/workflow inputs which are *not* supplied in
        the call ``inputs:`` (incomplete calls), and thus must be supplied at
        workflow launch; in namespaces according to the call name.
        """
        assert self.callee
        supplied_inputs = set(self.inputs.keys())
        return self.callee.required_inputs.filter(
            lambda b: b.name not in supplied_inputs
        ).wrap_namespace(self.name)
    @property
    def effective_outputs(self) -> Env.Bindings[Type.Base]:
        """:type: WDL.Env.Bindings[WDL.Tree.Decl]
        Yields the effective outputs of the callee Task or Workflow, in a
        namespace according to the call name.
        """
        ans = Env.Bindings()
        assert self.callee
        # reversed() presumably compensates for bind() prepending, keeping the
        # callee's declaration order in the result — TODO confirm against Env
        for outp in reversed(list(self.callee.effective_outputs)):
            ans = ans.bind(self.name + "." + outp.name, outp.value, self)
        return ans
    def _workflow_node_dependencies(self) -> Iterable[str]:
        # dependencies = explicit `after` targets plus any nodes referenced by
        # the call's input expressions
        assert (not self.after) == (not self._after_node_ids)
        yield from self._after_node_ids
        for expr in self.inputs.values():
            yield from _expr_workflow_node_dependencies(expr)
class Gather(WorkflowNode):
"""
A ``Gather`` node symbolizes the operation to gather an array of declared values or call
outputs in a scatter section, or optional values from a conditional section. These operations
are implicit in the WDL syntax, but explicating them in the AST facilitates analysis of the
workflow's data types and dependency structure.
Each scatter/conditional section provides ``Gather`` nodes to expose the section body's
products to the rest of the workflow. When a :class:`WDL.Expr.Ident` elsewhere identifies a
node inside the section, its ``referee`` attribute is the corresponding ``Gather`` node, which
in turn references the interior node. The interior node might itself be another ``Gather``
node, from a nested scatter/conditional section.
"""
section: "WorkflowSection"
"""
:type: WorkflowSection
The ``Scatter``/``Conditional`` section implying this Gather operation
"""
referee: "Union[Decl, Call, Gather]"
"""
:type: Union[Decl, Call, Gather]
The ``Decl``, ``Call``, or sub-``Gather`` node from which this operation "gathers"
"""
    def __init__(self, section: "WorkflowSection", referee: "Union[Decl, Call, Gather]") -> None:
        # Node ID derives from the referee's, so gathers-of-gathers chain IDs
        super().__init__("gather-" + referee.workflow_node_id, referee.pos)
        self.section = section
        self.referee = referee
    def add_to_type_env(
        self, struct_types: Env.Bindings[Dict[str, Type.Base]], type_env: Env.Bindings[Type.Base]
    ) -> Env.Bindings[Type.Base]:
        # Deliberately unimplemented: Gather nodes do not add bindings via this
        # method — presumably handled by the enclosing section (TODO confirm)
        raise NotImplementedError()
    def _workflow_node_dependencies(self) -> Iterable[str]:
        # A gather depends solely on the node it gathers from
        yield self.referee.workflow_node_id
    @property
    def children(self) -> Iterable[SourceNode]:
        """"""
        # section & referee are NOT 'children' of Gather
        # (Gather is a synthetic node with no AST children of its own)
        return []
@property
def final_referee(self) -> Union[Decl, Call]:
"""
The ``Decl`` or ``Call`` node found at the end of the referee chain through | |
(' }\n')
cfile.write (' return err;\n')
cfile.write ('}\n')
cfile.write ('\n')
cfile.write ('\n')
for ftr in allFeatures:
for cmd in ftr.cmds + ftr.evts:
cfile.write ('void ' + cCallbackName (ftr, cmd) + ' (')
for arg in cmd.args:
cfile.write (xmlToCcharAreConst (LIB_MODULE, ftr, cmd, arg, True) + ' ' + arg.name + ', ')
cfile.write ('void *custom)\n')
cfile.write ('{\n')
cfile.write (' ARCOMMANDS_JNI_Decoder_t *decoder = (ARCOMMANDS_JNI_Decoder_t *)custom;\n')
cfile.write (' jint res;\n')
cfile.write (' JNIEnv *env = NULL;\n')
cfile.write (' res = (*g_vm)->GetEnv (g_vm, (void **)&env, JNI_VERSION_1_6);\n')
cfile.write (' if (res < 0) { return; }\n')
cfile.write ('\n')
for arg in _get_args_multiset(cmd.args):
cfile.write (' ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE,'Decode'+ARCapitalize(ftr.name)+ARCapitalize(cmd.name))+' (decoder->nativeDecoder, '+arg.name+');\n')
for arg in _get_args_without_multiset(cmd.args):
if ArArgType.STRING == arg.argType:
cfile.write (' jstring j_' + arg.name + ' = (*env)->NewStringUTF (env, ' + arg.name + ');\n')
elif isinstance(arg.argType, ArEnum):
cfile.write (' jclass j_' + arg.name + '_class = (*env)->FindClass (env, "' + jniEnumClassName (ftr, cmd, arg) + '");\n')
cfile.write (' jmethodID j_' + arg.name + '_mid = (*env)->GetStaticMethodID (env, j_' + arg.name + '_class, "getFromValue", "(I)' + xmlToJavaSig(ftr, cmd, arg) + '");\n')
cfile.write (' jobject j_' + arg.name + '_enum = (*env)->CallStaticObjectMethod (env, j_' + arg.name + '_class, j_' + arg.name + '_mid, ' + arg.name + ');\n')
if not list(_get_args_multiset(cmd.args)):
cfile.write (' (*env)->CallVoidMethod (env, decoder->javaDecoder, '+jmethodeCbName (ftr, cmd))
for arg in _get_args_without_multiset(cmd.args):
if ArArgType.STRING == arg.argType:
cfile.write (', j_' + arg.name)
elif isinstance(arg.argType, ArEnum):
cfile.write (', j_' + arg.name + '_enum')
else:
cfile.write (', ' + xmlToJniCast(ftr, cmd, arg) + arg.name)
cfile.write (');\n')
for arg in _get_args_without_multiset(cmd.args):
if ArArgType.STRING == arg.argType:
cfile.write (' (*env)->DeleteLocalRef (env, j_' + arg.name + ');\n')
cfile.write ('}\n')
cfile.write ('\n')
cfile.write ('\n')
cfile.write ('JNIEXPORT jlong JNICALL\n')
cfile.write (JNI_FUNC_PREFIX + JNIDecoderClassName + '_nativeNewDecoder (' + JNI_FIRST_ARGS + ')\n')
cfile.write ('{\n')
cfile.write (' int failed = 0;\n')
cfile.write (' ARCOMMANDS_JNI_Decoder_t *decoder = calloc(1, sizeof(ARCOMMANDS_JNI_Decoder_t));\n')
cfile.write (' if (decoder == NULL)\n')
cfile.write (' {\n')
cfile.write (' failed = 1;\n')
cfile.write (' }\n')
cfile.write (' \n')
cfile.write (' if (!failed)\n')
cfile.write (' {\n')
cfile.write (' decoder->nativeDecoder = ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'NewDecoder') + ' (NULL);\n')
cfile.write (' if (decoder->nativeDecoder == NULL)\n')
cfile.write (' {\n')
cfile.write (' failed = 1;\n')
cfile.write (' }\n')
cfile.write (' }\n')
cfile.write (' \n')
cfile.write (' if (!failed)\n')
cfile.write (' {\n')
cfile.write (' decoder->javaDecoder = (*env)->NewGlobalRef(env, thizz);\n')
cfile.write (' if (decoder->javaDecoder == NULL)\n')
cfile.write (' {\n')
cfile.write (' failed = 1;\n')
cfile.write (' }\n')
cfile.write (' }\n')
cfile.write (' \n')
cfile.write (' if (!failed)\n')
cfile.write (' {\n')
for ftr in allFeatures:
for cmd in ftr.cmds + ftr.evts:
cfile.write (' ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'Set' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Cb') + ' (decoder->nativeDecoder, ' + cCallbackName (ftr, cmd) + ', decoder);\n')
cfile.write ('\n')
cfile.write (' }\n')
cfile.write ('\n')
cfile.write (' if ((failed) && (decoder != NULL))\n')
cfile.write (' {\n')
cfile.write (' ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'DeleteDecoder') + ' (&decoder->nativeDecoder);\n')
cfile.write (' if (decoder->javaDecoder != NULL)\n')
cfile.write (' {\n')
cfile.write (' (*env)->DeleteGlobalRef(env, decoder->javaDecoder);\n')
cfile.write (' }\n')
cfile.write (' free(decoder);\n')
cfile.write (' decoder = NULL;\n')
cfile.write (' }\n')
cfile.write ('\n')
cfile.write (' return (jlong) (intptr_t) decoder;\n')
cfile.write ('}\n')
cfile.write ('\n')
cfile.write ('JNIEXPORT void JNICALL\n')
cfile.write (JNI_FUNC_PREFIX + JNIDecoderClassName + '_nativeDeleteDecoder (' + JNI_FIRST_ARGS + ', jlong jdecoder)\n')
cfile.write ('{\n')
cfile.write (' ARCOMMANDS_JNI_Decoder_t *decoder = (ARCOMMANDS_JNI_Decoder_t *) (intptr_t)jdecoder;\n')
cfile.write ('\n')
cfile.write (' if (decoder != NULL)\n')
cfile.write (' {\n')
cfile.write (' ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'DeleteDecoder') + ' (&decoder->nativeDecoder);\n')
cfile.write (' if (decoder->javaDecoder != NULL)\n')
cfile.write (' {\n')
cfile.write (' (*env)->DeleteGlobalRef(env, decoder->javaDecoder);\n')
cfile.write (' }\n')
cfile.write (' free(decoder);\n')
cfile.write (' }\n')
cfile.write ('}\n')
cfile.write ('\n')
cfile.write ('/* END OF GENERAED CODE */\n')
cfile.close ()
cfile = open (paths.JNI_FILTER_CFILE, 'w')
cfile.write (LICENCE_HEADER)
cfile.write ('/********************************************\n')
cfile.write (' * AUTOGENERATED FILE *\n')
cfile.write (' * DO NOT MODIFY IT *\n')
cfile.write (' * *\n')
cfile.write (' * To add new commands : *\n')
cfile.write (' * - Modify ../../Xml/commands.xml file *\n')
cfile.write (' * - Re-run generateCommandsList.py script *\n')
cfile.write (' * *\n')
cfile.write (' ********************************************/\n')
cfile.write ('#include <' + COMMANDSFIL_HFILE_NAME + '>\n')
cfile.write ('#include <jni.h>\n')
cfile.write ('#include <stdlib.h>\n')
cfile.write ('\n')
cfile.write ('JNIEXPORT jlong JNICALL\n')
cfile.write (JNI_FUNC_PREFIX + JNIFilterClassName + '_nativeNewFilter(' + JNI_FIRST_ARGS + ', jint behavior)\n')
cfile.write ('{\n')
cfile.write (' ARCOMMANDS_Filter_t *filter = ' + ARFunctionName (LIB_MODULE, FIL_SUBMODULE, 'NewFilter') + ' (behavior, NULL);\n')
cfile.write (' return (jlong)(intptr_t)filter;\n')
cfile.write ('}\n')
cfile.write ('\n')
cfile.write ('JNIEXPORT void JNICALL\n')
cfile.write (JNI_FUNC_PREFIX + JNIFilterClassName + '_nativeDeleteFilter(' + JNI_FIRST_ARGS + ', jlong cFilter)\n')
cfile.write ('{\n')
cfile.write (' ARCOMMANDS_Filter_t *filter = (ARCOMMANDS_Filter_t *)(intptr_t)cFilter;\n')
cfile.write (' ' + ARFunctionName (LIB_MODULE, FIL_SUBMODULE, 'DeleteFilter') + ' (&filter);\n')
cfile.write ('}\n')
cfile.write ('\n')
cfile.write ('JNIEXPORT jint JNICALL\n')
cfile.write (JNI_FUNC_PREFIX + JNIFilterClassName + '_nativeFilterCommand(' + JNI_FIRST_ARGS + ', jlong cFilter, jlong cCommand, jint len)\n')
cfile.write ('{\n')
cfile.write (' ARCOMMANDS_Filter_t *filter = (ARCOMMANDS_Filter_t *)(intptr_t)cFilter;\n')
cfile.write (' uint8_t *command = (uint8_t *)(intptr_t)cCommand;\n')
cfile.write (' ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME) + ' status = ' + ARFunctionName (LIB_MODULE, FIL_SUBMODULE, 'FilterCommand') + ' (filter, command, len, NULL);\n')
cfile.write (' return (jint)status;\n')
cfile.write ('}\n')
cfile.write ('\n')
for ftr in allFeatures:
cfile.write (' // Feature ' + get_ftr_old_name(ftr) + '\n')
cfile.write ('JNIEXPORT jint JNICALL\n')
cfile.write (JNI_FUNC_PREFIX + JNIFilterClassName + '_nativeSet' + ARCapitalize(get_ftr_old_name(ftr)) + 'Behavior (' + JNI_FIRST_ARGS + ', jlong cFilter, jint behavior)\n')
cfile.write ('{\n')
cfile.write (' ARCOMMANDS_Filter_t *filter = (ARCOMMANDS_Filter_t *)(intptr_t)cFilter;\n')
cfile.write (' ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' err = ARCOMMANDS_Filter_Set' + ARCapitalize (get_ftr_old_name(ftr)) + 'Behavior (filter, behavior);\n')
cfile.write (' return (jint)err;\n')
cfile.write ('}\n')
cfile.write ('\n')
if ftr.classes:#project only
for cl in ftr.classes:
cfile.write (' // - Class ' + cl.name + '\n')
cfile.write ('JNIEXPORT jint JNICALL\n')
cfile.write (JNI_FUNC_PREFIX + JNIFilterClassName + '_nativeSet' + ARCapitalize(get_ftr_old_name(ftr)) + ARCapitalize(cl.name) + 'Behavior (' + JNI_FIRST_ARGS + ', jlong cFilter, jint behavior)\n')
cfile.write ('{\n')
cfile.write (' ARCOMMANDS_Filter_t *filter = (ARCOMMANDS_Filter_t *)(intptr_t)cFilter;\n')
cfile.write (' ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' err = ARCOMMANDS_Filter_Set' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (cl.name) + 'Behavior (filter, behavior);\n')
cfile.write (' return (jint)err;\n')
cfile.write ('}\n')
cfile.write ('\n')
for cmd in ftr.cmds + ftr.evts:
cfile.write ('JNIEXPORT jint JNICALL\n')
cfile.write (JNI_FUNC_PREFIX + JNIFilterClassName + '_nativeSet' + ARCapitalize(get_ftr_old_name(ftr)) + ARCapitalize(format_cmd_name(cmd)) + 'Behavior (' + JNI_FIRST_ARGS + ', jlong cFilter, jint behavior)\n')
cfile.write ('{\n')
cfile.write (' ARCOMMANDS_Filter_t *filter = (ARCOMMANDS_Filter_t *)(intptr_t)cFilter;\n')
cfile.write (' ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' err = ARCOMMANDS_Filter_Set' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Behavior (filter, behavior);\n')
cfile.write (' return (jint)err;\n')
cfile.write ('}\n')
cfile.write ('\n')
cfile.write ('\n')
cfile.write ('/* END OF GENERAED CODE */\n')
# Functions for tree file generation
# (Wireshark Plugin)
def dump_enum_table(ftr, cl, cmd, arg):
    """Return the C source of the arsdk_enum table for *arg*.

    Emits a `static struct arsdk_enum <ftr>_<cl>_<cmd>_<arg>_enum_tab[]`
    initializer with one {name, value, comment} entry per enum value of the
    argument.  Returns the empty string when the argument defines no enums.

    Fix: the original kept a `value` counter that was incremented on every
    iteration but never read; it has been removed.
    """
    table = 'static struct arsdk_enum %s_%s_%s_%s_enum_tab[] = {\n' % (get_ftr_old_name(ftr), cl.name, cmd.name, arg.name)
    for enum in arg.enums:
        # Newlines in the doc text would break the generated C string literal.
        comment = enum.doc.replace('\n', '\\n')
        table += ' {\n'
        table += ' .name = "%s",\n' % enum.name
        # The numeric value is referenced through the generated enum constant.
        table += ' .value = %s,\n' % AREnumValue(LIB_MODULE,
                                                 get_ftr_old_name(ftr).upper() + '_' +
                                                 cl.name.upper(),
                                                 cmd.name.upper() + '_' +
                                                 arg.name.upper(), enum.name)
        table += ' .comment = "%s"\n' % comment.replace('"', '\\"')
        table += ' },\n'
    table = table + '};\n'
    # Only emit the table when the argument actually has enums.
    return table if arg.enums else ''
def dump_arg_table(ftr, cl, cmd):
    """Return the C source of the arsdk_arg table for *cmd*.

    Produces a `static struct arsdk_arg <ftr>_<cl>_<cmd>_arg_tab[]`
    initializer with one entry per command argument, pointing each entry at
    its enum table when the argument has enums.  Returns the empty string
    when the command has no arguments.
    """
    prefix = '%s_%s_%s' % (get_ftr_old_name(ftr), cl.name, cmd.name)
    table = 'static struct arsdk_arg %s_arg_tab[] = {\n' % prefix
    for arg in cmd.args:
        # Newlines in the doc text would break the generated C string literal.
        doc = get_arg_doc(arg).replace('\n', '\\n')
        if arg.enums:
            enum_tab = '%s_%s_enum_tab' % (prefix, arg.name)
            enum_count = 'ARRAY_SIZE(%s)' % enum_tab
        else:
            enum_tab = 'NULL'
            enum_count = '0'
        # Resolve the ARSDK type suffix for this argument.
        if isinstance(arg.argType, ArEnum):
            type_suffix = 'ENUM'
        elif isinstance(arg.argType, ArBitfield):
            # Bitfields are typed after their underlying storage type.
            type_suffix = ArArgType.TO_STRING[arg.argType.btfType].upper()
        else:
            type_suffix = ArArgType.TO_STRING[arg.argType].upper()
        table += ' {\n'
        table += ' .name = "%s",\n' % arg.name
        table += ' .type = ARSDK_ARG_TYPE_%s,\n' % type_suffix
        table += ' .enums = %s,\n' % enum_tab
        table += ' .nenums = %s,\n' % enum_count
        table += ' .comment = "%s"\n' % doc.replace('"', '\\"')
        table += ' },\n'
    table += '};\n'
    # Only emit the table when the command actually has arguments.
    return table if cmd.args else ''
def dump_cmd_table(ftr, cl):
table = 'static | |
used to open or close output `file`.
email : bool, optional
The argument is used to open or close output `email`.
html : bool, optional
The argument is used to open or close output `html`.
table : bool, optional
The argument is used to open or close output `table`.
directory : str, optional
The argument is used to set logging file folder.
filename : str, optional
The argument is used to set logging file name.
extension : str, optional
The argument is used to set logging file extension.
smtp : dict, optional
The argument is used to configure SMTP connection.
db : dict, optional
The argument is used to configure DB connection.
format : str, optional
The argument is used to set record template.
info : bool, optional
The argument is used to filter info records.
debug : bool, optional
The argument is used to filter debug records.
warning : bool, optional
The argument is used to filter warning records.
error : bool, optional
The argument is used to filter error records.
critical : bool, optional
The argument is used to filter critical records.
alarming : bool, optional
The argument is used to enable or disable alarming mechanism.
control : bool, optional
The argument is used to enable or disable execution break in case
            of error.
maxsize : int or bool, optional
The argument is used to define maximum size of output file.
maxdays : int or bool, optional
The argument is used to define maximum number of days that will be
logged to same file.
maxlevel : int or bool, optional
The argument is used to define the break error level.
maxerrors : int or bool, optional
            The argument is used to define the maximum number of errors.
"""
if isinstance(app, str) is True: self.app = app
if isinstance(desc, str) is True: self.desc = desc
if isinstance(version, (str, int, float)) is True:
self.version = version
# Build the output root if it is not exists. In other case modify
# existing output if it is requested.
if hasattr(self, 'root') is False:
self.root = Root(self, console=console, file=file, email=email,
html=html, table=table, status=status,
directory=directory, filename=filename,
extension=extension, smtp=smtp, db=db)
else:
for key, value in {'console': console, 'file': file,
'email': email, 'html': html,
'table': table}.items():
if value is True:
getattr(self.root, key).open()
if key == 'file':
getattr(self.root, key).new()
elif value is False:
getattr(self.root, key).close()
# Customize output file path.
path = {}
if directory is not None: path['dir'] = directory
if filename is not None: path['name'] = filename
if extension is not None: path['ext'] = extension
if len(path) > 0:
self.root.file.configure(**path)
# Customize SMTP server.
if isinstance(smtp, dict) is True:
self.root.email.configure(**smtp)
# Customize database connection.
if isinstance(db, dict) is True:
self.root.table.configure(**db)
        # Create the formatter if it does not exist yet, or just customize it.
# Parameter format can be either string or dictionary.
# When it is string then it must describe records format.
        # When it is a dictionary it can contain any parameter of the formatter
# that must be customized.
if isinstance(format, str) is True:
format = {'record': format}
if hasattr(self, 'formatter') is False:
format = {} if isinstance(format, dict) is False else format
self.formatter = Formatter(**format)
elif isinstance(format, dict) is True:
self.formatter.configure(**format)
# Create or customize record type filters.
if hasattr(self, 'filters') is False:
self.filters = {}
for key, value in {'info': info, 'debug': debug, 'error': error,
'warning': warning, 'critical': critical}.items():
if isinstance(value, bool) is True:
self.filters[key] = value
# Customize limits and parameters of execution behaviour.
if isinstance(maxsize, (int, float, bool)) is True:
self._maxsize = maxsize
if isinstance(maxdays, (int, float, bool)) is True:
self._maxdays = maxdays
self.__calculate_restart_date()
if isinstance(maxlevel, (int, float, bool)) is True:
self._maxlevel = maxlevel
if isinstance(maxerrors, (int, float, bool)) is True:
self._maxerrors = maxerrors
if isinstance(alarming, bool) is True:
self._alarming = alarming
if isinstance(control, bool) is True:
self._control = control
# Initialize sysinfo instance when not exists.
if hasattr(self, 'sysinfo') is False:
self.sysinfo = Sysinfo(self)
# Initialize header instance when not exists.
if hasattr(self, 'header') is False:
self.header = Header(self)
pass
def write(self, record):
"""Direct write to the output.
Parameters
----------
record : Record
The argument is used to send it to the output `root`.
"""
self.__check_file_stats()
self.root.write(record)
pass
def record(self, rectype, message, error=False, **kwargs):
"""Basic method to write records.
Parameters
----------
rectype : str
By default method creates the record with the type NONE.
That can be changed but depends on available record types.
All registered record types are stored in the instance attribute
rectypes. If you wish to use own record type or change the
presentaion of exeisting one then edit this dictinary.
message : str
The message that must be written.
error : bool, optional
If record is error then set that parameter to `True`.
**kwargs
The keyword arguments used for additional forms (variables) for
record and message formatting.
"""
if self.filters.get(rectype, True) is True:
record = Record(self, rectype, message, error=error, **kwargs)
self.write(record)
pass
def info(self, message, **kwargs):
"""Send INFO record to output."""
rectype = 'info'
self.record(rectype, message, **kwargs)
pass
def debug(self, message, **kwargs):
"""Send DEBUG record to the output."""
rectype = 'debug'
self.record(rectype, message, **kwargs)
pass
    def error(self, message=None, rectype='error', format=None, alarming=False,
              level=1, **kwargs):
        """Send ERROR record to the output.

        If an exception exists in the current traceback then the method will
        format it according to the `formatter.error` string presentation. If
        `formatter.error` is set to `False` the exception will just be printed
        in the original Python style.
        Also the method will send an alarm if the alarming attribute is
        `True`, email output is enabled and the SMTP server is configured.
        If one of the limit triggers fired then the application is aborted.

        Parameters
        ----------
        message : str, optional
            The message that must be written instead of the exception.
        rectype : str, optional
            The type of error according to the `rectypes` dictionary.
        format : str, optional
            The format of the error message.
        alarming : bool
            The argument is used to enable or disable the alarming mechanism
            for this certain call.
        level : int
            The argument is used to describe the error level.
        **kwargs
            The keyword arguments used for additional forms (variables) for
            record and message formatting.
        """
        # Remember that at least one error happened during this run.
        self._with_error = True
        self._count_errors += 1
        # Fall back to the formatter's error template when none was given.
        format = self.formatter.error if format is None else format
        # Parse the error.
        err_type, err_value, err_tb = sys.exc_info()
        if message is None and err_type is not None:
            if isinstance(format, str) is True:
                err_name = err_type.__name__
                # NOTE(review): self-assignment below is a no-op — presumably
                # kept for symmetry with the other err_* fields; confirm.
                err_value = err_value
                # Walk the traceback; only the innermost (last) frame's
                # file/line/object survive the loop and are reported.
                for tb in traceback.walk_tb(err_tb):
                    f_code = tb[0].f_code
                    err_file = os.path.abspath(f_code.co_filename)
                    err_line = tb[1]
                    err_obj = f_code.co_name
                self.record(rectype, message, error=True,
                            err_name=err_name, err_value=err_value,
                            err_file=err_file, err_line=err_line,
                            err_obj=err_obj, **kwargs)
            elif format is False:
                # No template configured: dump the raw Python traceback text.
                exception = traceback.format_exception(err_type, err_value,
                                                       err_tb)
                message = '\n'
                message += ''.join(exception)
                self.record(rectype, message, **kwargs)
        else:
            # Explicit message given (or no active exception).
            message = message or ''
            self.record(rectype, message, **kwargs)
        # Break execution in case of critical error if permitted.
        # The alarm will be generated at exit if it is configured.
        if self._control is True:
            if level >= self._maxlevel:
                sys.exit()
            if self._maxerrors is not False:
                if self._count_errors > self._maxerrors:
                    sys.exit()
        # Send alarm if execution was not aborted but alarm is needed.
        if alarming is True:
            self.root.email.alarm()
        pass
def warning(self, message=None, **kwargs):
"""Send WARNING error record to the output."""
self.error(message, rectype='warning', level=0, **kwargs)
pass
def critical(self, message=None, **kwargs):
"""Send CRITICAL error record to the output."""
self.error(message, rectype='critical', level=2, **kwargs)
pass
def head(self):
"""Send header to the output."""
string = self.header.create()
self.write(string)
pass
def subhead(self, string):
"""Send subheader as upper-case text between two border lines to the
output.
Parameters
----------
string : str
The text that will be presented as subheader.
"""
bound = f'{self.formatter.div*self.formatter.length}\n'
string = f'{bound}\t{string}\n{bound}'.upper()
self.write(string)
pass
def line(self, message):
"""Send raw text with the new line to the output.
Parameters
----------
message : str
The message that must be written.
"""
self.write(f'{message}\n')
pass
def bound(self, div=None, length=None):
"""Write horizontal border in the output. Useful when need to separate
| |
> txi or yj1 > tyj):
lcs_tmp.append((txi,tyj,xi1-1,yj1-1,4))
txi = xi2+1
tyj = yj2+1
isnake = isnake + 1
if (txi <= Me or tyj <= Ne):
lcs_tmp.append((txi,tyj,Me,Ne,4))
else:
lcs_tmp.append((Mb,Nb,Me,Ne,4))
lcs = lcs_tmp
#
# Expand certain differences
#
nsnake = len(lcs)
isnake = 0
tsnake = 0
lcs_tmp = []
while (isnake < nsnake):
(xi3,yj3,xi4,yj4,itype) = lcs[isnake]
isnake = isnake + 1
rsnake = tsnake
if xi4-xi3 != yj4-yj3:
#
# Expand the difference out to a set of whole lines
#
# Ensure that the end point is at least on the same line as the
# start point otherwise the expanded difference may not make any sense.
# There are at least two tricky bits:
# 1. Assume extra tokens have been added at the beginning of a line.
# Yj4 will then be yj3-1 and point to the line before yj3, whereas
# xi3 and xi4 will point to the same line. If nothing is done then
# after expansion yj3 and yj4 will still be the same but xi3 and xi4
# will have been expanded out. The additional differences are now
# unrecoverable.
# 2. Assume that xi3 and xi4 cover the addition of a full set of lines
# and yj4 == yj3-1. Simply adjusting yj4 leads to an extra reference
# token being added to the difference but no data token. Again an
# unintended difference would occur.
# To address both of the above one has to increase both end points
# simultaneously.
#
if xi3 > xi4:
yj4 = yj4 + (xi3 - xi4)
xi4 = xi3
elif yj3 > yj4:
xi4 = xi4 + (yj3 - yj4)
yj4 = yj3
#
lxi3 = toldiff_tokens.tokenno2lineno(dat,xi3)
xi3 = toldiff_tokens.lineno2tokenno(dat,lxi3)
lyj3 = toldiff_tokens.tokenno2lineno(ref,yj3)
yj3 = toldiff_tokens.lineno2tokenno(ref,lyj3)
lxi4 = toldiff_tokens.tokenno2lineno(dat,xi4)+1
xi4 = toldiff_tokens.lineno2tokenno(dat,lxi4)-1
lyj4 = toldiff_tokens.tokenno2lineno(ref,yj4)+1
yj4 = toldiff_tokens.lineno2tokenno(ref,lyj4)-1
tsnake = tsnake + 1
lcs_tmp.append((xi3,yj3,xi4,yj4,4))
else:
#
# Keep the difference the way it is
#
tsnake = tsnake + 1
lcs_tmp.append((xi3,yj3,xi4,yj4,4))
#
# Check if the new mismatch section overlaps with another one
#
done = false
while (rsnake > 0 and not done):
(xi1,yj1,xi2,yj2,itype) = lcs_tmp[rsnake-1]
if xi2 < xi3 and yj2 < yj3:
done = true
elif xi3 <= xi1 and yj3 <= yj1 and xi2 <= xi4 and yj2 <= yj4:
#
# rsnake is a sub set of the latest set so remove it
#
rsnake = rsnake - 1
tsnake = tsnake - 1
tt = lcs_tmp.pop(rsnake)
elif xi1 <= xi3 and yj1 <= yj3 and xi4 <= xi2 and yj4 <= yj2:
#
# tsnake is a sub set of rsnake so remove it
#
tsnake = tsnake - 1
tt = lcs_tmp.pop(tsnake)
done = true
elif xi1 <= xi3 and xi2 >= xi3 and xi2 <= xi4:
#
# overlapping mismatches merge both
#
lcs_tmp[rsnake-1] = (xi1,min(yj1,yj3),xi4,max(yj2,yj4),itype)
tt = lcs_tmp.pop(tsnake-1)
rsnake = rsnake - 1
tsnake = tsnake - 1
xi3 = xi1
yj3 = min(yj1,yj3)
#
elif yj1 <= yj3 and yj2 >= yj3 and yj2 <= yj4:
#
# overlapping mismatches merge both
#
lcs_tmp[rsnake-1] = (min(xi1,xi3),yj1,max(xi2,xi4),yj4,itype)
tt = lcs_tmp.pop(tsnake-1)
rsnake = rsnake - 1
tsnake = tsnake - 1
xi3 = min(xi1,xi3)
yj3 = yj1
#
else:
print "backtrack: this is odd..."
print "(yj1,yj2)<->(xi1,xi2) : ("+str(yj1)+","+str(yj2)+")<->("+str(xi1)+","+str(xi2)+")"
print "(yj3,yj4)<->(xi3,xi4) : ("+str(yj3)+","+str(yj4)+")<->("+str(xi3)+","+str(xi4)+")"
sys.exit(10)
lcs = lcs_tmp
#DEBUG
#print "--- trim_snakes different ---"
#print_lcs(sys.stdout,lcs)
#DEBUG
#
# Convert from mismatches back to a snake list
#
nsnake = len(lcs)
isnake = 0
lcs_tmp = []
if nsnake == 0:
lcs_tmp.append((Mb,Nb,Me,Ne,1))
else:
xi0 = Mb
yj0 = Nb
while (isnake < nsnake):
(xi1,yj1,xi2,yj2,itype) = lcs[isnake]
isnake = isnake + 1
lcs_tmp.append((xi0,yj0,xi1-1,yj1-1,1))
xi0 = xi2+1
yj0 = yj2+1
lcs_tmp.append((xi0,yj0,Me,Ne,1))
lcs = lcs_tmp
#DEBUG
#print "--- trim_snakes out ---"
#print_lcs(sys.stdout,lcs)
#DEBUG
return lcs
def trim_snakes_old(lcs,ref,Nb,Ne,dat,Mb,Me):
    """Previously found matches can cause problems if they are not optimal.
    In such a case sticking with the matches as found prevents subsequent
    more advanced diff routines from recovering from an early sub-optimal
    choice. To counter this all snakes and pseudo-snakes are trimmed down
    such that they involve whole lines only.
    The process is:
    1. Merge subsequent snakes to build a list in which each pair of
       snakes is separated by a non-empty section of mismatching tokens.
    2. Trim each snake by increasing the starting point to the first token
       on the next line, and decreasing the end point to the last token on
       the previous line. If as a result the begin token exceeds the end
       token then eliminate the snake.
    The routine returns the revised snake list.

    Snakes are tuples (xi1, yj1, xi2, yj2, itype) in token coordinates:
    xi* index tokens of `dat`, yj* index tokens of `ref`.
    NOTE(review): Nb, Ne, Mb and Me are accepted but never used in this
    routine — presumably kept for signature compatibility with
    trim_snakes(); confirm before removing.
    """
    #
    # Collapse the snake list by merging adjacent snakes.
    #
    nsnake = len(lcs)
    isnake = 0
    if nsnake > 0:
        lcs_tmp = []
        # Seed the merge with the first snake.
        (xi1,yj1,xi2,yj2,itype) = lcs[isnake]
        isnake = isnake + 1
        while (isnake < nsnake):
            (xi3,yj3,xi4,yj4,itype) = lcs[isnake]
            isnake = isnake + 1
            if (xi2+1 == xi3 and yj2+1 == yj3):
                #
                # This snake continues from the previous one so merge the two.
                #
                xi2 = xi4
                yj2 = yj4
                #
            else:
                #
                # This snake is separated from the previous one so store the
                # previous one and restart the merge procedure.
                #
                lcs_tmp.append((xi1,yj1,xi2,yj2,itype))
                xi1 = xi3
                yj1 = yj3
                xi2 = xi4
                yj2 = yj4
        #
        # Store the last snake.
        #
        lcs_tmp.append((xi1,yj1,xi2,yj2,itype))
        lcs = lcs_tmp
    #
    # Trim the snakes to precisely matching lines.
    #
    nsnake = len(lcs)
    isnake = 0
    lcs_tmp = []
    # txi/tyj track the furthest point stored so far, used below to reject
    # snakes that do not advance past it.
    txi = 0
    tyj = 0
    while (isnake < nsnake):
        (xi1,yj1,xi2,yj2,itype) = lcs[isnake]
        isnake = isnake + 1
        #
        # Move the starting point to the first token on the next line unless
        # the token is the first token on the current line.
        #
        lxi1 = toldiff_tokens.tokenno2lineno(dat,xi1)
        txi1 = toldiff_tokens.lineno2tokenno(dat,lxi1)
        lyj1 = toldiff_tokens.tokenno2lineno(ref,yj1)
        tyj1 = toldiff_tokens.lineno2tokenno(ref,lyj1)
        if txi1 != xi1 or tyj1 != yj1:
            xi1 = toldiff_tokens.lineno2tokenno(dat,lxi1+1)
            yj1 = toldiff_tokens.lineno2tokenno(ref,lyj1+1)
        #
        # Move the end point to the last token on the previous line unless
        # the token is the last token on the current line.
        #
        lxi2 = toldiff_tokens.tokenno2lineno(dat,xi2)
        txi2 = toldiff_tokens.lineno2tokenno(dat,lxi2+1)-1
        lyj2 = toldiff_tokens.tokenno2lineno(ref,yj2)
        tyj2 = toldiff_tokens.lineno2tokenno(ref,lyj2+1)-1
        if txi2 != xi2 or tyj2 != yj2:
            xi2 = toldiff_tokens.lineno2tokenno(dat,lxi2)-1
            yj2 = toldiff_tokens.lineno2tokenno(ref,lyj2)-1
        if xi1-1 <= xi2 and yj1-1 <= yj2 and (xi1 > txi or yj1 > tyj):
            #
            # There is a non-empty snake remaining so store it.
            #
            lcs_tmp.append((xi1,yj1,xi2,yj2,itype))
            txi = max(xi1,xi2)
            tyj = max(yj1,yj2)
        #
    lcs = lcs_tmp
    return lcs
def find_lcs1(ref,Nb,Ne,dat,Mb,Me):
"""Compares the data stored in 'dat' against the data in 'ref',
and returns the longest common subsequence (LCS) in 'lcs'. The LCS
is stored as a list of snakes. A snake is a sequence of line pairs
(Xi,Yj) to (Xi+p,Yj+p) where the lines X and Y in every pair match.
Whatever happens between two snakes in a path is irrelevant.
As this routine looks for exact matches it produces type 1 snakes.
The algorithm used here is inspired by:
<NAME>, 'An O(ND) Difference Algorithm and Its Variations'
Algorithmica 1, 2 (1986), 251-266
http://www.cs.arizona.edu/people/gene/PAPERS/diff.ps
    however I cannot guarantee that I understood it well enough to reproduce
the actual published algorithm.
<NAME>, SciTech Daresbury Laboratory, June 2006.
"""
lcs = { }
# FP - Forward Pij
# Records the maximum number of diagonal lines of all candidates paths that
# passed through node (i,j). P is a dictionary with tuples (i,j) as keys and
# the maximum number as data.
FP = { }
# FV - Forward search path vector
# Stores the forwards search paths.
FV = { }
# NF - counter for generating forward search path keys
#NF = 1
#
s = search_path_linked()
s.set_lastpoint(Mb-1,Nb-1)
FV[(Mb-1,Nb-1)] = s
# flij - forward last i+j
# foij - forward | |
<reponame>ws0416/tencentcloud-cli-intl-en<gh_stars>0
# -*- coding: utf-8 -*-
import os
import json
import tccli.options_define as OptionsDefine
import tccli.format_output as FormatOutput
from tccli import __version__
from tccli.utils import Utils
from tccli.exceptions import ConfigurationError
from tencentcloud.common import credential
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.apigateway.v20180808 import apigateway_client as apigateway_client_v20180808
from tencentcloud.apigateway.v20180808 import models as models_v20180808
def doCreateService(args, parsed_globals):
    """Invoke the CreateService API action and print the formatted reply."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials come from the already-resolved global parameters.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token]
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].CreateServiceRequest()
    model.from_json_string(json.dumps(args))
    result = client.CreateService(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # Some interpreters (e.g. Python 3.3) return bytes here.
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doBuildAPIDoc(args, parsed_globals):
    """Invoke the BuildAPIDoc API action and print the formatted reply."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials come from the already-resolved global parameters.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token]
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].BuildAPIDocRequest()
    model.from_json_string(json.dumps(args))
    result = client.BuildAPIDoc(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # Some interpreters (e.g. Python 3.3) return bytes here.
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeUsagePlansStatus(args, parsed_globals):
    """Invoke the DescribeUsagePlansStatus API action and print the reply."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials come from the already-resolved global parameters.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token]
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DescribeUsagePlansStatusRequest()
    model.from_json_string(json.dumps(args))
    result = client.DescribeUsagePlansStatus(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # Some interpreters (e.g. Python 3.3) return bytes here.
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteUsagePlan(args, parsed_globals):
    """Invoke the DeleteUsagePlan API action and print the formatted reply."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials come from the already-resolved global parameters.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token]
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DeleteUsagePlanRequest()
    model.from_json_string(json.dumps(args))
    result = client.DeleteUsagePlan(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # Some interpreters (e.g. Python 3.3) return bytes here.
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeAPIDocDetail(args, parsed_globals):
    """Invoke the DescribeAPIDocDetail API action and print the reply."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials come from the already-resolved global parameters.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token]
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DescribeAPIDocDetailRequest()
    model.from_json_string(json.dumps(args))
    result = client.DescribeAPIDocDetail(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # Some interpreters (e.g. Python 3.3) return bytes here.
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyApi(args, parsed_globals):
    """Invoke the ModifyApi API action and print the formatted reply."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials come from the already-resolved global parameters.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token]
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].ModifyApiRequest()
    model.from_json_string(json.dumps(args))
    result = client.ModifyApi(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # Some interpreters (e.g. Python 3.3) return bytes here.
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDemoteServiceUsagePlan(args, parsed_globals):
    """Invoke the DemoteServiceUsagePlan API action and print the reply."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials come from the already-resolved global parameters.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token]
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DemoteServiceUsagePlanRequest()
    model.from_json_string(json.dumps(args))
    result = client.DemoteServiceUsagePlan(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # Some interpreters (e.g. Python 3.3) return bytes here.
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeApiKeysStatus(args, parsed_globals):
    """Invoke the DescribeApiKeysStatus API action and print the reply."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials come from the already-resolved global parameters.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token]
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DescribeApiKeysStatusRequest()
    model.from_json_string(json.dumps(args))
    result = client.DescribeApiKeysStatus(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # Some interpreters (e.g. Python 3.3) return bytes here.
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteAPIDoc(args, parsed_globals):
    """Invoke the DeleteAPIDoc API action and print the formatted reply."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials come from the already-resolved global parameters.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token]
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DeleteAPIDocRequest()
    model.from_json_string(json.dumps(args))
    result = client.DeleteAPIDoc(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # Some interpreters (e.g. Python 3.3) return bytes here.
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyApiEnvironmentStrategy(args, parsed_globals):
    """Invoke the ModifyApiEnvironmentStrategy API action and print the reply."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials come from the already-resolved global parameters.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token]
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].ModifyApiEnvironmentStrategyRequest()
    model.from_json_string(json.dumps(args))
    result = client.ModifyApiEnvironmentStrategy(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # Some interpreters (e.g. Python 3.3) return bytes here.
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeLogSearch(args, parsed_globals):
    """Invoke the DescribeLogSearch API action and print the formatted reply."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials come from the already-resolved global parameters.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token]
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DescribeLogSearchRequest()
    model.from_json_string(json.dumps(args))
    result = client.DescribeLogSearch(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # Some interpreters (e.g. Python 3.3) return bytes here.
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeUsagePlanSecretIds(args, parsed_globals):
    """Invoke the DescribeUsagePlanSecretIds API action and print the reply."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials come from the already-resolved global parameters.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token]
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DescribeUsagePlanSecretIdsRequest()
    model.from_json_string(json.dumps(args))
    result = client.DescribeUsagePlanSecretIds(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # Some interpreters (e.g. Python 3.3) return bytes here.
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeServiceSubDomains(args, parsed_globals):
    """CLI action: call the Apigateway DescribeServiceSubDomains API and print the result."""
    g_param = parse_global_arg(parsed_globals)
    # build the credential and HTTP/client profiles from the resolved global params
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token],
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += "_CLI_" + __version__
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeServiceSubDomainsRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeServiceSubDomains(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # python 3.3: to_json_string may return bytes
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output],
                        g_param[OptionsDefine.Filter])
def doDescribeService(args, parsed_globals):
    """CLI action: call the Apigateway DescribeService API and print the result."""
    g_param = parse_global_arg(parsed_globals)
    # build the credential and HTTP/client profiles from the resolved global params
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token],
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += "_CLI_" + __version__
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeServiceRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeService(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # python 3.3: to_json_string may return bytes
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output],
                        g_param[OptionsDefine.Filter])
def doModifyIPStrategy(args, parsed_globals):
    """CLI action: call the Apigateway ModifyIPStrategy API and print the result."""
    g_param = parse_global_arg(parsed_globals)
    # build the credential and HTTP/client profiles from the resolved global params
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token],
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += "_CLI_" + __version__
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.ModifyIPStrategyRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.ModifyIPStrategy(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # python 3.3: to_json_string may return bytes
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output],
                        g_param[OptionsDefine.Filter])
def doDeleteService(args, parsed_globals):
    """CLI action: call the Apigateway DeleteService API and print the result."""
    g_param = parse_global_arg(parsed_globals)
    # build the credential and HTTP/client profiles from the resolved global params
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token],
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += "_CLI_" + __version__
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DeleteServiceRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DeleteService(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # python 3.3: to_json_string may return bytes
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output],
                        g_param[OptionsDefine.Filter])
def doCreateAPIDoc(args, parsed_globals):
    """CLI action: call the Apigateway CreateAPIDoc API and print the result."""
    g_param = parse_global_arg(parsed_globals)
    # build the credential and HTTP/client profiles from the resolved global params
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token],
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += "_CLI_" + __version__
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.CreateAPIDocRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.CreateAPIDoc(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # python 3.3: to_json_string may return bytes
        json_obj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output],
                        g_param[OptionsDefine.Filter])
def doUpdateService(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
| |
# this file contains all of the Chalice routing logic to implement the AWS Data API as a REST JSON Endpoint using IAM
# authentication. The Data API can also be accessed natively as a python library using aws_data_api.py
from chalice import Chalice, CORSConfig, Response, IAMAuthorizer, CognitoUserPoolAuthorizer, AuthResponse, \
BadRequestError, ConflictError, NotFoundError
import http
import os
import boto3
from functools import wraps
import chalicelib.utils as utils
from chalicelib.api_metadata import ApiMetadata
import chalicelib.parameters as params
# TODO move data api implementation to cell based layers
import chalicelib.aws_data_api as dapi
from chalicelib.streams_integration import StreamsIntegration
from chalicelib.exceptions import *
import chalicelib.understander as u
from chalicelib.data_api_cache import DataApiCache
import json
# this environment variable is setup by AWS Lambda
REGION = os.getenv('AWS_REGION')
# this environment variable comes from Chalice; None when running outside a deployed stage
STAGE = os.getenv('STAGE')
# create the Chalice App reference, named "<shortname>-<stage>"
app_name = "%s-%s" % (params.AWS_DATA_API_SHORTNAME, STAGE)
app = Chalice(app_name=app_name)
# This is the authorizer that will be used when you set environment parameter SYSTEM_AUTHORIZER to 'Custom'. You can
# put in whatever custom code you wish here
@app.authorizer()
def custom_auth(auth_request):
    """Demo custom authorizer: the bearer token itself encodes the decision.

    Production code would validate a JWT or call an OAuth provider here; as in
    the API Gateway docs, the literal token 'allow' grants access for the demo.
    """
    token = auth_request.token
    # An empty routes list tells API Gateway the caller is authorized for no
    # URLs, which produces an Unauthorized response.
    allowed_routes = ['/'] if token == 'allow' else []
    return AuthResponse(routes=allowed_routes, principal_id='user')
# setup authorisers for view methods
iam_authorizer = IAMAuthorizer()
set_authorizer = os.getenv(params.AUTHORIZER_PARAM)
# default to no authorizer; every branch below may override this.
# (the original left use_authorizer unassigned on the Cognito path, causing a
# NameError at import time when AUTHORIZER_PARAM selected Cognito)
use_authorizer = None
if set_authorizer == params.AUTHORIZER_IAM:
    use_authorizer = iam_authorizer
elif set_authorizer == params.AUTHORIZER_COGNITO:
    # check that we have the required configuration to setup Cognito auth
    cog_pool_name = os.getenv(params.COGNITO_POOL_NAME)
    cog_provider_arns = os.getenv(params.COGNITO_PROVIDER_ARNS)
    if cog_pool_name is not None and cog_provider_arns is not None:
        use_authorizer = CognitoUserPoolAuthorizer(cog_pool_name,
                                                   provider_arns=cog_provider_arns.split(','))
    else:
        # the original passed only one argument to a two-placeholder % format,
        # which raised TypeError; wrap both values in a tuple
        print("Unable to configure Cognito Authorizer without %s and %s configuration items" % (
            params.COGNITO_POOL_NAME, params.COGNITO_PROVIDER_ARNS))
elif set_authorizer == params.AUTHORIZER_CUSTOM:
    # custom_auth is defined above but intentionally not wired in by default
    use_authorizer = None
if use_authorizer is None:
    print("Stage deployed without Authorizer")
else:
    # set_authorizer is a plain string from the environment (str has no __name__)
    print("Using Authorizer %s" % set_authorizer)
# setup class logger
log = utils.setup_logging()
# create an API Metadata Handler
api_metadata_handler = ApiMetadata(REGION, log)
# create a cache of all API references tracked by this deployment stage
api_cache = DataApiCache(app=app, stage=STAGE, region=REGION, logger=log)
# create the streams integration handler, which is used by the lambda function embedded at the end of this app
# (initialised lazily; stays None until first use)
es_indexer = None
# module level settings used as flags for lazy initialisers in functions
search_flow_verified = False
# load the cors config; cors stays None (CORS disabled) when chalicelib/cors.json is absent
cors_config = None
cors = None
try:
    with open("chalicelib/cors.json", "r") as f:
        cors_config = json.load(f)
    # "AllowAllCORS": "True" enables chalice's permissive default CORS;
    # otherwise the "custom" object is used as CORSConfig keyword arguments
    if cors_config.get("AllowAllCORS") == "True":
        cors = True
    else:
        cors = CORSConfig(**cors_config.get("custom"))
except FileNotFoundError:
    pass
# using a functools wrapper here as normal python decorators aren't compatible with the call signature of chalice
def chalice_function(f):
    """Decorator translating a Data API function's result into a chalice Response.

    Result mapping:
      * None     -> 204 NO_CONTENT
      * Response -> returned unchanged
      * bool     -> 201 CREATED when True, 204 NO_CONTENT when False
      * int      -> used verbatim as the HTTP status code (text/plain, empty body)
      * mapping  -> body from params.RESPONSE_BODY; status from params.DATA_MODIFIED
                    (True -> 201, False -> 304, absent -> 200 with decorated body)

    Known domain exceptions are mapped to the corresponding chalice HTTP errors;
    DetailedException becomes a 500 with message/detail in the body.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        headers = {'Content-Type': 'text/json'}
        try:
            log.debug(f"Function: {f.__name__}")
            log.debug(f"ARGS: {args}")
            log.debug(f"KWARGS: {kwargs}")
            log.debug(f"Query Params: {app.current_request.query_params}")
            log.debug(f"Raw Body: {app.current_request.raw_body}")
            result = f(*args, **kwargs)
            log.debug(f"Result of Data API function call: {result}")
            status_code = http.HTTPStatus.OK
            body = None
            if result is None:
                status_code = http.HTTPStatus.NO_CONTENT
            else:
                if isinstance(result, Response):
                    return result
                # BUGFIX: bool must be tested before int - bool is a subclass of
                # int in Python, so the int branch used to capture True/False and
                # return them as (invalid) HTTP status codes, leaving the bool
                # branch unreachable.
                elif isinstance(result, bool):
                    if result is True:
                        status_code = http.HTTPStatus.CREATED
                    else:
                        status_code = http.HTTPStatus.NO_CONTENT
                elif isinstance(result, int):
                    return Response(body=body,
                                    status_code=result,
                                    headers={'Content-Type': 'text/plain'})
                else:
                    if params.RESPONSE_BODY in result:
                        body = result[params.RESPONSE_BODY]
                    if params.DATA_MODIFIED in result and result.get(params.DATA_MODIFIED) is True:
                        status_code = http.HTTPStatus.CREATED
                    elif params.DATA_MODIFIED in result and result.get(params.DATA_MODIFIED) is False:
                        status_code = http.HTTPStatus.NOT_MODIFIED
                    else:
                        # no modification marker: decorate the whole result as the body
                        body = utils.decorate(result)
            return Response(body=body,
                            status_code=status_code,
                            headers=headers)
        except ConstraintViolationException as cve:
            log.error(str(cve))
            raise ConflictError(cve)
        except (UnimplementedFeatureException, ResourceNotFoundException) as ufe:
            log.error(str(ufe))
            raise NotFoundError(ufe)
        except InvalidArgumentsException as iae:
            log.error(str(iae))
            raise BadRequestError(iae)
        except DetailedException as ge:
            log.error(str(ge))
            return Response(body=str({"message": ge.message, "detail": ge.detail}),
                            headers={'Content-Type': 'text/json'},
                            status_code=http.HTTPStatus.INTERNAL_SERVER_ERROR)
        except Exception as e:
            log.error(str(e))
            raise e
    return wrapper
def _add_api_defaults(api_metadata):
    """Stamp the deployment-level region and stage onto the API metadata dict.

    NOTE(review): despite the original comment about not overriding supplied
    values, the keys are set whenever the module-level value is not None -
    confirm whether existing entries should be preserved.
    """
    for name, value in ((params.REGION, REGION), (params.STAGE, STAGE)):
        if value is not None:
            api_metadata[name] = value
# TODO Can we add a method to query what are the available stages?
@app.route('/namespaces', methods=['GET'], authorizer=use_authorizer, cors=cors)
@chalice_function
def namespaces():
    """Return the registry of namespaces for this region and stage."""
    registry = dapi.get_registry(REGION, STAGE)
    return registry
@app.route('/version', methods=['GET'], authorizer=use_authorizer, cors=cors)
@chalice_function
def get_version():
    """Return the Data API implementation version."""
    version_info = {"version": dapi.__version__}
    return version_info
@app.route('/data-apis', methods=['GET'], authorizer=use_authorizer, cors=cors)
@chalice_function
def get_all_data_apis():
    """Return the set of deployed Data APIs (delegates to utils.get_all_data_apis)."""
    return utils.get_all_data_apis()
@app.route('/{api_name}/provision', methods=['PUT'], authorizer=use_authorizer, cors=cors)
@chalice_function
def provision_api(api_name):
    """Kick off asynchronous provisioning of a namespace, using the JSON body as config."""
    provision_config = app.current_request.json_body
    return dapi.async_provision(api_name=api_name, stage=STAGE, region=REGION, logger=log,
                                **provision_config)
@app.route('/{api_name}/drop', methods=['PUT'], authorizer=use_authorizer, cors=cors)
@chalice_function
def drop_api(api_name):
    """Drop the API's namespace.

    A final data export is performed unless the DoFinalExport query parameter
    parses to false.
    """
    do_export = True
    qp = app.current_request.query_params
    df = "DoFinalExport"
    if qp is not None and df in qp:
        # BUGFIX: bool("false") is True - any non-empty string disabled nothing.
        # Parse the flag the same way as the other routes (utils.strtobool).
        do_export = utils.strtobool(qp.get(df))
    return api_cache.get(api_name).drop(do_export=do_export)
@app.route("/{api_name}/status", methods=['GET'], authorizer=use_authorizer, cors=cors)
@chalice_function
def get_api_status(api_name):
    """Return the provisioning status of an API.

    Deliberately bypasses the API cache: the API may not be online yet and
    therefore may not be cached.
    """
    return dapi.get_api_status(api_name, stage=STAGE, region=REGION)
# method to get and create API level metadata - not per-item
@app.route('/{api_name}/info', methods=['GET', 'PUT'], authorizer=use_authorizer, cors=cors)
@chalice_function
def namespace_metadata(api_name):
    """GET returns (optionally attribute-filtered) API metadata; PUT applies the
    body as metadata updates and evicts the API from the cache on success."""
    request = app.current_request
    if request.method == 'GET':
        qp = request.query_params
        attr_filter = None
        if qp is not None and params.ATTRIBUTE_FILTER_PARAM in qp:
            attr_filter = qp.get(params.ATTRIBUTE_FILTER_PARAM).split(',')
        return api_metadata_handler.get_api_metadata(api_name=api_name, stage=STAGE,
                                                     attribute_filters=attr_filter)
    # PUT
    response = api_metadata_handler.update_metadata(api_name=api_name, stage=STAGE,
                                                    updates=request.json_body)
    if response is None:
        return None
    # remove the API from the cache, so that we will reinstantiate the reference on next call
    api_cache.remove(api_name)
    return {params.DATA_MODIFIED: True,
            params.RESPONSE_BODY: response}
@app.route('/{api_name}/usage', methods=['GET'], authorizer=use_authorizer, cors=cors)
@chalice_function
def get_usage(api_name):
    """Return usage information for the named API."""
    api = api_cache.get(api_name)
    return api.get_usage()
# method to run metadata 'understanding' against a single item
@app.route('/{api_name}/{id}/understand', methods=['PUT'], authorizer=use_authorizer, cors=cors)
@chalice_function
def run_understanding(api_name, id):
    """Run the understander for an item, using the storage location from the
    request body (falling back to the configured default attribute)."""
    body = app.current_request.json_body
    storage_location = body.get(params.STORAGE_LOCATION_ATTRIBUTE,
                                params.DEFAULT_STORAGE_LOCATION_ATTRIBUTE)
    return api_cache.get(api_name).understand(id=id, storage_location=storage_location)
# method to get and create API level schema information
@app.route('/{api_name}/schema/{schema_type}', methods=['GET', 'PUT', 'DELETE'], authorizer=use_authorizer, cors=cors)
@chalice_function
def schema(api_name, schema_type):
    """Fetch (GET), replace (PUT), or delete (DELETE) the Resource or Metadata
    schema of an API. Any other schema type is a 400."""
    valid_types = [params.RESOURCE.lower(), params.METADATA.lower()]
    if schema_type is None or schema_type.lower() not in valid_types:
        raise BadRequestError("Must supply a schema type of Resource or Metadata")
    request = app.current_request
    api = api_cache.get(api_name)
    method = request.method
    if method == 'GET':
        return api.get_schema(schema_type)
    if method == 'DELETE':
        delete_performed = api.remove_schema(schema_type)
        return {params.DATA_MODIFIED: delete_performed}
    # PUT
    return {params.DATA_MODIFIED: api.put_schema(schema_type=schema_type,
                                                 schema=app.current_request.json_body)}
# method to set an ItemMaster ID on an Item
@app.route('/{api_name}/ItemMaster', methods=['DELETE', 'PUT'], authorizer=use_authorizer, cors=cors)
@chalice_function
def process_item_master(api_name):
    """PUT attaches an Item Master (body = update arguments); DELETE detaches it."""
    api = api_cache.get(api_name)
    body = app.current_request.json_body
    if app.current_request.method == 'PUT':
        return {params.DATA_MODIFIED: True,
                params.RESPONSE_BODY: api.item_master_update(**body)}
    return {params.DATA_MODIFIED: api.item_master_delete(**body)}
# method to perform a simple query based on the attributes supplied in the request body
@app.route('/{api_name}/find', methods=['POST'], authorizer=use_authorizer, cors=cors)
@chalice_function
def find_item(api_name):
    """Find items whose attributes match the request body.

    CONSISTENCY FIX: this route previously passed cors=True, forcing permissive
    CORS regardless of cors.json; every sibling route uses the shared `cors`.
    """
    return api_cache.get(api_name).find(**app.current_request.json_body)
# method to perform an elasticsearch query
@app.route('/{api_name}/search/{search_type}', methods=['PUT'], authorizer=use_authorizer, cors=cors)
@chalice_function
def api_search(api_name, search_type):
    """Run a search of the given type, passing the JSON body as search arguments."""
    search_args = app.current_request.json_body
    return api_cache.get(api_name).search(search_type, **search_args)
# method to get, delete, and check for an item with ID from the body/uri
@app.route('/{api_name}', methods=['GET', 'DELETE', 'HEAD', 'PUT'], authorizer=use_authorizer, cors=cors)
@chalice_function
def process_general_request(api_name):
    """Resolve the item's primary key from the query string (GET/HEAD) or the
    request body (PUT/DELETE), then delegate to process_item_request.

    Raises InvalidArgumentsException when no primary key can be resolved.
    """
    # try to extract the ID from where it might be stored
    api = api_cache.get(api_name)
    primary_key_attribute = api.get_primary_key()
    item_id = None
    if app.current_request.method in ['GET', 'HEAD']:
        qp = app.current_request.query_params
        # ROBUSTNESS: query_params is None when no query string is supplied;
        # the original membership test crashed with TypeError in that case
        if qp is not None and primary_key_attribute in qp:
            item_id = qp.get(primary_key_attribute)
    else:
        # PUT or DELETE - the key may sit at the top level of the body or inside the Resource
        json_body = app.current_request.json_body
        resource_body = json_body.get(params.RESOURCE)
        if primary_key_attribute in json_body:
            item_id = json_body.get(primary_key_attribute)
            del json_body[primary_key_attribute]
        # ROBUSTNESS: resource_body is None when the body carries no Resource key
        elif resource_body is not None and primary_key_attribute in resource_body:
            item_id = resource_body.get(primary_key_attribute)
            del resource_body[primary_key_attribute]
    if item_id is None:
        raise InvalidArgumentsException("Unable to resolve Primary Key for Request")
    else:
        return process_item_request(api_name, item_id)
# method to get, delete, and check for an item based on its ID
@app.route('/{api_name}/{id}', methods=['GET', 'DELETE', 'HEAD', 'PUT'], authorizer=use_authorizer, cors=cors)
@chalice_function
def process_item_request(api_name, id):
    """Dispatch GET/DELETE/HEAD/PUT for a single item identified by its primary key."""
    if id is None:
        raise InvalidArgumentsException("Unable to transact without Primary Key for Item")
    request = app.current_request
    api = api_cache.get(api_name)
    if request.method == 'GET':
        qp = app.current_request.query_params

        def _qp_has(name):
            # True when a query string is present and carries the named parameter
            return qp is not None and name in qp

        # Item Master handling option
        master = qp[params.ITEM_MASTER_QP] if _qp_has(params.ITEM_MASTER_QP) else None
        # determine if metadata should be included in the request
        suppress_meta_fetch = False
        if _qp_has(params.SUPPRESS_ITEM_METADATA_FETCH):
            suppress_meta_fetch = utils.strtobool(qp.get(params.SUPPRESS_ITEM_METADATA_FETCH))
        # optional attribute whitelist / blacklist
        only_attributes = (qp.get(params.WHITELIST_ATTRIBUTES).split(',')
                           if _qp_has(params.WHITELIST_ATTRIBUTES) else None)
        not_attributes = (qp.get(params.BLACKLIST_ATTRIBUTES).split(',')
                          if _qp_has(params.BLACKLIST_ATTRIBUTES) else None)
        return api.get(id=id, master_option=master, suppress_meta_fetch=suppress_meta_fetch,
                       only_attributes=only_attributes, not_attributes=not_attributes)
    if request.method == 'DELETE':
        return {params.DATA_MODIFIED: api.delete(id=id, **request.json_body)}
    if request.method == 'HEAD':
        return api.check(id=id)
    if request.method == 'PUT':
        return api.update_item(id=id, **request.json_body)
# TODO Add a non-id based URL path for this operation
# method to restore an object from deletion
@app.route('/{api_name}/{id}/restore', methods=['PUT'], authorizer=use_authorizer, cors=cors)
@chalice_function
def restore(api_name, id):
    """Restore a previously deleted item and return the restored representation."""
    restored = api_cache.get(api_name).restore(id=id)
    return {params.DATA_MODIFIED: True,
            params.RESPONSE_BODY: restored}
# method to retrieve metadata only for an item
@app.route('/{api_name}/{id}/meta', methods=['GET'], authorizer=use_authorizer, cors=cors)
@chalice_function
def metadata(api_name, id):
    """Return only the metadata of the identified item."""
    api = api_cache.get(api_name)
    return api.get_metadata(id=id)
# method to paginate a bunch of items
@app.route('/{api_name}/list', methods=['GET'], authorizer=use_authorizer, cors=cors)
@chalice_function
def list_request(api_name):
    """List items, passing any query-string parameters through as list options."""
    query_params = app.current_request.query_params or {}
    return api_cache.get(api_name).list(**query_params)
# method to get stream information for an | |
not LOAD_AW1_TO_RAW_TABLE or LOAD_AW2_TO_RAW_TABLE")
return GenomicSubProcessResult.ERROR
# look up if any rows exist already for the file
records = dao.get_from_filepath(self.target_file)
if records:
logging.warning(f'File already exists in raw table: {self.target_file}')
return GenomicSubProcessResult.SUCCESS
file_data = self._retrieve_data_from_path(self.target_file)
# Return the error status if there is an error in file_data
if not isinstance(file_data, dict):
return file_data
# Processing raw data in batches
batch_size = 100
item_count = 0
batch = list()
for row in file_data['rows']:
# Standardize fields to lower, no underscores or spaces
row = dict(zip([key.lower().replace(' ', '').replace('_', '')
for key in row], row.values()))
row_obj = self._set_raw_awn_attributes(row, awn_model(), columns)
batch.append(row_obj)
item_count += 1
if item_count == batch_size:
# Insert batch into DB
with dao.session() as session:
session.bulk_save_objects(batch)
# Reset batch
item_count = 0
batch = list()
if item_count:
# insert last batch if needed
with dao.session() as session:
session.bulk_save_objects(batch)
return GenomicSubProcessResult.SUCCESS
def ingest_single_aw1_row_for_member(self, member):
    """Ingest the single AW1 manifest row belonging to *member*.

    The row is matched on biobank ID (the file's BIOBANK_ID carries a
    one-character prefix which is stripped before comparison).
    """
    # Open file and pull the member's row
    with self.controller.storage_provider.open(self.target_file, 'r') as aw1_file:
        reader = csv.DictReader(aw1_file, delimiter=',')
        row = [r for r in reader if r['BIOBANK_ID'][1:] == str(member.biobankId)][0]
    # Normalise header names: lower case, no spaces or underscores
    row = dict(zip([key.lower().replace(' ', '').replace('_', '') for key in row],
                   row.values()))
    ingested_before = member.reconcileGCManifestJobRunId is not None
    # Copy the mapped AW1 columns onto the member; columns absent from the file become None
    gc_manifest_column_mappings = self.get_aw1_manifest_column_mappings()
    for attr, column in gc_manifest_column_mappings.items():
        setattr(member, attr, row.get(column))
    # Fields not carried in the AW1 file itself
    member.reconcileGCManifestJobRunId = self.job_run_id
    member.aw1FileProcessedId = self.file_obj.id
    member.gcSite = self._get_site_from_aw1()
    # Only advance the workflow state when the member was still in AW0
    if member.genomicWorkflowState == GenomicWorkflowState.AW0:
        member.genomicWorkflowState = GenomicWorkflowState.AW1
        member.genomicWorkflowStateModifiedTime = clock.CLOCK.now()
    self.member_dao.update(member)
    # Count this row against the AW1 manifest unless it was already ingested
    if not ingested_before and not self.controller.bypass_record_count:
        self.increment_manifest_file_record_count_from_id()
    return GenomicSubProcessResult.SUCCESS
def ingest_single_aw2_row_for_member(self, member: GenomicSetMember) -> GenomicSubProcessResult:
    """Ingest the single AW2 metrics row belonging to *member* (matched on 'Biobank ID')."""
    # Open file and pull the member's row
    with self.controller.storage_provider.open(self.target_file, 'r') as aw2_file:
        reader = csv.DictReader(aw2_file, delimiter=',')
        row = [r for r in reader if r['Biobank ID'] == str(member.biobankId)][0]
    # Normalise header names: lower case, no spaces or underscores
    row = dict(zip([key.lower().replace(' ', '').replace('_', '') for key in row],
                   row.values()))
    # Prepare the AW2 row (ids, call rate, contamination); may flag a validation error
    row = self.prep_aw2_row_attributes(row, member)
    if row == GenomicSubProcessResult.ERROR:
        return GenomicSubProcessResult.ERROR
    # Upsert against any metrics record that already exists for this member
    existing_metrics_obj = self.metrics_dao.get_metrics_by_member_id(member.id)
    metric_id = existing_metrics_obj.id if existing_metrics_obj is not None else None
    upserted_obj = self.metrics_dao.upsert_gc_validation_metrics_from_dict(row, metric_id)
    # Update GC Metrics for PDR
    if upserted_obj:
        bq_genomic_gc_validation_metrics_update(upserted_obj.id, project_id=self.controller.bq_project_id)
        genomic_gc_validation_metrics_update(upserted_obj.id)
    self.update_member_for_aw2(member)
    self.member_dao.update(member)
    # First-time ingestion feeds the AW1 manifest feedback count
    if existing_metrics_obj is None and not self.controller.bypass_record_count:
        manifest_file = self.file_processed_dao.get(member.aw1FileProcessedId)
        if manifest_file is not None:
            self.feedback_dao.increment_feedback_count(manifest_file.genomicManifestFileId,
                                                       _project_id=self.controller.bq_project_id)
    return GenomicSubProcessResult.SUCCESS
def increment_manifest_file_record_count_from_id(self):
    """Add one to the record count of the manifest tied to the current file,
    then refresh the PDR copies of the manifest record."""
    manifest_file = self.manifest_dao.get(self.file_obj.genomicManifestFileId)
    manifest_file.recordCount += 1
    with self.manifest_dao.session() as session:
        session.merge(manifest_file)
    bq_genomic_manifest_file_update(manifest_file.id, project_id=self.controller.bq_project_id)
    genomic_manifest_file_update(manifest_file.id)
def prep_aw2_row_attributes(self, row: dict, member: GenomicSetMember):
    """
    Set contamination, contamination category,
    call rate, member_id, and file_id on AW2 row dictionary.

    Non-numeric contamination is tolerated for rows whose processingstatus is
    not 'pass' (the row is returned unvalidated); for 'pass' rows it raises a
    data-validation incident and returns the ERROR code.
    :param member:
    :param row:
    :return: row dictionary or ERROR code
    """
    row['member_id'] = member.id
    row['file_id'] = self.file_obj.id
    # Truncate call rate — presumably to fit the DB column width; TODO confirm
    try:
        row['callrate'] = row['callrate'][:10]
    except KeyError:
        pass
    # Convert blank alignedq30bases to none
    try:
        if row['alignedq30bases'] == '':
            row['alignedq30bases'] = None
    except KeyError:
        pass
    # Validate and clean contamination data
    try:
        row['contamination'] = float(row['contamination'])
        # Percentages shouldn't be less than 0
        if row['contamination'] < 0:
            row['contamination'] = 0
    except ValueError:
        # non-'pass' rows are allowed to carry non-numeric contamination
        if row['processingstatus'].lower() != 'pass':
            return row
        _message = f'{self.job_id.name}: Contamination must be a number for sample_id: {row["sampleid"]}'
        self.controller.create_incident(source_job_run_id=self.job_run_id,
                                        source_file_processed_id=self.file_obj.id,
                                        code=GenomicIncidentCode.DATA_VALIDATION_FAILED.name,
                                        message=_message,
                                        biobank_id=member.biobankId,
                                        sample_id=row['sampleid'],
                                        )
        return GenomicSubProcessResult.ERROR
    # Calculate contamination_category
    contamination_value = float(row['contamination'])
    category = self.calculate_contamination_category(
        member.collectionTubeId,
        contamination_value,
        member
    )
    row['contamination_category'] = category
    # handle mapped reads in case they are longer than field length
    if 'mappedreadspct' in row.keys():
        if len(row['mappedreadspct']) > 10:
            row['mappedreadspct'] = row['mappedreadspct'][0:10]
    return row
def update_member_for_aw2(self, member: GenomicSetMember):
    """Record the AW2 file on the member and, when the member was in AW1,
    advance the workflow state to AW2 (stamping the modification time).
    :param member:
    """
    member.aw2FileProcessedId = self.file_obj.id
    if member.genomicWorkflowState == GenomicWorkflowState.AW1:
        member.genomicWorkflowState = GenomicWorkflowState.AW2
        member.genomicWorkflowStateModifiedTime = clock.CLOCK.now()
    self.member_dao.update(member)
def _ingest_gem_a2_manifest(self, rows):
    """
    Processes the GEM A2 manifest file data.
    Updates GenomicSetMember object with gem_pass field.
    :param rows:
    :return: Result Code
    """
    try:
        for row in rows:
            sample_id = row['sample_id']
            member = self.member_dao.get_member_from_sample_id_with_state(sample_id,
                                                                          GENOME_TYPE_ARRAY,
                                                                          GenomicWorkflowState.A1)
            if member is None:
                logging.warning(f'Invalid sample ID: {sample_id}')
                continue
            member.gemPass = row['success']
            member.gemA2ManifestJobRunId = self.job_run_id
            member.gemDateOfImport = parse(row['date_of_import'])
            _signal = 'a2-gem-pass' if member.gemPass.lower() == 'y' else 'a2-gem-fail'
            # update state and state modified time only if changed
            # (compute the new state once; the original called get_new_state twice per row)
            new_state = GenomicStateHandler.get_new_state(member.genomicWorkflowState,
                                                          signal=_signal)
            if member.genomicWorkflowState != new_state:
                member.genomicWorkflowState = new_state
                member.genomicWorkflowStateModifiedTime = clock.CLOCK.now()
            self.member_dao.update(member)
            # Update member for PDR
            bq_genomic_set_member_update(member.id, project_id=self.controller.bq_project_id)
            genomic_set_member_update(member.id)
        return GenomicSubProcessResult.SUCCESS
    except (RuntimeError, KeyError):
        return GenomicSubProcessResult.ERROR
def _ingest_gem_metrics_manifest(self, rows):
    """
    Processes the GEM Metrics manifest file data.
    Updates GenomicSetMember object with metrics fields.
    :param rows:
    :return: Result Code
    """
    try:
        for row in rows:
            sample_id = row['sample_id']
            member = self.member_dao.get_member_from_sample_id_with_state(
                sample_id, GENOME_TYPE_ARRAY, GenomicWorkflowState.GEM_RPT_READY)
            if member is None:
                logging.warning(f'Invalid sample ID: {sample_id}')
                continue
            member.gemMetricsAncestryLoopResponse = row['ancestry_loop_response']
            member.gemMetricsAvailableResults = row['available_results']
            member.gemMetricsResultsReleasedAt = row['results_released_at']
            member.colorMetricsJobRunID = self.job_run_id
            self.member_dao.update(member)
            # keep the PDR copy of the member in sync
            bq_genomic_set_member_update(member.id, project_id=self.controller.bq_project_id)
            genomic_set_member_update(member.id)
        return GenomicSubProcessResult.SUCCESS
    except (RuntimeError, KeyError):
        return GenomicSubProcessResult.ERROR
def _ingest_aw4_manifest(self, rows):
    """
    Processes the AW4 manifest file data, applying QC status and DRC metrics
    to each matched member.
    :param rows:
    :return: Result Code
    """
    try:
        # genome type follows the job flavour (array vs WGS); invariant per file
        genome_type = (GENOME_TYPE_ARRAY
                       if self.job_id == GenomicJob.AW4_ARRAY_WORKFLOW
                       else GENOME_TYPE_WGS)
        for row in rows:
            # Normalise header names: lower case, no spaces or underscores
            row_copy = dict(zip([key.lower().replace(' ', '').replace('_', '')
                                 for key in row], row.values()))
            sample_id = row_copy['sampleid']
            member = self.member_dao.get_member_from_aw3_sample(sample_id, genome_type)
            if member is None:
                logging.warning(f'Invalid sample ID: {sample_id}')
                continue
            member.aw4ManifestJobRunID = self.job_run_id
            member.qcStatus = self._get_qc_status_from_value(row_copy['qcstatus'])
            metrics = self.metrics_dao.get_metrics_by_member_id(member.id)
            if metrics:
                metrics.drcSexConcordance = row_copy['drcsexconcordance']
                if self.job_id == GenomicJob.AW4_ARRAY_WORKFLOW:
                    metrics.drcCallRate = row_copy['drccallrate']
                elif self.job_id == GenomicJob.AW4_WGS_WORKFLOW:
                    metrics.drcContamination = row_copy['drccontamination']
                    metrics.drcMeanCoverage = row_copy['drcmeancoverage']
                    metrics.drcFpConcordance = row_copy['drcfpconcordance']
                metrics_obj = self.metrics_dao.upsert(metrics)
                bq_genomic_gc_validation_metrics_update(metrics_obj.id,
                                                        project_id=self.controller.bq_project_id)
                genomic_gc_validation_metrics_update(metrics_obj.id)
            self.member_dao.update(member)
            # keep the PDR copy of the member in sync
            bq_genomic_set_member_update(member.id, project_id=self.controller.bq_project_id)
            genomic_set_member_update(member.id)
        return GenomicSubProcessResult.SUCCESS
    except (RuntimeError, KeyError):
        return GenomicSubProcessResult.ERROR
def _retrieve_data_from_path(self, path):
    """
    Retrieves the genomic data file at *path* from a bucket.
    :param path: The source file to ingest
    :return: CSV data as a dictionary, or GenomicSubProcessResult.ERROR when missing
    """
    try:
        filename = path.split('/')[1]
        logging.info('Opening CSV file from queue {}: {}.'.format(filename, filename))
        # prefer the controller's storage provider; fall back to the cloud-file helper
        if self.controller.storage_provider:
            source = self.controller.storage_provider.open(path, 'r')
        else:
            source = open_cloud_file(path)
        with source as csv_file:
            return self._read_data_to_ingest(csv_file)
    except FileNotFoundError:
        logging.error(f"File path '{path}' not found")
        return GenomicSubProcessResult.ERROR
@staticmethod
def _read_data_to_ingest(csv_file):
    """Parse an open CSV file into {'fieldnames': [...], 'rows': [dict, ...]}.

    Columns with an empty header name (e.g. a trailing comma in the header
    row) are dropped from each row. BUGFIX: the original deleted keys from the
    row dict while iterating it, which raises RuntimeError on Python 3
    whenever such a column is present.
    """
    data_to_ingest = {'rows': []}
    csv_reader = csv.DictReader(csv_file, delimiter=",")
    data_to_ingest['fieldnames'] = csv_reader.fieldnames
    for row in csv_reader:
        # iterate over a snapshot of the keys so deletion is safe
        for key in list(row):
            if not key:
                del row[key]
        data_to_ingest['rows'].append(row)
    return data_to_ingest
def _process_aw1_attribute_data(self, aw1_data, member):
    """
    Checks a GenomicSetMember object for changes provided by AW1 data,
    mutating the member only when an update is actually required.
    :param aw1_data: dict
    :param member: GenomicSetMember
    :return: (member_was_updated, GenomicSetMember)
    """
    if not self._test_aw1_data_for_member_updates(aw1_data, member):
        return False, member
    member = self._set_member_attributes_from_aw1(aw1_data, member)
    member = self._set_rdr_member_attributes_for_aw1(aw1_data, member)
    return True, member
def _test_aw1_data_for_member_updates(self, aw1_data, member):
"""
Checks each attribute provided by Biobank
for changes to GenomicSetMember Object
:param aw1_data: dict
:param member: GenomicSetMember
:return: boolean (true if member requires updating)
"""
gc_manifest_column_mappings = self.get_aw1_manifest_column_mappings()
member_needs_updating = False
# Iterate each value and test whether the strings for each field correspond
for key in gc_manifest_column_mappings.keys():
if str(member.__getattribute__(key)) != str(aw1_data.get(gc_manifest_column_mappings[key])):
member_needs_updating = True
return member_needs_updating
def _set_member_attributes_from_aw1(self, aw1_data, member):
"""
Mutates the GenomicSetMember attributes provided by the Biobank
:param aw1_data: dict
:param member: GenomicSetMember
:return: GenomicSetMember
"""
gc_manifest_column_mappings = self.get_aw1_manifest_column_mappings()
for key in gc_manifest_column_mappings.keys():
member.__setattr__(key, aw1_data.get(gc_manifest_column_mappings[key]))
return member
def _set_rdr_member_attributes_for_aw1(self, aw1_data, member):
"""
Mutates the GenomicSetMember RDR attributes not provided by the Biobank
:param aw1_data: dict
:param member: GenomicSetMember
:return: GenomicSetMember
"""
# Set job run and file processed IDs
member.reconcileGCManifestJobRunId = self.job_run_id
# Don't overwrite aw1_file_processed_id when ingesting an AW1F
if self.job_id == GenomicJob.AW1_MANIFEST:
member.aw1FileProcessedId = self.file_obj.id
# Set the GC site ID (sourced from file-name)
member.gcSiteId = aw1_data['site_id']
# Only update the state if it was AW0 or AW1 (if in failure | |
actions is None or not isinstance(actions, list):
raise TypeError("actions should be a list, found: %r" % actions)
# for a in actions:
# validate_action(a)
self.actions = actions
self.tell_why_am_i_running = tell_why_am_i_running
# store other attributes
self.file_dep = file_dep
self.task_dep = task_dep
self.uptodate = uptodate
self.targets = targets
self.clean = clean
# advanced ones
self.setup = setup
self.teardown = teardown
self.getargs = getargs
self.calc_dep = calc_dep
self.verbosity = verbosity
# finally attach the `create_doit_tasks` hook if needed
self.create_doit_tasks = self._create_doit_tasks_noargs
    def _create_doit_tasks_noargs(self):
        # Parameterless adapter: doit invokes `create_doit_tasks` with no
        # arguments, while `_create_doit_tasks` accepts `is_subtask`.
        return self._create_doit_tasks()
def _create_doit_tasks(self, is_subtask=False):
"""Called by doit to know this task's definition, or by `@taskgen`"""
# first get the base description
task_dict = self.get_base_desc(is_subtask=is_subtask)
# actions
if self.tell_why_am_i_running:
actions = [why_am_i_running] + self.actions
else:
actions = self.actions
task_dict.update(actions=actions)
# task dep, setup, calc dep: support direct link
if self.task_dep is not None:
task_dict.update(task_dep=replace_task_names(self.task_dep))
if self.setup is not None:
task_dict.update(setup=replace_task_names(self.setup))
if self.calc_dep is not None:
task_dict.update(calc_dep=replace_task_names(self.calc_dep))
# others: simply use if not none
if self.file_dep is not None:
task_dict.update(file_dep=self.file_dep)
if self.uptodate is not None:
task_dict.update(uptodate=self.uptodate)
if self.targets is not None:
task_dict.update(targets=self.targets)
if self.clean is not None:
task_dict.update(clean=self.clean)
if self.teardown is not None:
task_dict.update(teardown=self.teardown)
if self.getargs is not None:
task_dict.update(getargs=self.getargs)
if self.verbosity is not None:
task_dict.update(verbosity=self.verbosity)
return task_dict
class taskgen(taskbase):
    """
    A decorator to create a doit task generator (See https://pydoit.org/tasks.html#sub-tasks).
    Similar to `@task`, you can use it without arguments and it will capture the name and docstring of the decorated
    function. This function needs to be a generator, meaning that it should `yield` tasks. Such tasks can be plain old
    dictionaries as in `doit`, or can be created with `task`.
    For example this is a task group named `mygroup` with two tasks `mygroup:echo0` and `mygroup:echo1`
    ```python
    from doit_api import taskgen, task
    @taskgen
    def mygroup():
        ''' hey!!! '''
        for i in range(2):
            yield task(name="echo%s" % i, actions=["echo hi > hoho%s.txt" % i], targets=["hoho%s.txt" % i])
    ```
    And this is one with two python subtasks:
    ```python
    from doit_api import taskgen, task
    @taskgen
    def mygroup():
        ''' hey!!! '''
        for i in range(2):
            @task(name="subtask %i" % i,
                  doc="a subtask %s" % i,
                  title="this is %s running" % i)
            def c_():
                print("hello sub")
            yield c_
    ```
    `@taskgen` only accepts three optional arguments: `name` (that will be used for the base group name), doc, and
    title.
    """
    def __init__(self,
                 _func=None,
                 # *, (support for python 2: no kw only args)
                 # -- task information
                 name=None,  # type: str
                 doc=None,   # type: str
                 # -- what the task is doing when run
                 title=None  # type: Union[str, Callable]
                 ):
        """
        :param name: an alternate base name for the task group. By default the name of the decorated function is used.
            See https://pydoit.org/tasks.html#sub-tasks
        :param doc: an optional documentation string for the task group. By default the decorated
            function docstring will be used. See https://pydoit.org/tasks.html#doc
        :param title: an optional message string or callable generating a message, to print when this task group is run.
            If nothing is provided, by default the task name is printed. If a string is provided, the task name will
            automatically be printed before it. If a callable is provided it should receive a single `task` argument
            and return a string. See https://pydoit.org/tasks.html#title
        """
        super(taskgen, self).__init__(name=name, doc=doc, title=title)
        # `_func` is already set when `@taskgen` is used without parentheses;
        # otherwise it stays None until __call__ receives the function.
        self.func = _func
        # Rename here so that doit does not try to call the unbound method.
        self.create_doit_tasks = self._create_doit_tasks

    def __call__(self, func):
        # `@taskgen(...)` form: the decorated function arrives here.
        self.func = func
        return self

    def _create_doit_tasks(self):
        """Called by doit to know this task's definition"""
        generator = self.func
        # Validate the decorated function: it must be a generator.
        if generator is None:
            raise TypeError("No task generator function is provided")
        if not isgeneratorfunction(generator):
            raise TypeError("The decorated function should be a generator")
        # Protect against empty subtasks by yielding a first def with name None,
        # see https://pydoit.org/tasks.html#sub-tasks
        self.add_default_desc_from_fun(generator)
        yield self.get_base_desc(name=None)
        for produced in generator():
            yield produced if isinstance(produced, dict) else produced._create_doit_tasks(is_subtask=True)
# class TaskBase(object):
# todo we could wish to provide the same level of functionality than this letsdoit class, but with fields listed.
# """Subclass this to define tasks."""
# @classmethod
# def create_doit_tasks(cls):
# if cls is TaskBase:
# return # avoid create tasks from base class 'Task'
# instance = cls()
# kw = dict((a, getattr(instance, a)) \
# for a in dir(instance) if not a.startswith('_'))
#
# kw.pop('create_doit_tasks')
# if 'actions' not in kw:
# kw['actions'] = [kw.pop('run')]
# if 'doc' not in kw and (cls.__doc__ != TaskBase.__doc__):
# kw['doc'] = cls.__doc__
# return kw
def cmdtask(
# -- task information
name=None, # type: Union[str, Any]
doc=None, # type: str
# -- what the task is doing when run
title=title_with_actions, # type: Union[str, Callable]
pre_actions=None, # type: List[DoitAction]
post_actions=None, # type: List[DoitAction]
tell_why_am_i_running=True, # type: bool
# -- preventing useless runs and selecting order
targets=None, # type: List[DoitPath]
clean=None, # type: Union[bool, List[DoitAction]]
file_dep=None, # type: List[DoitPath]
task_dep=None, # type: List[DoitTask]
uptodate=None, # type: List[Optional[Union[bool, Callable, str]]]
# -- advanced
setup=None, # type: List[DoitTask]
teardown=None, # type: List[DoitAction]
getargs=None, # type: Dict[str, Tuple[str, str]]
calc_dep=None, # type: List[DoitTask]
# -- misc
verbosity=None # type: int
):
"""
A decorator to create a task containing a shell command action (returned by the decorated function), and
optional additional actions.
```python
from doit_api import cmdtask
@cmdtask
def a():
''' the doc for a '''
return "echo hi"
@cmdtask(targets='foo.txt', file_deps=..., ...)
def b():
''' the doc for b '''
return '''
echo about to create file
echo hi > foo.txt
'''
@cmdtask
def c():
''' the doc for c '''
return [
"echo hi",
("echo", "hi")
]
```
A minimal `doit` task consists of one or several actions. Here, the main action is a shell command or sequence
of shell commands, returned by the decorated function. In addition to supporting all ways to express a command
action in doit, this also supports multiline strings to easily concatenate several commands into one (see rejected
[feature request](https://github.com/pydoit/doit/issues/314)), and plain string or tuple (not in a list). Your
function can therefore return:
- A string (command to be executed with the shell).
- A multiline string (commands to be executed with the shell. Blank lines automatically trimmed.
All lines are concatenated into the same shell command using '&' (windows) or ';' (linux) before
execution). This allows several commands to leverage each other, for example `conda activate` + some
python execution.
- a tuple (not list!) of strings or pathlib Paths (command to be executed without the shell).
- a list of strings or tuples. Note that in this case strings can not be multiline.
See [doit cmd-action](https://pydoit.org/tasks.html#cmd-action).
You can specify actions to be done before and after that/these `actions` in `pre_actions` and `post_actions`.
If `tell_why_i_am_running=True` (default) an additional action will be prepended to print the reason why the
task is running if the task declared a `file_dep`, `task_dep`, `uptodate` or `targets`.
All other parameters match those in `doit` conventions (See docstrings below), except
- `name` that is an intelligent placeholder for `basename` (if a task is a simple task) or `name` (if the task
is a subtask in a `@taskgen` generator),
- `title` that adds support for plain strings and by default is `title_with_actions`
- `task_dep`, `setup` and `calc_dep` where if a task callable (decorated with `@task` or not) is provided, the
corresponding name will be used.
Note: the `watch` parameter (Linux and Mac only) is not yet supported.
See https://pydoit.org/cmd_other.html?highlight=watch#auto-watch
:param name: an alternate name for the task. By default the name of the decorated function is used. Note that
this parameter will intelligently set 'basename' for normal tasks or 'name' for subtasks in a task
generator (`@taskgen`). See https://pydoit.org/tasks.html#task-name
:param doc: an optional documentation string for the task. By default, the decorated function docstring will
be used. See https://pydoit.org/tasks.html#doc
:param title: an optional message string or callable generating a message, to print when the task is run. If
nothing | |
# Repo: pingjuiliao/cb-multios
#!/usr/bin/env python
from cStringIO import StringIO
from generator.actions import Actions
import random
import struct
SAMPLE_RATE = 8000.0  # samples per second for all generated audio
SAMPLE_MAX = 0x7FFFFFFF  # largest signed 32-bit sample value
SAMPLE_MIN = -0x80000000  # smallest signed 32-bit sample value
# Python floor-division differs from C's truncating division for negative
# numerators; c_div below reproduces the C behavior.
def c_div(num, denom):
    """Divide like C does: truncate toward zero instead of flooring."""
    if num < 0:
        return -(-num / denom)
    return num / denom
def saturate(value):
    """Clamp a sample to the signed 32-bit range [SAMPLE_MIN, SAMPLE_MAX]."""
    return max(SAMPLE_MIN, min(SAMPLE_MAX, value))
class Compression4(object):
    """4-bit ADPCM-style compressor (compression id 1) writing to `output`.

    Per-channel predictor/index state is updated through the same decode
    path the decoder uses, keeping encoder and decoder in lock step.
    """
    # Step-index delta for each 3-bit magnitude code.
    index_table = [
        -1, -1, -1, -1, 1, 3, 6, 9
    ]
    # Quantizer step sizes addressed by the per-channel index state.
    step_table = [
        6, 7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
        19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
        50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
        130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
        337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
        876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
        2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
        5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
        15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
    ]
    def __init__(self, output):
        # output: writable (binary) stream that receives the compressed data.
        self.output = output
    def next_samples(self):
        # Take the high 16 bits of each channel's current sample, then advance.
        samples = [c.data[self.idx] >> 16 for c in self.track.channels]
        self.idx += 1
        return samples
    def best_index(self, sample1, sample2):
        # Pick the step_table index whose step is closest to the sample delta.
        diff = abs(sample2 - sample1)
        best = None
        for i in xrange(len(self.step_table)):
            step = self.step_table[i]
            if best is None or abs(step - diff) < best[1]:
                best = (i, abs(step - diff))
        return best[0]
    def new_block(self):
        # Start a compression block: reset per-channel predictor/index state and
        # write one '<hBB' header (seed sample, start index, pad byte) per channel.
        self.pred = []
        self.index = []
        samples = self.next_samples()
        for x in xrange(len(samples)):
            sample = samples[x]
            self.pred.append(sample)
            if self.idx < self.track.samples():
                self.index.append(self.best_index(sample, self.track.channels[x].data[self.idx] >> 16))
            else:
                self.index.append(self.best_index(sample, 0))
            self.output.write(struct.pack('<hBB', sample, self.index[x], 0))
    def decode(self, value, ch):
        # Reconstruct a sample from a 4-bit code (bit 3 = sign, bits 0-2 =
        # magnitude), mutating channel `ch`'s predictor/index state.
        step = self.step_table[self.index[ch]]
        self.index[ch] += self.index_table[value & 7]
        if value & 8:
            step = -step
        if value & 4:
            self.pred[ch] += step
        if value & 2:
            self.pred[ch] += step >> 1
        if value & 1:
            self.pred[ch] += step >> 2
        # NOTE(review): index is clamped to 0..7, so after the first decode only
        # the first 8 step_table entries are used — presumably matches the
        # target binary's codec; verify.
        self.index[ch] = min(7, max(self.index[ch], 0))
        self.pred[ch] = min(0x7FFF, max(self.pred[ch], -0x8000))
        return self.pred[ch]
    def encode(self, sample, ch):
        # Quantize the prediction error into a 4-bit sign+magnitude code.
        diff = sample - self.pred[ch]
        if diff < 0:
            value = 0x8
            diff = -diff
        else:
            value = 0x0
        step = self.step_table[self.index[ch]]
        value |= min(diff * 4 / step, 7)
        # Run the decoder so predictor state tracks what will be decoded.
        self.decode(value, ch)
        return value
    def compress(self, track):
        # Compress `track`, emitting a fresh block header at every 512-byte
        # boundary of the output stream.
        self.track = track
        self.idx = 0
        while self.idx < self.track.samples():
            if (self.output.tell() % 512) == 0:
                # output block header
                self.new_block()
            else:
                samples = self.next_samples()
                if len(samples) == 1:
                    # Mono: pack two consecutive channel-0 samples per byte.
                    samples += self.next_samples()
                    self.output.write(struct.pack('<B', (self.encode(samples[0], 0) << 4) | self.encode(samples[1], 0)))
                else:
                    # Stereo: one nibble per channel.
                    self.output.write(struct.pack('<B', (self.encode(samples[0], 0) << 4) | self.encode(samples[1], 1)))
class Gain(object):
    """Fixed-point gain factor stored in the signed 32-bit sample range.

    `value` encodes a multiplier: as_double() maps value >= 0 onto 1..11
    and value < 0 onto 1 down to 1/11.
    """
    def __init__(self, value):
        # Clamp the raw fixed-point gain into the 32-bit sample range.
        self.value = saturate(value)
    @classmethod
    def from_double(cls, value):
        """Build a Gain from a non-negative float multiplier (inverse of as_double)."""
        if value < 0:
            # Fix: `raise NotImplemented()` raised a confusing
            # "'NotImplementedType' object is not callable" TypeError, because
            # NotImplemented is a constant, not an exception class. Raise the
            # intended exception, consistent with __add__/__mul__ below.
            raise NotImplementedError()
        if value >= 1:
            return Gain(int((value - 1) / 10 * SAMPLE_MAX))
        else:
            return Gain(int((1/value - 1) / 10 * -SAMPLE_MAX))
    @classmethod
    def from_rational(cls, num, denom):
        """Build a Gain from the ratio num/denom."""
        return Gain(int(float(num) / float(denom) / 10 * SAMPLE_MAX))
        # return Gain(c_div(c_div(num * SAMPLE_MAX, denom), 10))
    @classmethod
    def from_pan(cls, pan):
        """Return (left, right) gains for a pan position; the far side is attenuated."""
        return Gain(0 if pan <= 0 else -pan), Gain(0 if pan > 0 else pan)
    def __add__(self, other):
        # Sum of the fixed-point values, saturated by Gain().
        if not isinstance(other, Gain):
            raise NotImplementedError()
        x = self.value + other.value
        return Gain(x)
    def __mul__(self, other):
        # Multiply in float (multiplier) space, then re-encode.
        if not isinstance(other, Gain):
            raise NotImplementedError()
        return Gain.from_double(self.as_double() * other.as_double())
    def as_double(self):
        """Decode the fixed-point value into a float multiplier."""
        x = float(self.value)
        x *= 10
        x /= SAMPLE_MAX
        if x >= 0:
            return x + 1
        else:
            return 1 / (-x + 1)
    def apply(self, sample):
        """Scale one sample by this gain, saturating to the 32-bit range."""
        sample = int(self.as_double() * sample)
        return saturate(sample)
class AudioStream(object):
    """A single channel of signed 32-bit samples stored as a plain list."""
    def __init__(self, data):
        self.data = data
    @classmethod
    def fromSilence(cls, length):
        """Create a stream of `length` zero samples."""
        return cls([0] * length)
    @classmethod
    def fromSquareWave(cls, length, hz):
        """Create a half-amplitude square wave at `hz` cycles per second."""
        half_period = 2 * SAMPLE_RATE / hz
        samples = []
        for n in xrange(length):
            if (int(n / half_period) % 2) == 0:
                samples.append(-(SAMPLE_MAX / 2))
            else:
                samples.append(SAMPLE_MAX / 2)
        return cls(samples)
    def extend(self, length):
        """Zero-pad the stream to at least `length` samples."""
        shortfall = length - len(self.data)
        if shortfall > 0:
            self.data += [0] * shortfall
    def mix(self, other, gain):
        """Additively blend `other` (scaled by `gain`) into this stream."""
        self.extend(len(other.data))
        for idx, sample in enumerate(other.data):
            self.data[idx] = saturate(self.data[idx] + gain.apply(sample))
    def apply_gain(self, gain):
        """Scale every sample in place, saturating at the 32-bit bounds."""
        for idx, sample in enumerate(self.data):
            self.data[idx] = saturate(gain.apply(sample))
    def apply_echo(self, delay):
        """Add a feedback echo `delay` samples long at -50% gain."""
        feedback = Gain.from_rational(-50, 100)
        ring = [0] * delay
        for idx in xrange(len(self.data)):
            # mix the delayed (wet) signal into the dry signal
            self.data[idx] = saturate(self.data[idx] + feedback.apply(ring.pop(0)))
            # feed the mixed output back into the delay line
            ring.append(self.data[idx])
class AudioTrack(object):
    """One audio asset: 1 (mono) or 2 (stereo) AudioStream channels,
    plus per-track gain and pan settings."""
    def __init__(self, channels):
        self.channels = channels
        self.gain = Gain(0)  # unity gain by default
        self.pan = 0  # centered
    @classmethod
    def fromMono(cls, left):
        # Single-channel track.
        return cls([left])
    @classmethod
    def fromStereo(cls, left, right):
        # Two-channel track.
        return cls([left, right])
    @classmethod
    def fromCompressed(cls, data):
        # Parse the 24-byte header, then decode according to compression id.
        magic, offset, length, compid, sample_rate, channels = struct.unpack('<IIIIII', data[:24])
        data = data[24:]
        if channels == 1:
            track = cls([AudioStream.fromSilence(length)])
        else:
            track = cls([AudioStream.fromSilence(length), AudioStream.fromSilence(length)])
        if compid == 0:
            track.decompress32(data)
        elif compid == 1:
            # NOTE(review): decompress4 is not defined in this class as shown —
            # confirm it exists elsewhere or that compid 1 never reaches here.
            track.decompress4(data)
        else:
            track.decompress8(data)
        return track
    def samples(self):
        # Track length in samples (channel 0 is authoritative).
        return len(self.channels[0].data)
    def fix_length(self):
        # Zero-pad both stereo channels to the same length; no-op for mono.
        if len(self.channels) <= 1:
            return
        self.channels[0].extend(len(self.channels[1].data))
        self.channels[1].extend(len(self.channels[0].data))
    def compress32(self):
        # compid 0: raw signed 32-bit little-endian samples, interleaved.
        hdr = struct.pack('<IIIIII', 0x2e617564, 24, self.samples(), 0, SAMPLE_RATE, len(self.channels))
        if len(self.channels) == 1:
            data = ''.join([struct.pack('<i', self.channels[0].data[x]) for x in xrange(self.samples())])
        else:
            data = ''.join([struct.pack('<ii', self.channels[0].data[x], self.channels[1].data[x]) for x in xrange(self.samples())])
        return hdr + data
    def decompress32(self, data):
        # Inverse of compress32.
        def decode(f):
            return struct.unpack('<i', f.read(4))[0]
        f = StringIO(data)
        for x in xrange(self.samples()):
            if len(self.channels) == 1:
                self.channels[0].data[x] = decode(f)
            else:
                self.channels[0].data[x] = decode(f)
                self.channels[1].data[x] = decode(f)
    def compress8(self):
        # compid 2: companded 8-bit encoding (sign bit + piecewise segments)
        # of the top 14 bits of each sample.
        def encode(sample):
            # reduce sample to (-8192..8192)
            sample >>= 18
            if sample >= 0:
                neg = False
            else:
                neg = True
                sample = -sample - 1
            if sample <= 30:
                value = 0x70 + (30 - sample) / 2
            elif sample <= 94:
                value = 0x60 + (94 - sample) / 4
            elif sample <= 222:
                value = 0x50 + (222 - sample) / 8
            elif sample <= 478:
                value = 0x40 + (478 - sample) / 16
            elif sample <= 990:
                value = 0x30 + (990 - sample) / 32
            elif sample <= 2014:
                value = 0x20 + (2014 - sample) / 64
            elif sample <= 4062:
                value = 0x10 + (4062 - sample) / 128
            elif sample <= 8190:
                # NOTE(review): divisor 258 breaks the 2/4/.../128 doubling
                # pattern (256 expected) but decompress8 uses 258 too, so the
                # pair is self-consistent — confirm against the target binary.
                value = 0x00 + (8190 - sample) / 258
            else:
                value = 0
            return value | (0x00 if neg else 0x80)
        hdr = struct.pack('<IIIIII', 0x2e617564, 24, self.samples(), 2, SAMPLE_RATE, len(self.channels))
        if len(self.channels) == 1:
            data = ''.join([struct.pack('<B', encode(self.channels[0].data[x])) for x in xrange(self.samples())])
        else:
            data = ''.join([struct.pack('<BB', encode(self.channels[0].data[x]), encode(self.channels[1].data[x])) for x in xrange(self.samples())])
        return hdr + data
    def decompress8(self, data):
        # Inverse of compress8.
        def decode(f):
            value = struct.unpack('<B', f.read(1))[0]
            interval = value & 0xf
            if (value & 0x70) == 0x00:
                sample = 8190 - 258 * interval;
            elif (value & 0x70) == 0x10:
                sample = 4062 - 128 * interval;
            elif (value & 0x70) == 0x20:
                sample = 2014 - 64 * interval;
            elif (value & 0x70) == 0x30:
                sample = 990 - 32 * interval;
            elif (value & 0x70) == 0x40:
                sample = 478 - 16 * interval;
            elif (value & 0x70) == 0x50:
                sample = 222 - 8 * interval;
            elif (value & 0x70) == 0x60:
                sample = 94 - 4 * interval;
            elif (value & 0x70) == 0x70:
                sample = 30 - 2 * interval;
            sample = sample if (value & 0x80) else -sample - 1
            # scale to (SAMPLE_MIN..SAMPLE_MAX)
            return sample << 18
        f = StringIO(data)
        for x in xrange(self.samples()):
            if len(self.channels) == 1:
                self.channels[0].data[x] = decode(f)
            else:
                self.channels[0].data[x] = decode(f)
                self.channels[1].data[x] = decode(f)
    def compress4(self):
        # compid 1: 4-bit ADPCM via Compression4.
        data = StringIO()
        hdr = struct.pack('<IIIIII', 0x2e617564, 24, self.samples(), 1, SAMPLE_RATE, len(self.channels))
        comp = Compression4(data)
        comp.compress(self)
        return hdr + data.getvalue()
    def apply_gain(self, gain):
        # Apply a Gain to every channel in place.
        for c in self.channels:
            c.apply_gain(gain)
    def apply_pan(self, pan):
        # Stereo balance: attenuate one side depending on pan sign.
        if len(self.channels) <= 1:
            return
        leftGain, rightGain = Gain.from_pan(pan)
        self.channels[0].apply_gain(leftGain)
        self.channels[1].apply_gain(rightGain)
    def apply_echo(self, delay):
        # Apply the same echo to every channel.
        for c in self.channels:
            c.apply_echo(delay)
class TemplateGenerator(Actions):
    def start(self):
        #self.delay(100)
        # Fixed RNG seed so generated polls are reproducible.
        self.state['rng'] = 0xDEADBEEF12345678
        self.state['tracks'] = []
    def export_audio(self):
        """Mix all live tracks into one stereo AudioTrack, honoring each
        track's gain and pan."""
        mix = AudioTrack.fromStereo(AudioStream.fromSilence(0), AudioStream.fromSilence(0))
        for track in self.state['tracks']:
            if track is None: continue
            leftGain, rightGain = Gain.from_pan(track.pan)
            if len(track.channels) == 1:
                # Mono source feeds both output channels.
                mix.channels[0].mix(track.channels[0], leftGain * track.gain)
                mix.channels[1].mix(track.channels[0], rightGain * track.gain)
            else:
                mix.channels[0].mix(track.channels[0], leftGain * track.gain)
                mix.channels[1].mix(track.channels[1], rightGain * track.gain)
        return mix
def quit(self):
self.menu()
self.write('9\n')
self.read(delim='\n', expect='Thank you for | |
b:
e = self.extensionEntry
e.delete(0,"end")
e.insert(0,ext)
# Print options.
b = c.config.getBool("print_both_lines_for_matches")
if b == None: b = 0
self.printBothMatchesVar.set(b)
b = c.config.getBool("print_matching_lines")
if b == None: b = 0
self.printMatchesVar.set(b)
b = c.config.getBool("print_mismatching_lines")
if b == None: b = 0
self.printMismatchesVar.set(b)
b = c.config.getBool("print_trailing_lines")
if b == None: b = 0
self.printTrailingMismatchesVar.set(b)
n = c.config.getInt("limit_count")
b = n and n > 0
b = g.choose(b and b != 0,1,0)
self.stopAfterMismatchVar.set(b)
if b:
e = self.countEntry
e.delete(0,"end")
e.insert(0,str(n))
# bool options...
for option,var,default in (
# Whitespace options.
("ignore_blank_lines",self.ignoreBlankLinesVar,1),
("ignore_interior_whitespace",self.ignoreInteriorWhitespaceVar,0),
("ignore_leading_whitespace",self.ignoreLeadingWhitespaceVar,0),
("ignore_sentinel_lines",self.ignoreSentinelLinesVar,0),
("make_whitespace_visible", self.makeWhitespaceVisibleVar,0),
):
b = c.config.getBool(option)
if b is None: b = default
var.set(b)
if 0: # old code
b = c.config.getBool("ignore_blank_lines")
if b == None: b = 1 # unusual default.
self.ignoreBlankLinesVar.set(b)
b = c.config.getBool("ignore_interior_whitespace")
if b == None: b = 0
self.ignoreInteriorWhitespaceVar.set(b)
b = c.config.getBool("ignore_leading_whitespace")
if b == None: b = 0
self.ignoreLeadingWhitespaceVar.set(b)
b = c.config.getBool("ignore_sentinel_lines")
if b == None: b = 0
self.ignoreSentinelLinesVar.set(b)
b = c.config.getBool("make_whitespace_visible")
if b == None: b = 0
self.makeWhitespaceVisibleVar.set(b)
#@+node:ekr.20090126093408.113: *5* createFrame (tkComparePanel)
    def createFrame (self):
        """Build the Compare panel UI: organizer frames, the three browser
        rows, the extension row, whitespace/print option frames and the
        compare buttons."""
        gui = g.app.gui ; top = self.top
        #@+<< create the organizer frames >>
        #@+node:ekr.20090126093408.114: *6* << create the organizer frames >>
        outer = Tk.Frame(self.frame, bd=2,relief="groove")
        outer.pack(pady=4)
        row1 = Tk.Frame(outer)
        row1.pack(pady=4)
        row2 = Tk.Frame(outer)
        row2.pack(pady=4)
        row3 = Tk.Frame(outer)
        row3.pack(pady=4)
        row4 = Tk.Frame(outer)
        row4.pack(pady=4,expand=1,fill="x") # for left justification.
        options = Tk.Frame(outer)
        options.pack(pady=4)
        ws = Tk.Frame(options)
        ws.pack(side="left",padx=4)
        pr = Tk.Frame(options)
        pr.pack(side="right",padx=4)
        lower = Tk.Frame(outer)
        lower.pack(pady=6)
        #@-<< create the organizer frames >>
        #@+<< create the browser rows >>
        #@+node:ekr.20090126093408.115: *6* << create the browser rows >>
        # One row each for compare path 1, compare path 2 and the output file:
        # label, entry, browse button and a per-row checkbox.
        for row,text,text2,command,var in (
            (row1,"Compare path 1:","Ignore first line",self.onBrowse1,self.ignoreFirstLine1Var),
            (row2,"Compare path 2:","Ignore first line",self.onBrowse2,self.ignoreFirstLine2Var),
            (row3,"Output file:", "Use output file", self.onBrowse3,self.useOutputFileVar) ):
            lab = Tk.Label(row,anchor="e",text=text,width=13)
            lab.pack(side="left",padx=4)
            e = Tk.Entry(row)
            e.pack(side="left",padx=2)
            self.browseEntries.append(e)
            b = Tk.Button(row,text="browse...",command=command)
            b.pack(side="left",padx=6)
            b = Tk.Checkbutton(row,text=text2,anchor="w",variable=var,width=15)
            b.pack(side="left")
        #@-<< create the browser rows >>
        #@+<< create the extension row >>
        #@+node:ekr.20090126093408.116: *6* << create the extension row >>
        b = Tk.Checkbutton(row4,anchor="w",var=self.limitToExtensionVar,
            text="Limit directory compares to type:")
        b.pack(side="left",padx=4)
        self.extensionEntry = e = Tk.Entry(row4,width=6)
        e.pack(side="left",padx=2)
        b = Tk.Checkbutton(row4,anchor="w",var=self.appendOutputVar,
            text="Append output to output file")
        b.pack(side="left",padx=4)
        #@-<< create the extension row >>
        #@+<< create the whitespace options frame >>
        #@+node:ekr.20090126093408.117: *6* << create the whitespace options frame >>
        w,f = gui.create_labeled_frame(ws,caption="Whitespace options",relief="groove")
        for text,var in (
            ("Ignore Leo sentinel lines", self.ignoreSentinelLinesVar),
            ("Ignore blank lines", self.ignoreBlankLinesVar),
            ("Ignore leading whitespace", self.ignoreLeadingWhitespaceVar),
            ("Ignore interior whitespace",self.ignoreInteriorWhitespaceVar),
            ("Make whitespace visible", self.makeWhitespaceVisibleVar) ):
            b = Tk.Checkbutton(f,text=text,variable=var)
            b.pack(side="top",anchor="w")
        spacer = Tk.Frame(f)
        spacer.pack(padx="1i")
        #@-<< create the whitespace options frame >>
        #@+<< create the print options frame >>
        #@+node:ekr.20090126093408.118: *6* << create the print options frame >>
        w,f = gui.create_labeled_frame(pr,caption="Print options",relief="groove")
        row = Tk.Frame(f)
        row.pack(expand=1,fill="x")
        b = Tk.Checkbutton(row,text="Stop after",variable=self.stopAfterMismatchVar)
        b.pack(side="left",anchor="w")
        self.countEntry = e = Tk.Entry(row,width=4)
        e.pack(side="left",padx=2)
        e.insert(1,"1")
        lab = Tk.Label(row,text="mismatches")
        lab.pack(side="left",padx=2)
        for padx,text,var in (
            (0, "Print matched lines", self.printMatchesVar),
            (20, "Show both matching lines", self.printBothMatchesVar),
            (0, "Print mismatched lines", self.printMismatchesVar),
            (0, "Print unmatched trailing lines",self.printTrailingMismatchesVar) ):
            b = Tk.Checkbutton(f,text=text,variable=var)
            b.pack(side="top",anchor="w",padx=padx)
            self.printButtons.append(b)
        # To enable or disable the "Print both matching lines" button.
        b = self.printButtons[0]
        b.configure(command=self.onPrintMatchedLines)
        spacer = Tk.Frame(f)
        spacer.pack(padx="1i")
        #@-<< create the print options frame >>
        #@+<< create the compare buttons >>
        #@+node:ekr.20090126093408.119: *6* << create the compare buttons >>
        for text,command in (
            ("Compare files", self.onCompareFiles),
            ("Compare directories",self.onCompareDirectories) ):
            b = Tk.Button(lower,text=text,command=command,width=18)
            b.pack(side="left",padx=6)
        #@-<< create the compare buttons >>
        gui.center_dialog(top) # Do this _after_ building the dialog!
        self.finishCreate()
        top.protocol("WM_DELETE_WINDOW", self.onClose)
#@+node:ekr.20090126093408.120: *5* setIvarsFromWidgets
def setIvarsFromWidgets (self):
# File paths: checks for valid file name.
e = self.browseEntries[0]
self.fileName1 = e.get()
e = self.browseEntries[1]
self.fileName2 = e.get()
# Ignore first line settings.
self.ignoreFirstLine1 = self.ignoreFirstLine1Var.get()
self.ignoreFirstLine2 = self.ignoreFirstLine2Var.get()
# Output file: checks for valid file name.
if self.useOutputFileVar.get():
e = self.browseEntries[2]
name = e.get()
if name != None and len(name) == 0:
name = None
self.outputFileName = name
else:
self.outputFileName = None
# Extension settings.
if self.limitToExtensionVar.get():
self.limitToExtension = self.extensionEntry.get()
if len(self.limitToExtension) == 0:
self.limitToExtension = None
else:
self.limitToExtension = None
self.appendOutput = self.appendOutputVar.get()
# Whitespace options.
self.ignoreBlankLines = self.ignoreBlankLinesVar.get()
self.ignoreInteriorWhitespace = self.ignoreInteriorWhitespaceVar.get()
self.ignoreLeadingWhitespace = self.ignoreLeadingWhitespaceVar.get()
self.ignoreSentinelLines = self.ignoreSentinelLinesVar.get()
self.makeWhitespaceVisible = self.makeWhitespaceVisibleVar.get()
# Print options.
self.printMatches = self.printMatchesVar.get()
self.printMismatches = self.printMismatchesVar.get()
self.printTrailingMismatches = self.printTrailingMismatchesVar.get()
if self.printMatches:
self.printBothMatches = self.printBothMatchesVar.get()
else:
self.printBothMatches = False
if self.stopAfterMismatchVar.get():
try:
count = self.countEntry.get()
self.limitCount = int(count)
except: self.limitCount = 0
else:
self.limitCount = 0
#@+node:ekr.20090126093408.121: *4* bringToFront
    def bringToFront(self):
        # De-minimize the window and raise it above its siblings.
        self.top.deiconify()
        self.top.lift()
#@+node:ekr.20090126093408.122: *4* browser
def browser (self,n):
types = [
("C/C++ files","*.c"),
("C/C++ files","*.cpp"),
("C/C++ files","*.h"),
("C/C++ files","*.hpp"),
("Java files","*.java"),
("Lua files", "*.lua"),
("Pascal files","*.pas"),
("Python files","*.py"),
("Text files","*.txt"),
("All files","*") ]
fileName = tkFileDialog.askopenfilename(
title="Choose compare file" + n,
filetypes=types,
defaultextension=".txt")
if fileName and len(fileName) > 0:
# The dialog also warns about this, so this may never happen.
if not g.os_path_exists(fileName):
self.show("not found: " + fileName)
fileName = None
else: fileName = None
return fileName
#@+node:ekr.20090126093408.123: *4* Event handlers...
#@+node:ekr.20090126093408.124: *5* onBrowse...
def onBrowse1 (self):
fileName = self.browser("1")
if fileName:
e = self.browseEntries[0]
e.delete(0,"end")
e.insert(0,fileName)
self.top.deiconify()
def onBrowse2 (self):
fileName = self.browser("2")
if fileName:
e = self.browseEntries[1]
e.delete(0,"end")
e.insert(0,fileName)
self.top.deiconify()
def onBrowse3 (self): # Get the name of the output file.
fileName = tkFileDialog.asksaveasfilename(
initialfile = self.defaultOutputFileName,
title="Set output file",
filetypes=[("Text files", "*.txt")],
defaultextension=".txt")
if fileName and len(fileName) > 0:
self.defaultOutputFileName = fileName
self.useOutputFileVar.set(1) # The user will expect this.
e = self.browseEntries[2]
e.delete(0,"end")
e.insert(0,fileName)
#@+node:ekr.20090126093408.125: *5* onClose
    def onClose (self):
        # Hide rather than destroy, so the panel can be re-shown quickly.
        self.top.withdraw()
#@+node:ekr.20090126093408.126: *5* onCompare...
    def onCompareDirectories (self):
        # Snapshot the widget state, then run the directory comparison.
        self.setIvarsFromWidgets()
        self.compare_directories(self.fileName1,self.fileName2)
    def onCompareFiles (self):
        # Snapshot the widget state, then run the file comparison.
        self.setIvarsFromWidgets()
        self.compare_files(self.fileName1,self.fileName2)
#@+node:ekr.20090126093408.127: *5* onPrintMatchedLines
    def onPrintMatchedLines (self):
        # Enable "Show both matching lines" only when matched lines are printed.
        v = self.printMatchesVar.get()
        b = self.printButtons[1]
        state = g.choose(v,"normal","disabled")
        b.configure(state=state)
#@-others
#@+node:ekr.20090126093408.190: *3* wxKeyHandlerClass (keyHandlerClass)
class wxKeyHandlerClass (leoKeys.keyHandlerClass):
    '''wxWidgets overrides of base keyHandlerClass.'''
    #@+others
    #@+node:ekr.20090126093408.191: *4* wxKey.__init__
    def __init__(self,c,useGlobalKillbuffer=False,useGlobalRegisters=False):
        # g.trace('wxKeyHandlerClass',g.callers())
        self.widget = None # Set in finishCreate.
        # Init the base class.
        leoKeys.keyHandlerClass.__init__(self,c,useGlobalKillbuffer,useGlobalRegisters)
    #@+node:ekr.20090126093408.192: *4* wxKey.finishCreate
    def finishCreate (self):
        k = self ; c = k.c
        leoKeys.keyHandlerClass.finishCreate(self) # Call the base class.
        # In the Tk version, this is done in the editor logic.
        c.frame.body.createBindings(w=c.frame.body.bodyCtrl)
        # k.dumpMasterBindingsDict()
        # Use the minibuffer control as this handler's label widget.
        self.widget = c.frame.minibuffer.ctrl
        self.setLabelGrey()
    #@-others
#@+node:ekr.20090126093408.194: *3* wxLeoApp class
class wxLeoApp (wx.App):
    '''The wx application object for Leo.'''
    #@+others
    #@+node:ekr.20090126093408.195: *4* OnInit (wxLeoApp)
    def OnInit(self):
        # Called by wx at startup; must return True to continue running.
        self.SetAppName("Leo")
        # Add some pre-defined default colors.
        self.leo_colors = ('leo blue','leo pink','leo yellow')
        wx.TheColourDatabase.AddColour('leo blue', wx.Color(240,248,255)) # alice blue
        wx.TheColourDatabase.AddColour('leo pink', wx.Color(255,228,225)) # misty rose
        wx.TheColourDatabase.AddColour('leo yellow',wx.Color(253,245,230)) # old lace
        return True
    #@+node:ekr.20090126093408.196: *4* OnExit
    def OnExit(self):
        # Nothing to clean up at shutdown.
        return True
    #@-others
#@+node:ekr.20090126093408.197: *3* wxLeoBody class (leoBody)
class wxLeoBody (leoFrame.leoBody):
"""A class to create a wxPython body pane."""
#@+others
#@+node:ekr.20090126093408.198: *4* Birth & death (wxLeoBody)
#@+node:ekr.20090126093408.199: *5* wxBody.__init__
    def __init__ (self,frame,parentFrame):
        '''Ctor for wxLeoBody: init the base class, then create the
        body widget, colorizer and recoloring state.'''

        # Init the base class: calls createControl.
        leoFrame.leoBody.__init__(self,frame,parentFrame)

        self.bodyCtrl = self.createControl(frame,parentFrame)
        self.colorizer = leoColor.colorizer(self.c)

        self.keyDownModifiers = None      # Modifier state tracked across key events.
        self.forceFullRecolorFlag = False # When True, the next recolor pass is a full one.
#@+node:ekr.20090126093408.200: *5* wxBody.createControl
    def createControl (self,frame,parentFrame):
        '''Create and return the body text widget via the gui factory.'''

        w = g.app.gui.bodyTextWidget(
            self.c,
            parentFrame,
            pos = wx.DefaultPosition,
            size = wx.DefaultSize,
            name = 'body', # Must be body for k.masterKeyHandler.
        )

        return w
#@+node:ekr.20090126093408.201: *5* wxBody.createBindings NOT USED AT PRESENT
    def createBindings (self,w=None):
        '''(wxBody) Create gui-dependent bindings.
        These are *not* made in nullBody instances.'''

        # NOTE: disabled by the early return below (see the node headline:
        # "NOT USED AT PRESENT"); the code after it never runs.
        return ###

        frame = self.frame ; c = self.c ; k = c.k

        if not w: w = self.bodyCtrl

        # g.trace('wxBody')

        w.bind('<Key>', k.masterKeyHandler)

        # The tuple below is empty (all entries commented out), so no
        # click handlers are bound even if the early return is removed.
        for kind,func,handler in (
            #('<Button-1>',  frame.OnBodyClick,          k.masterClickHandler),
            #('<Button-3>',  frame.OnBodyRClick,         k.masterClick3Handler),
            #('<Double-1>',  frame.OnBodyDoubleClick,    k.masterDoubleClickHandler),
            #('<Double-3>',  None,                       k.masterDoubleClick3Handler),
            #('<Button-2>',  frame.OnPaste,              k.masterClickHandler),
        ):
            def bodyClickCallback(event,handler=handler,func=func):
                return handler(event,func)

            w.bind(kind,bodyClickCallback)
#@+node:ekr.20090126093408.202: *5* wxBody.setEditorColors
    def setEditorColors (self,bg,fg):
        '''No-op: the wx body does not support per-editor colors.'''
        pass
#@+node:ekr.20090126093408.203: *4* Tk wrappers (wxBody)
    # Tk-compatibility wrappers: delegate to the underlying body widget so
    # Leo's Tk-era core can drive the wx body unchanged.
    def cget(self,*args,**keys):            pass # to be removed from Leo's core.
    def configure (self,*args,**keys):      pass # to be removed from Leo's core.
    def hasFocus (self):                    return self.bodyCtrl.getFocus()
    def setFocus (self):
        # g.trace('body')
        return self.bodyCtrl.setFocus()
    # wx-style aliases for the focus methods.
    SetFocus = setFocus
    getFocus = hasFocus
    def scheduleIdleTimeRoutine (self,function,*args,**keys): g.trace() # Not implemented: just traces.
    # Text-tag operations forward directly to the body widget.
    def tag_add (self,*args,**keys):        return self.bodyCtrl.tag_add(*args,**keys)
    def tag_bind (self,*args,**keys):       return self.bodyCtrl.tag_bind(*args,**keys)
    def tag_configure (self,*args,**keys):  return self.bodyCtrl.tag_configure(*args,**keys)
    def tag_delete (self,*args,**keys):     return self.bodyCtrl.tag_delete(*args,**keys)
    def tag_remove (self,*args,**keys):     return self.bodyCtrl.tag_remove(*args,**keys)
#@+node:ekr.20090126093408.204: *4* onBodyChanged (wxBody: calls leoBody.onBodyChanged)
def onBodyChanged (self,undoType,oldSel=None,oldText=None,oldYview=None):
if g.app.killed or self.c.frame.killed: return
c = self.c ; w = c.frame.body.bodyCtrl
if not c: return g.trace('no c!')
p = c.currentPosition()
if not p: return g.trace('no | |
**kwargs):
self.update_query = kwargs
super(UpdateQuery, self).__init__(_model)
def clone(self):
query = UpdateQuery(self.model, **self.update_query)
query._where = self.clone_where()
query._where_models = set(self._where_models)
query._joined_models = self._joined_models.copy()
query._joins = self.clone_joins()
query._table_alias = dict(self._table_alias)
return query
    def parse_update(self):
        """Resolve the raw keyword SET values into a {db_column: value} dict,
        running each value through the field's db_value conversion unless it
        is an F/R expression (handled later in sql())."""
        sets = {}
        for k, v in self.update_query.iteritems():
            # Translate a db-column key back to the field's attribute name.
            if k in self.model._meta.columns:
                k = self.model._meta.columns[k].name
            try:
                field = self.model._meta.get_field_by_name(k)
            except AttributeError:
                # Fall back to related (foreign-key) fields; re-raise the
                # original AttributeError if that lookup also fails.
                field = self.model._meta.get_related_field_by_name(k)
                if field is None:
                    raise
            # F/R expressions are rendered as SQL later, not converted here.
            if not isinstance(v, (F, R)):
                v = field.db_value(v)
            sets[field.db_column] = v
        return sets
    def sql(self):
        """Build and return the (sql, params) pair for the UPDATE statement.

        SET items are sorted by column name for deterministic output; F
        expressions render as column arithmetic, R expressions supply raw SQL.
        """
        joins, clauses, alias_map = self.compile_where()
        where, where_data = self.flatten_clauses(clauses)

        set_statement = self.parse_update()

        params = []
        update_params = []

        alias = alias_map.get(self.model)

        for k, v in sorted(set_statement.items(), key=lambda (k, v): k):
            if isinstance(v, F):
                # Column-expression (e.g. counter + 1): no bound parameter.
                value = self.parse_f(v, v.model or self.model, alias_map)
            elif isinstance(v, R):
                # Raw SQL fragment with its own parameters.
                value, rparams = v.sql_update()
                value = value % self.interpolation
                # NOTE(review): rparams is appended as a single element, not
                # extended — presumably R.sql_update() yields one param; verify.
                params.append(rparams)
            else:
                params.append(v)
                value = self.interpolation

            update_params.append('%s=%s' % (self.combine_field(alias, k), value))

        update = 'UPDATE %s SET %s' % (
            self.qn(self.model._meta.db_table), ', '.join(update_params))
        where = ' AND '.join(where)

        pieces = [update]

        if where:
            pieces.append('WHERE %s' % where)
            params.extend(self.convert_where_to_params(where_data))

        return ' '.join(pieces), params
    def join(self, *args, **kwargs):
        # JOINs are deliberately unsupported on UPDATE.
        raise AttributeError('Update queries do not support JOINs in sqlite')

    def execute(self):
        """Run the UPDATE and return the number of rows affected."""
        result = self.raw_execute(*self.sql())
        return self.database.rows_affected(result)
class DeleteQuery(BaseQuery):
    """Builds and executes a DELETE statement for a model, honoring WHERE
    clauses attached through the BaseQuery machinery. JOINs are unsupported."""

    def clone(self):
        """Return an independent copy of this query (shared model, copied state)."""
        query = DeleteQuery(self.model)
        query._where = self.clone_where()
        query._where_models = set(self._where_models)
        query._joined_models = self._joined_models.copy()
        query._joins = self.clone_joins()
        query._table_alias = dict(self._table_alias)
        return query

    def sql(self):
        """Build and return the (sql, params) pair for the DELETE statement."""
        joins, clauses, alias_map = self.compile_where()
        where, where_data = self.flatten_clauses(clauses)

        params = []

        delete = 'DELETE FROM %s' % (self.qn(self.model._meta.db_table))
        where = ' AND '.join(where)

        pieces = [delete]

        if where:
            pieces.append('WHERE %s' % where)
            params.extend(self.convert_where_to_params(where_data))

        return ' '.join(pieces), params

    def join(self, *args, **kwargs):
        # Bug fix: the message previously said "Update queries" — copy-pasted
        # from UpdateQuery.join (compare InsertQuery.join, which names itself).
        raise AttributeError('Delete queries do not support JOINs in sqlite')

    def execute(self):
        """Run the DELETE and return the number of rows affected."""
        result = self.raw_execute(*self.sql())
        return self.database.rows_affected(result)
class InsertQuery(BaseQuery):
    """Builds and executes an INSERT statement for a single row.

    Model-level defaults are applied first, then overridden by the keyword
    arguments. WHERE clauses and JOINs are rejected.
    """
    def __init__(self, _model, **kwargs):
        # Start from the model's declared defaults, then layer caller values.
        query = _model._meta.get_default_dict()
        query.update(kwargs)

        self.insert_query = query
        super(InsertQuery, self).__init__(_model)

    def parse_insert(self):
        """Resolve keyword values into parallel (quoted columns, db values)
        lists, sorted by key for deterministic SQL."""
        cols = []
        vals = []

        for k, v in sorted(self.insert_query.items(), key=lambda (k, v): k):
            # Translate a db-column key back to the field's attribute name.
            if k in self.model._meta.columns:
                k = self.model._meta.columns[k].name
            try:
                field = self.model._meta.get_field_by_name(k)
            except AttributeError:
                # Fall back to related (foreign-key) fields; re-raise if absent.
                field = self.model._meta.get_related_field_by_name(k)
                if field is None:
                    raise
            cols.append(self.qn(field.db_column))
            vals.append(field.db_value(v))

        return cols, vals

    def sql(self):
        """Build and return the (sql, params) pair for the INSERT statement."""
        cols, vals = self.parse_insert()

        insert = 'INSERT INTO %s (%s) VALUES (%s)' % (
            self.qn(self.model._meta.db_table),
            ','.join(cols),
            ','.join(self.interpolation for v in vals)
        )

        return insert, vals

    def where(self, *args, **kwargs):
        raise AttributeError('Insert queries do not support WHERE clauses')

    def join(self, *args, **kwargs):
        raise AttributeError('Insert queries do not support JOINs')

    def execute(self):
        """Run the INSERT and return the new row's primary key."""
        result = self.raw_execute(*self.sql())
        return self.database.last_insert_id(result, self.model)
def model_or_select(m_or_q):
    """
    Normalize the argument into a (model, select query) pair.

    Accepts either a model class or an already-constructed query; a bare
    model gets a default .select() query built for it.
    """
    if not isinstance(m_or_q, BaseQuery):
        return (m_or_q, m_or_q.select())
    return (m_or_q.model, m_or_q)
def convert_lookup(model, joins, lookup):
    """
    Given a model, a graph of joins, and a lookup, return a tuple containing
    a normalized lookup:
    (model actually being queried, updated graph of joins, normalized lookup)
    """
    operations = model._meta.database.adapter.operations

    pieces = lookup.split('__')
    operation = None

    query_model = model

    if len(pieces) > 1:
        # The trailing piece may be an operation suffix (e.g. '__gte').
        if pieces[-1] in operations:
            operation = pieces.pop()

        # The last remaining piece is the field name itself.
        lookup = pieces.pop()

        # we have some joins
        if len(pieces):
            for piece in pieces:
                # piece is something like 'blog' or 'entry_set'
                joined_model = None
                # First try forward foreign keys on the current model.
                for field in query_model._meta.get_fields():
                    if not isinstance(field, ForeignKeyField):
                        continue

                    if piece in (field.name, field.db_column, field.related_name):
                        joined_model = field.to

                # Then fall back to reverse relations (e.g. 'entry_set').
                if not joined_model:
                    try:
                        joined_model = query_model._meta.reverse_relations[piece]
                    except KeyError:
                        raise ValueError('Unknown relation: "%s" of "%s"' % (
                            piece,
                            query_model,
                        ))

                # Record the edge in the join graph and descend.
                joins.setdefault(query_model, set())
                joins[query_model].add(joined_model)
                query_model = joined_model

    # Re-attach the operation suffix to the normalized field name.
    if operation:
        lookup = '%s__%s' % (lookup, operation)

    return query_model, joins, lookup
def filter_query(model_or_query, *args, **kwargs):
    """
    Provide a django-like interface for executing queries

    Positional args are Q/Node trees; keyword args are field lookups. Both
    may traverse relations with '__', which implicitly adds JOINs.
    """
    model, select_query = model_or_select(model_or_query)

    query = {} # mapping of models to queries
    joins = {} # a graph of joins needed, passed into the convert_lookup function

    # traverse Q() objects, find any joins that may be lurking -- clean up the
    # lookups and assign the correct model
    def fix_q(node_or_q, joins):
        if isinstance(node_or_q, Node):
            for child in node_or_q.children:
                fix_q(child, joins)
        elif isinstance(node_or_q, Q):
            new_query = {}
            curr_model = node_or_q.model or model

            for raw_lookup, value in node_or_q.query.items():
                # do we have a query model at this point?
                query_model, joins, lookup = convert_lookup(curr_model, joins, raw_lookup)
                new_query[lookup] = value

            # Rewrite the Q in place with the resolved model and lookups.
            node_or_q.model = query_model
            node_or_q.query = new_query

    for node_or_q in args:
        fix_q(node_or_q, joins)

    # iterate over keyword lookups and determine lookups and necessary joins
    for raw_lookup, value in kwargs.items():
        queried_model, joins, lookup = convert_lookup(model, joins, raw_lookup)
        query.setdefault(queried_model, [])
        query[queried_model].append((lookup, value))

    # Depth-first walk of the join graph, adding each JOIN exactly once.
    def follow_joins(current, query):
        if current in joins:
            for joined_model in joins[current]:
                query = query.switch(current)

                if joined_model not in query._joined_models:
                    query = query.join(joined_model)

                query = follow_joins(joined_model, query)

        return query
    select_query = follow_joins(model, select_query)

    # Attach the Q-tree filters first...
    for node in args:
        select_query = select_query.where(node)

    # ...then the keyword filters, switched onto the model they target.
    for model, lookups in query.items():
        qargs, qkwargs = [], {}
        for lookup in lookups:
            if isinstance(lookup, tuple):
                qkwargs[lookup[0]] = lookup[1]
            else:
                qargs.append(lookup)
        select_query = select_query.switch(model).where(*qargs, **qkwargs)

    return select_query
def annotate_query(select_query, related_model, aggregation):
    """
    Perform an aggregation against a related model

    Adds the aggregation (default COUNT of the related pk) to the selection
    and groups by the main model's columns, joining the related model if
    it is not already joined.
    """
    aggregation = aggregation or Count(related_model._meta.pk_name)
    model = select_query.model
    select_query = select_query.switch(model)
    cols = select_query.query

    # ensure the join is there
    if related_model not in select_query._joined_models:
        select_query = select_query.join(related_model).switch(model)

    # query for it
    # Normalize the existing selection (dict / string / list) into a dict and
    # derive what to GROUP BY from it.
    if isinstance(cols, dict):
        selection = cols
        group_by = cols[model]
    elif isinstance(cols, basestring):
        selection = {model: [cols]}
        if cols == '*':
            group_by = model
        else:
            group_by = [col.strip() for col in cols.split(',')]
    elif isinstance(cols, (list, tuple)):
        selection = {model: cols}
        group_by = cols
    else:
        raise ValueError('Unknown type passed in to select query: "%s"' % type(cols))

    # query for the related object
    if related_model in selection:
        selection[related_model].append(aggregation)
    else:
        selection[related_model] = [aggregation]
    select_query.query = selection
    # A '*' selection needs no GROUP BY clause.
    if group_by == ['*']:
        return select_query
    else:
        return select_query.group_by(group_by)
########################################################################################################################################################
### Data Definition Language (DDL): columns, fields, data types
###
class Column(object):
    "Represents physical storage of a given type in DB. Used by Field class"
    db_field = ''
    template = '%(column_type)s'

    def __init__(self, **attributes):
        # Start from the subclass defaults, then layer caller overrides on top.
        merged = self.get_attributes()
        merged.update(**attributes)
        self.attributes = merged

    def get_attributes(self):
        # Subclasses override this to supply their default attribute dict.
        return {}

    def python_value(self, value):
        # DB -> Python conversion; identity by default.
        return value

    def db_value(self, value):
        # Python -> DB conversion; identity by default.
        return value

    def render(self, db):
        # Fill the template with the DB-specific column type, letting any
        # explicit attributes override it.
        params = {'column_type': db.column_for_field_type(self.db_field)}
        params.update(self.attributes)
        return self.template % params
class VarCharColumn(Column):
    """Fixed-width string storage (VARCHAR(len), default 255).

    db_value coerces None to '' and truncates to the configured length.
    """
    db_field = 'string'
    template = '%(column_type)s(%(len)d)'

    def get_attributes(self):
        return {'len': 255}

    def db_value(self, value):
        # Python 2 `unicode`: coerce to text, treating None/falsy as empty.
        value = unicode(value or '')
        return value[:self.attributes['len']]
class TextColumn(Column):
    "Unbounded text storage; falsy values (None, '') are stored as ''."
    db_field = 'text'

    def db_value(self, value):
        if value:
            return value
        return ''
def format_date_time(value, formats, post_process=None):
    """Parse *value* with the first matching strptime format.

    The parsed datetime is passed through *post_process* (identity when
    omitted); if no format matches, *value* is returned unchanged.
    """
    if post_process is None:
        post_process = lambda parsed: parsed
    for candidate in formats:
        try:
            # post_process stays inside the try so its ValueError also
            # falls through to the next candidate format.
            return post_process(datetime.datetime.strptime(value, candidate))
        except ValueError:
            continue
    return value
class DateTimeColumn(Column):
    """Datetime storage; python_value parses strings with a list of
    accepted formats (most precise first), leaving non-strings untouched."""
    db_field = 'datetime'

    def get_attributes(self):
        return {
            'formats': [
                '%Y-%m-%d %H:%M:%S.%f',
                '%Y-%m-%d %H:%M:%S',
                '%Y-%m-%d',
            ]
        }

    def python_value(self, value):
        # Python 2 `basestring`: only parse textual values.
        if isinstance(value, basestring):
            return format_date_time(value, self.attributes['formats'])
        return value
class TimestampColumn(DateTimeColumn):
    """Same behavior as DateTimeColumn but mapped to the 'timestamp' db type."""
    db_field = 'timestamp'
class DateColumn(Column):
    """Date storage; parses strings (date-only format first) and truncates
    datetimes to their date component."""
    db_field = 'date'

    def get_attributes(self):
        return {
            'formats': [
                '%Y-%m-%d',
                '%Y-%m-%d %H:%M:%S',
                '%Y-%m-%d %H:%M:%S.%f',
            ]
        }

    def python_value(self, value):
        # Python 2 `basestring`: parse text, then reduce to a date.
        if isinstance(value, basestring):
            pp = lambda x: x.date()
            return format_date_time(value, self.attributes['formats'], pp)
        elif isinstance(value, datetime.datetime):
            return value.date()
        return value
class TimeColumn(Column):
    """Time storage; parses strings (time-only formats first) and truncates
    datetimes to their time component."""
    db_field = 'time'

    def get_attributes(self):
        return {
            'formats': [
                '%H:%M:%S.%f',
                '%H:%M:%S',
                '%H:%M',
                '%Y-%m-%d %H:%M:%S.%f',
                '%Y-%m-%d %H:%M:%S',
            ]
        }

    def python_value(self, value):
        # Python 2 `basestring`: parse text, then reduce to a time.
        if isinstance(value, basestring):
            pp = lambda x: x.time()
            return format_date_time(value, self.attributes['formats'], pp)
        elif isinstance(value, datetime.datetime):
            return value.time()
        return value
class IntegerColumn(Column):
    "Integer storage; falsy input (None, 0) is normalized to 0 on write."
    db_field = 'integer'

    def db_value(self, value):
        if value:
            return value
        return 0

    def python_value(self, value):
        # None round-trips as None; everything else is coerced to int.
        if value is None:
            return None
        return int(value)
class BigIntegerColumn(IntegerColumn):
    """Same behavior as IntegerColumn but mapped to the 'bigint' db type."""
    db_field = 'bigint'
class BooleanColumn(Column):
    "Boolean storage; db_value coerces truthiness, python_value keeps None."
    db_field = 'boolean'

    def db_value(self, value):
        return True if value else False

    def python_value(self, value):
        if value is None:
            return None
        return bool(value)
class FloatColumn(Column):
    "Float storage; falsy input (None, 0) is normalized to 0.0 on write."
    db_field = 'float'

    def db_value(self, value):
        if value:
            return value
        return 0.0

    def python_value(self, value):
        # None round-trips as None; everything else is coerced to float.
        if value is None:
            return None
        return float(value)
class DoubleColumn(FloatColumn):
    """Same behavior as FloatColumn but mapped to the 'double' db type."""
    db_field = 'double'
class DecimalColumn(Column):
    """Fixed-precision decimal storage (DECIMAL(max_digits, decimal_places)).

    With auto_round enabled, db_value quantizes incoming values to the
    configured number of decimal places using the configured rounding mode.
    """
    db_field = 'decimal'
    template = '%(column_type)s(%(max_digits)d, %(decimal_places)d)'

    def get_attributes(self):
        return {
            'max_digits': 10,
            'decimal_places': 5,
            'auto_round': False,
            'rounding': decimal.DefaultContext.rounding,
        }

    def db_value(self, value):
        D = decimal.Decimal
        # Falsy values (None, 0, '') are stored as Decimal 0.
        if not value:
            return D(0)
        if self.attributes['auto_round']:
            # Quantize to e.g. Decimal('0.00001') for 5 decimal places.
            exp = D(10)**(-self.attributes['decimal_places'])
            return D(str(value)).quantize(exp, rounding=self.attributes['rounding'])
        return value

    def python_value(self, value):
        # str(value) first so floats convert via their repr, not binary value.
        if value is not None:
            if isinstance(value, decimal.Decimal):
                return value
            return decimal.Decimal(str(value))
class PrimaryKeyColumn(Column):
    """Marker column type for an auto-incrementing primary key."""
    db_field = 'primary_key'

class PrimaryKeySequenceColumn(PrimaryKeyColumn):
    """Primary-key column backed by an explicit database sequence."""
    db_field = 'primary_key_with_sequence'
def qdict(op):
    """Factory for Field comparison methods.

    Returns a method that, bound to a Field, builds a Q node expressing
    `<field.name> <op> rhs` against the field's model.
    """
    def fn(self, rhs):
        lookup = '%s__%s' % (self.name, op)
        return Q(self.model, **{lookup: rhs})
    return fn
class Field(object):
"""Represents logical field of a row and column of a DB table.
Uses instances of Column internally, but extends them with semantics: | |
#! /usr/bin/env python
###############################################################################
# PyDial: Multi-domain Statistical Spoken Dialogue System Software
###############################################################################
#
# Copyright 2015 - 2019
# Cambridge University Engineering Department Dialogue Systems Group
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
import os
from scriptine import run, path, log, command
import re
import numpy as np
# Uncomment for mac os users
import matplotlib
# matplotlib.use('TkAgg')
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
#Uncomment for 4k screens
# matplotlib.rcParams.update({'font.size': 22})
# PyDial modules
import Simulate
import Texthub
from utils import Settings
from utils import ContextLogger
from ontology import Ontology
import utils.ContextLogger as clog
import pprint
pp = pprint.PrettyPrinter(indent=4)
# Remove tensorflow deprecation warnings
#import tensorflow.python.util.deprecation as deprecation
#deprecation._PRINT_DEPRECATION_WARNINGS = False
# --- Module-level state shared by the pydial commands; populated in initialise() ---
logger = None                   # ContextLogger instance, created during initialisation.
tracedialog = 2                 # Default dialogue trace verbosity.
policy_dir = ""                 # Directory holding trained policy files.
conf_dir = ""                   # Directory where generated config copies are stored.
log_dir = ""                    # Directory where train/eval logfiles are written.
logfile = ""                    # Path of the current logfile.
gnumtrainbatches = 0            # Number of training batches (iterations).
gtraindialogsperbatch = 0       # Dialogs per training batch.
gnumbatchtestdialogs = 0        # Dialogs used to evaluate each batch.
gnumtestdialogs = 0             # Dialogs per standalone test run.
gtrainerrorrate = 0             # Simulated error rate (%) during training.
gtesterrorrate = 0              # Simulated error rate (%) during testing.
gtrainsourceiteration = 0       # Index of the source policy iteration to update.
gtesteverybatch = False         # Whether to evaluate after every training batch.
gpscale = 1                     # GP-SARSA scale parameter (saved before overrides).
gplotnum = 1                    # Counter for generated plot figures.
gbatchnum = 0                   # Current batch number during training.
isSingleDomain = False          # True when [GENERAL]singledomain is set.
taskID = ""                     # User-defined id parsed from the config name.
domain = ""                     # Single train/test domain name.
domains = []                    # Domain list for multi-domain mode.
policytype = "hdc"              # Policy type for the single-domain case.
policytypes = {}                # Per-domain policy types for multi-domain mode.
def help_command():
    """ Provide an overview of pydial functionality

    Prints usage notes, the [exec_config] options, the train/test/chat/plot
    workflows, and the derived file-naming conventions to stdout.
    """
    print("\n pydial - command line interface to PyDial")
    print('""""""""""""""""""""""""""""""""""""""""""""')
    print(' o Runs simulator to train and test policies')
    print(' o Plots learning rates and performance vs error rate')
    print(' o Runs texthub in multi-domain chat mode\n')
    print('Basic usage:')
    print('  a) Make pydial.py executable and add a symbolic link to it (eg pydial) from your')
    print('     local bin directory.  Create a directory called ID and cd into it.\n')
    print("  b) create a config file and add an exec_config section eg:\n")
    print('     [exec_config]')
    print('     domain = CamRestaurants       # specific train/test domain')
    print('     policytype = gp               # type of policy to train/test')
    print('     configdir = cfgdir            # folder to store configs')
    print('     logfiledir = logdir           # folder to store logfiles')
    print('     numtrainbatches = 2           # num training batches (iterations)')
    print('     traindialogsperbatch = 10     # num dialogs per batch')
    print('     numbatchtestdialogs =  20     # num dialogs to eval each batch')
    print('     trainsourceiteration = 0      # index of initial source policy to update')
    print('     testiteration = 1             # policy iteration to test')
    print('     numtestdialogs =  10          # num dialogs per test')
    print('     trainerrorrate = 0            # train error rate in %')
    print('     testerrorrate  = 0            # test error rate in %')
    print('     testeverybatch = True         # enable batch testing\n')
    print('     by convention the config file name for training and testing should be of the')
    print('     form  ID-policytype-domain.cfg where ID is a user-defined id.')
    print('     (There is more detail on naming conventions below.)')
    print('     Also unless the current directory is the same as the PyDial root')
    print('     make sure that [GENERAL]root points to root of the PyDial source tree.\n')
    print('  c) to train a policy as specified in the config file, type')
    print('     > pydial train config')
    print('     if trainsourceiteration=0 this creates a new policy in n batches where')
    print('     n=numtrainbatches, otherwise an existing policy is trained further.\n')
    print('  d) to test a policy as specified in the config file, type')
    print('     > pydial test config\n')
    print('     texthub.py can be invoked to interact with a policy from the keyboard by:')
    print('     > pydial chat config')
    print('     Note that train and test must refer to a specific domain as per [exec_config] domain')
    print('     whereas chat mode can specify multiple domains via the [GENERAL]domains variable.\n')
    print('  e) for convenience, many config parameters can be overridden on the command line, eg')
    print('     > pydial train config --trainerrorrate=20')
    print('     > pydial test config --iteration=4 --trainerrorrate=20 --testerrorrate=50')
    print('     to train a policy at 20% error rate and test the 4th iteration at 50% error rate.')
    print('     A range of test error rates can be specified as a triple (stErr,enErr,stepSize), eg')
    print("     > pydial test config --iteration=4 --trainerrorrate=20 --testerrorrate='(0,50,10)'")
    print('     to test a policy at 0%, 10%, 20%, 30%, 40%, and 50% error rates.\n')
    print('  f) logfiles for each train/test run are stored in logfiledir.')
    print('     The plot command scans one or more logfiles and extract info to plot eg')
    print('     > pydial plot logdir/*train*')
    print('     Setting the option --printtab, also tabulates the performance data.\n')
    print('  All policy information is stored in the policydir specified in the corresponding ')
    print('  config file section with name [policy_domain].  Since pydial overrides some config')
    print('  params, the actual configs used for each run are recorded in configdir.\n')
    print('  Derived file naming convention:')
    print("     Policyname:    ID-poltype-domain-TrainErrRate  eg S0-gp-CamRestaurants-20")
    print("     Policy:        ID-poltype-domain-TrainErrRate.Iteration eg S0-gp-CamRestaurants-20.3")
    print("     Policyfile:    ID-poltype-domain-TrainErrRate.Iteration.ext eg S0-gp-CamRestaurants-20.3.dct")
    print("     TrainLogfiles: PolicyName.IterationRange.train.log eg S0-gp-CamRestaurants-20.1-3.train.log")
    print("     EvalLogfiles:  Policy.eval.ErrorRange.eval.log eg S0-gp-CamRestaurants-20.3.eval.00-50.log\n")
    print("To get further help:")
    print("   pydial            list of available commands")
    print("   pydial help       this overview")
    print("   pydial cmd --help help for a specific command\n")
def conventionCheck(name):
    """Parse a config id of the form ID-policytype-domain, storing the ID
    part in the global taskID.

    Mismatches with the configured policytype/domain (or a malformed name)
    are silently tolerated — the warning below is commented out.
    """
    global taskID, domain, policytype
    try:
        if name.find('-') < 0:
            raise Exception('no separators')
        # NOTE: a name with more or fewer than 3 parts raises ValueError here,
        # which is also swallowed by the blanket except below.
        (taskID,p,d)=name.split('-')
        if p != policytype:
            raise Exception('policytype != config param')
        if d != domain:
            raise Exception('domain name != config param')
    except Exception as x:
        pass#log.warn("Non-standard config name [%s] (preferred format ID-policytype-domain.cfg)", x.args[0])
def getConfigId(configFileName):
    """Return the bare config id for a config path: the basename with the
    required '.cfg' extension stripped. Exits (status 0) when the extension
    is missing or the file does not exist."""
    dot = configFileName.rfind('.')
    if dot < 0 or configFileName[dot+1:] != 'cfg':
        print("Config file %s does not have required .cfg extension" % configFileName)
        exit(0)
    cfg = path(configFileName)
    if not cfg.isfile():
        print("Config file %s does not exist" % configFileName)
        exit(0)
    configId = configFileName[:dot]
    slash = configId.rfind('/')
    if slash >= 0:
        configId = configId[slash+1:]
    return configId
def getOptionalConfigVar(configvarname, default='', section='exec_config'):
    """Read a string option from the loaded config, falling back to `default`
    when the option is absent."""
    if not Settings.config.has_option(section, configvarname):
        return default
    return Settings.config.get(section, configvarname)
def getRequiredDirectory(directoryname, section='exec_config'):
    """Read a mandatory directory option from the config and normalize it to
    end with a trailing '/'.

    NOTE(review): `assert` is stripped under `python -O`, so the missing-option
    check would silently vanish there; raising would be more robust.
    """
    assert Settings.config.has_option(section, directoryname),\
        "Value {} in section {} is missing.".format(directoryname, section)
    dir = Settings.config.get(section, directoryname)
    if dir[-1] != '/': dir = dir+'/'  # Ensure trailing slash for path joining.
    return dir
def getOptionalConfigInt(configvarname, default='0',section='exec_config'):
    """Read an int option from the loaded config.

    Falls back to the raw string value when it does not parse as an int,
    and to `default` when the option is absent.
    """
    if not Settings.config.has_option(section, configvarname):
        return default
    try:
        return Settings.config.getint(section, configvarname)
    except ValueError:
        return Settings.config.get(section, configvarname)
def getOptionalConfigBool(configvarname, default='False', section='exec_config'):
    """Read a boolean option from the loaded config, falling back to
    `default` when the option is absent."""
    if not Settings.config.has_option(section, configvarname):
        return default
    return Settings.config.getboolean(section, configvarname)
def initialise(configId, config_file, seed, mode, trainerrorrate=None, trainsourceiteration=None,
numtrainbatches=None, traindialogsperbatch=None, numtestdialogs=None,
testerrorrate=None, testenderrorrate=None, iteration=None, traindomains=None, testdomains=None,
dbprefix=None):
global logger, logfile, traceDialog, isSingleDomain
global policy_dir, conf_dir, log_dir
global gnumtrainbatches, gtraindialogsperbatch, gnumbatchtestdialogs, gnumtestdialogs
global gtrainerrorrate, gtesterrorrate, gtrainsourceiteration
global taskID, domain, domains, policytype, gtesteverybatch, gpscale
global gdeleteprevpolicy, isSingleModel
global policytypes
if seed is not None:
seed = int(seed)
seed = Settings.init(config_file, seed)
taskID = 'ID'
isSingleDomain = getOptionalConfigBool("singledomain", isSingleDomain, "GENERAL")
isSingleModel = getOptionalConfigBool("singlemodel", False, "policycommittee")
traceDialog = getOptionalConfigInt("tracedialog", tracedialog, "GENERAL")
domain = getOptionalConfigVar("domains", '', "GENERAL")
if len(domain.split(',')) > 1 and isSingleDomain:
logger.error('It cannot be singledomain and have several domains defined, Check config file.')
if isSingleDomain:
if Settings.config.has_section('policy_' + domain):
policytype = getOptionalConfigVar('policytype', policytype, 'policy_' + domain)
else:
policytype = getOptionalConfigVar('policytype', policytype, 'policy')
conventionCheck(configId)
else:
domains = getOptionalConfigVar("domains", "", "GENERAL").split(',')
policytypes = {}
for domain in domains:
if Settings.config.has_section('policy_' + domain):
policytypes[domain] = getOptionalConfigVar('policytype', policytype, 'policy_' + domain)
else:
policytypes[domain] = getOptionalConfigVar('policytype', policytype, 'policy')
# if gp, make sure to save required scale before potentially overriding
if isSingleDomain:
if policytype == "gp":
if Settings.config.has_section("gpsarsa_" + domain):
try:
gpscale = Settings.config.getint("gpsarsa_" + domain, "scale")
except ValueError:
gpscale = Settings.config.get("gpsarsa_" + domain, "scale")
else:
try:
gpscale = Settings.config.getint("gpsarsa", "scale")
except ValueError:
gpscale = Settings.config.get("gpsarsa", "scale")
else:
gpscales = {}
for domain in domains:
if policytypes[domain] == "gp":
if Settings.config.has_section("gpsarsa_" + domain):
try:
gpscales[domain] = Settings.config.getint("gpsarsa_"+ domain, "scale")
except ValueError:
gpscales[domain] = Settings.config.get("gpsarsa_"+ domain, "scale")
else:
try:
gpscales[domain] = Settings.config.getint("gpsarsa", "scale")
except ValueError:
gpscales[domain] = Settings.config.get("gpsarsa", "scale")
# if deep-rl model, make sure to set the correct n_in
if isSingleDomain:
if Settings.config.has_section("dqnpolicy"):
if domain == 'CamRestaurants':
Settings.config.set("dqnpolicy", 'n_in', '268')
elif domain == 'SFRestaurants':
Settings.config.set("dqnpolicy", 'n_in', '636')
elif domain == 'Laptops11':
Settings.config.set("dqnpolicy", 'n_in', '257')
# TODO: set rest of environments and multidomain
# Get required folders and create if necessary
log_dir = getRequiredDirectory("logfiledir")
conf_dir = getRequiredDirectory("configdir")
if isSingleDomain:
if policytype != 'hdc':
if Settings.config.has_section("policy_"+domain):
policy_dir = getRequiredDirectory("policydir", "policy_"+domain)
| |
node, or set of nodes, as returned by find()
IMPORTANT: match_from and match_to, if created by calls to find(), MUST use different node dummy names;
e.g., make sure that for match_from, find() used the option: dummy_node_name="from"
and for match_to, find() used the option: dummy_node_name="to"
:param rel_name: The name to give to the new relationship between the 2 specified nodes
:param rel_props: TODO: not currently used.
Unclear what multiple calls would do in this case: update the props or create a new relationship???
:return: The number of edges added. If none got added, or in case of error, an Exception is raised
"""
match_from = CypherUtils.validate_and_standardize(match_from, dummy_node_name="from") # Validate, and possibly create, the match dictionary
match_to = CypherUtils.validate_and_standardize(match_to, dummy_node_name="to") # Validate, and possibly create, the match dictionary
# Make sure there's no conflict in node dummy names
CypherUtils.check_match_compatibility(match_from, match_to)
# Unpack needed values from the match_from and match_to structures
nodes_from = CypherUtils.extract_node(match_from)
nodes_to = CypherUtils.extract_node(match_to)
where_clause = CypherUtils.combined_where([match_from, match_to]) # Combine the two WHERE clauses from each of the matches,
# and also prefix (if appropriate) the WHERE keyword
# Prepare the query to add the requested edge between the given nodes
q = f'''
MATCH {nodes_from}, {nodes_to}
{where_clause}
MERGE (from) -[:{rel_name}]-> (to)
'''
# Merge the data-binding dict's
combined_data_binding = CypherUtils.combined_data_binding([match_from, match_to])
self.debug_print(q, combined_data_binding, "add_edges")
result = self.update_query(q, combined_data_binding)
if self.debug:
print(" result of update_query in add_edges(): ", result)
number_relationships_added = result.get("relationships_created", 0) # If field isn't present, return a 0
if number_relationships_added == 0: # This could be more than 1: see notes above
raise Exception(f"The requested relationship {rel_name} was NOT added")
return number_relationships_added
    def remove_edges(self, match_from: Union[int, dict], match_to: Union[int, dict], rel_name) -> int:
        """
        Remove one or more edges (relationships)
        originating in any of the nodes specified by the match_from specifications,
        and terminating in any of the nodes specified by the match_to specifications,
        optionally matching the given relationship name.

        Return the number of edges removed; if none found, or in case of error, raise an Exception.

        Notes: - the nodes themselves are left untouched
               - more than 1 node could be present in either of the matches
               - the number of relationships deleted could be more than 1 even with a single "from" node and a single "to" node;
                        Neo4j allows multiple relationships with the same name between the same two nodes,
                        as long as the relationships differ in their properties

        :param match_from:  EITHER an integer with a Neo4j node id,
                                OR a dictionary of data to identify a node, or set of nodes, as returned by find()
        :param match_to:    EITHER an integer with a Neo4j node id,
                                OR a dictionary of data to identify a node, or set of nodes, as returned by find()
                            IMPORTANT: match_from and match_to, if created by calls to find(), MUST use different node dummy names;
                                       e.g., make sure that for match_from, find() used the option: dummy_node_name="from"
                                       and for match_to, find() used the option: dummy_node_name="to"

        :param rel_name:    (OPTIONAL) The name of the relationship to delete between the 2 specified nodes;
                                if None or a blank string, all relationships between those 2 nodes will get deleted

        :return:            The number of edges removed.  If none got deleted, or in case of error, an Exception is raised
        """
        match_from = CypherUtils.validate_and_standardize(match_from, dummy_node_name="from")   # Validate, and possibly create, the match dictionary
        match_to   = CypherUtils.validate_and_standardize(match_to,   dummy_node_name="to")     # Validate, and possibly create, the match dictionary

        # Make sure there's no conflict in the dummy node names
        CypherUtils.check_match_compatibility(match_from, match_to)

        # Unpack needed values from the match_from and match_to structures
        nodes_from = CypherUtils.extract_node(match_from)
        nodes_to   = CypherUtils.extract_node(match_to)

        where_clause = CypherUtils.combined_where([match_from, match_to])  # Combine the two WHERE clauses from each of the matches,
                                                                           # and also prefix (if appropriate) the WHERE keyword

        # Prepare the query
        # Two variants: an untyped pattern deletes ALL relationships between the
        # matched nodes; a typed one deletes only the named relationship.
        if rel_name is None or rel_name == "":  # Delete all relationships
            q = f'''
                MATCH {nodes_from} -[r]-> {nodes_to}
                {where_clause}
                DELETE r
                '''
        else:                                   # Delete a specific relationship
            q = f'''
                MATCH {nodes_from} -[r :{rel_name}]-> {nodes_to}
                {where_clause}
                DELETE r
                '''

        # Merge the data-binding dict's
        combined_data_binding = CypherUtils.combined_data_binding([match_from, match_to])

        self.debug_print(q, combined_data_binding, "remove_edges")

        result = self.update_query(q, combined_data_binding)
        if self.debug:
            print("    result of update_query in remove_edges(): ", result)

        number_relationships_deleted = result.get("relationships_deleted", 0)   # If field isn't present, return a 0
        if number_relationships_deleted == 0:       # This could be more than 1: see notes above
            raise Exception("No relationship was deleted")

        return number_relationships_deleted
def edges_exists(self, match_from: Union[int, dict], match_to: Union[int, dict], rel_name: str) -> bool:
    """
    Return True if one or more edges (relationships) with the specified name exist in the direction
    from and to the nodes (individual nodes or set of nodes) specified in the first two arguments.

    :param match_from: EITHER an integer with a Neo4j node id,
                            OR a dictionary of data to identify a node, or set of nodes, as returned by find()
    :param match_to:   EITHER an integer with a Neo4j node id,
                            OR a dictionary of data to identify a node, or set of nodes, as returned by find()

                       IMPORTANT: match_from and match_to, if created by calls to find(), MUST use different
                                  node dummy names; e.g., make sure that for match_from, find() used the option
                                  dummy_node_name="from", and for match_to, find() used dummy_node_name="to"

    :param rel_name:   The name of the relationship to look for between the 2 specified nodes
    :return:           True if the relationship was found, or False if not
    """
    # Validate, and possibly create, the match dictionaries
    match_from = CypherUtils.validate_and_standardize(match_from, dummy_node_name="from")
    match_to = CypherUtils.validate_and_standardize(match_to, dummy_node_name="to")

    # Make sure there's no conflict in the dummy node names
    CypherUtils.check_match_compatibility(match_from, match_to)

    # Unpack needed values from the match_from and match_to structures
    nodes_from = CypherUtils.extract_node(match_from)
    nodes_to = CypherUtils.extract_node(match_to)

    # Combine the two WHERE clauses from each of the matches,
    # and also prefix (if appropriate) the WHERE keyword
    where_clause = CypherUtils.combined_where([match_from, match_to])

    # Prepare the query
    q = f'''
        MATCH {nodes_from} -[r :{rel_name}]-> {nodes_to}
        {where_clause}
        RETURN r
        '''

    # Merge the data-binding dict's
    combined_data_binding = CypherUtils.combined_data_binding([match_from, match_to])

    self.debug_print(q, combined_data_binding, "edges_exists")

    result = self.query(q, combined_data_binding)
    if self.debug:
        print("    result of query in edges_exists(): ", result)

    # Any returned record means at least one matching edge exists
    # (there may be more than one)
    return len(result) > 0
def reattach_node(self, node, old_attachment, new_attachment, rel_name:str):    # TODO: test
    """
    Sever the relationship with the given name from the given node to the old_attachment node,
    and re-create it from the given node to the new_attachment node.

    :param node:            A "match" structure, as returned by find(). Use dummy_node_name "node"
    :param old_attachment:  A "match" structure, as returned by find(). Use dummy_node_name "old"
    :param new_attachment:  A "match" structure, as returned by find(). Use dummy_node_name "new"
    :param rel_name:        The name of the relationship to move
    :return:                True if the process was successful, or False otherwise
    """
    # Unpack the data to locate the 3 affected nodes
    node_start = CypherUtils.extract_node(node)
    node_old = CypherUtils.extract_node(old_attachment)
    node_new = CypherUtils.extract_node(new_attachment)

    # Combine the 3 WHERE clauses, and also prefix (if appropriate) the WHERE keyword
    where_clause = CypherUtils.combined_where([node, old_attachment, new_attachment])

    # BUG FIX: the original query only MERGE'd the new relationship and never deleted
    # the matched old one, yet the success check below requires exactly one deletion -
    # so the method could never return True.  DELETE the old edge in the same query.
    q = f'''
        MATCH {node_start} -[r :{rel_name}]-> {node_old}, {node_new}
        {where_clause}
        MERGE (node) -[:{rel_name}]-> (new)
        DELETE r
        '''

    # Merge all the 3 data-binding dict's
    combined_data_binding = CypherUtils.combined_data_binding([node, old_attachment, new_attachment])

    self.debug_print(q, combined_data_binding, "reattach_node")   # BUG FIX: label previously said "add_edges"

    result = self.update_query(q, combined_data_binding)
    #print("result of update_query in reattach_node(): ", result)

    # Success means exactly one relationship was created and exactly one deleted
    return (result.get("relationships_created") == 1) and (result.get("relationships_deleted") == 1)
def link_nodes_by_ids(self, node_id1:int, node_id2:int, rel:str, rel_props = None) -> None:
"""
Locate the pair of Neo4j nodes with the given Neo4j internal ID's.
If they are found, add a relationship - with the name specified in the rel argument,
and with the specified optional properties - from the 1st to 2nd node - unless already present.
EXAMPLE: link_nodes_by_ids(123, 88, "AVAILABLE_FROM", {'cost': 1000})
TODO: maybe return a status, or the Neo4j ID of the relationship just created
:param node_id1: An integer with the Neo4j internal ID to locate | |
import torch
from torch import Tensor
from logging import debug
import os
import sys
from warnings import resetwarnings
from torch._C import device
from torch.functional import Tensor
from h5_util import read_list, write_list
import h5py
import torch.distributed as dist
import numpy as np
from . import projection as proj, scheduler
import torch.distributed as dist
import torch.multiprocessing as mp
import time
from functools import partial
def get_weights_as_list(net):
    """Return the network's parameters as a plain list of their data tensors."""
    weight_tensors = []
    for param in net.parameters():
        weight_tensors.append(param.data)
    return weight_tensors
def get_random_weights(weights):
    """
    Build a random direction: one Gaussian tensor per weight tensor,
    each matching the shape of the corresponding entry in `weights`.
    """
    random_dir = []
    for weight in weights:
        random_dir.append(torch.randn(weight.size()))
    return random_dir
class Direction:
    """
    Static helpers for "directions": lists of tensors shaped like a model's
    weights, used to probe the loss landscape around a trained model.
    Provides creation, normalization, application, and HDF5 save/load.
    """

    ################################################################################
    # Normalization Functions
    ################################################################################
    @staticmethod
    def _normalize(direction: Tensor, weights: Tensor, norm='filter'):
        """
        Rescale the direction (in place) so that it has a norm comparable to the
        corresponding model tensor at the chosen granularity.

        Args:
            direction (tensor): one layer of the random direction
            weights (tensor): the corresponding layer of the original model
            norm: normalization method, 'filter' | 'layer' | 'weight' | 'dfilter' | 'dlayer'
        """
        if norm == 'filter':
            # Rescale the filters (weights in group) in 'direction' so that each
            # filter has the same norm as its corresponding filter in 'weights'.
            assert direction.dim() == 3 and weights.dim() == 3
            sc = weights.norm(dim=(-1, -2), keepdim=True)/(direction.norm(dim=(-1, -2), keepdim=True) + 1e-10)
            direction.mul_(sc)
        elif norm == 'layer':
            # Rescale the layer variables in the direction so that each layer has
            # the same norm as the layer variables in weights.
            direction.mul_(weights.norm()/direction.norm())
        elif norm == 'weight':
            # Rescale the entries in the direction so that each entry has the same
            # scale as the corresponding weight.
            direction.mul_(weights)
        elif norm == 'dfilter':
            # Rescale the entries in the direction so that each filter direction
            # has unit norm.
            # BUG FIX: the per-filter norm vector has shape (n_filters,); viewing it
            # back to direction.size() fails (numel mismatch).  Reshape it to
            # (n_filters, 1, ..., 1) so division broadcasts over each filter.
            dnorm = direction.view(direction.size(0), -1).norm(dim=-1)
            dnorm = dnorm.view(-1, *([1] * (direction.dim() - 1)))
            direction.div_(dnorm + 1e-10)
        elif norm == 'dlayer':
            # Rescale the entries in the direction so that the whole layer
            # direction has unit norm.
            direction.div_(direction.norm())

    @staticmethod
    def normalize_for_weights(direction, weights, norm='filter', ignore='biasbn'):
        """
        Normalize a whole direction (list of tensors) against a list of weight
        tensors, layer by layer.  1-D tensors (biases / BatchNorm parameters)
        are either zeroed out (ignore='biasbn') or copied from the weights.
        """
        assert(len(direction) == len(weights))
        for d, w in zip(direction, weights):
            if d.dim() <= 1:
                if ignore == 'biasbn':
                    d.fill_(0)   # ignore directions for weights with 1 dimension
                else:
                    d.copy_(w)   # keep directions for weights/bias that are only 1 per node
            else:
                Direction._normalize(d, w, norm)

    @staticmethod
    def create_random_direction(params, ignore='biasbn', norm=True, norm_type='filter'):
        """
        Set up a random (optionally normalized) direction with the same
        dimensions as the given parameters.

        Args:
            params: iterable of model parameters (e.g. net.parameters())
            ignore: 'biasbn' to zero out 1-D tensors (biases and BN parameters)
            norm: whether to normalize the direction against the weights
            norm_type: 'filter' | 'layer' | 'weight' | 'dlayer' | 'dfilter'
        Returns:
            a list of random tensors with the same shapes as the parameters
        """
        weights_data = [p.data for p in params]   # a list of parameter tensors
        direction = [torch.randn(w.size()) for w in params]
        if norm:
            Direction.normalize_for_weights(direction, weights_data, norm_type, ignore)
        return direction

    @staticmethod
    def set_weights(net, weights, directions=None, step=None):
        """
        Overwrite the network's weights with a specified list of tensors,
        or displace the given base weights along one or two directions by `step`.

        Args:
            net: the model whose parameters are overwritten in place
            weights: list of base weight tensors (one per parameter)
            directions: None, or a list of one or two directions
            step: scalar (1 direction) or pair of scalars (2 directions)
        """
        if directions is None:
            # Plain overwrite; you cannot specify a step length without a direction.
            for (p, w) in zip(net.parameters(), weights):
                p.data.copy_(w.type(type(p.data)))
        else:
            assert step is not None, 'If a direction is specified then step must be specified as well'
            if len(directions) == 2:
                dx = directions[0]
                dy = directions[1]
                changes = [d0*step[0] + d1*step[1] for (d0, d1) in zip(dx, dy)]
            else:
                changes = [d*step for d in directions[0]]
            # CONSISTENCY FIX: write through p.data (as in the branch above) so the
            # copy also works on leaf tensors that require grad outside no_grad().
            for (p, w, d) in zip(net.parameters(), weights, changes):
                p.data.copy_(w + d)

    @staticmethod
    def save(direction, h5_file, name):
        """Persist a direction (list of tensors) into the open HDF5 file under `name`."""
        write_list(h5_file, name, direction)

    @staticmethod
    def load(h5_file, name):
        """Load a direction (list of arrays) from the open HDF5 file under `name`.

        BUG FIX: this was missing @staticmethod, inconsistent with the sibling
        save(); callers invoke it as Direction.load(h5_file, name).
        """
        return read_list(h5_file, name)

    @staticmethod
    def to_tensor(dir, **kwargs):
        """Convert each array in a direction to a torch tensor (kwargs pass to torch.tensor)."""
        return [torch.tensor(arr, **kwargs) for arr in dir]
class Dir2D(object):
    """A pair of directions spanning a 2-D slice of a model's weight space."""

    def __init__(self, model=None, dirs = None, mode='random') -> None:
        """
        Either draw two fresh random directions shaped like `model`'s weights,
        or wrap a pre-built pair passed via `dirs`.
        """
        super().__init__()
        if model is not None:
            model_weights = get_weights_as_list(model)   # list representation of the weights
            self._dir = tuple(
                Direction.create_random_direction(model_weights, norm_type='layer')
                for _ in range(2)
            )
            # TODO: assert that these two directions are orthogonal
        elif dirs is not None and len(dirs) == 2:
            self._dir = dirs

    def save(self, h5file):
        """Write both directions into an open HDF5 file."""
        for axis, key in enumerate(('xdirection', 'ydirection')):
            Direction.save(self[axis], h5file, key)

    @classmethod
    def load(cls, h5file):
        """Rebuild a Dir2D from an open HDF5 file written by save()."""
        loaded = (Direction.load(h5file, 'xdirection'),
                  Direction.load(h5file, 'ydirection'))
        return cls(dirs=loaded)

    def __getitem__(self, dir_index):
        return self._dir[dir_index]

    def __len__(self):
        return len(self._dir)

    def to_tensor(self, **kwargs):
        """Cache tensor versions of both directions (kwargs pass to torch.tensor)."""
        self.tensors = tuple(Direction.to_tensor(d, **kwargs) for d in self._dir)

    def tensor(self, dir_index):
        """Return the cached tensor form of one direction (call to_tensor first)."""
        return self.tensors[dir_index]
class Surface:
    """
    A 2-D grid of evaluation points over a Dir2D slice of weight space,
    with one value grid ("layer") per tracked metric, backed by an HDF5 file.
    """

    def __init__(self, path_dir2d, rect, resolution, path, layers) -> None:
        """
        :param path_dir2d: path to the HDF5 file holding the 2-D directions
        :param rect: (xmin, ymin, xmax, ymax) extent of the grid
        :param resolution: (xnum, ynum) sample counts along x and y
        :param path: path of the HDF5 file backing this surface
        :param layers: dict mapping layer name -> 2-D ndarray of values
        """
        with h5py.File(path_dir2d) as f:
            self.dirs = Dir2D.load(f)
        self.dir_path = path_dir2d
        xmin, ymin, xmax, ymax = rect
        xnum, ynum = int(resolution[0]), int(resolution[1])
        self.xcoord = np.linspace(xmin, xmax, num=xnum)
        self.ycoord = np.linspace(ymin, ymax, num=ynum)
        self.shape = (xnum, ynum)
        self.path = path
        self.h5_file = None   # set by open()
        self.layers = layers

    def add_layer(self, name, value=-1):
        """Add a value grid filled with `value` (negative marks 'not yet computed')."""
        self.layers[name] = np.ones(self.shape)*value

    def mesh(self):
        """Return the (X, Y) meshgrid of all sample coordinates."""
        return np.meshgrid(self.xcoord, self.ycoord)

    def save(self):
        """Write coordinates and all layers to a NEW file at self.path (fails if it exists)."""
        f = h5py.File(self.path, 'w-')   # Create file, fail if exists
        f.attrs['dir_path'] = self.dir_path
        f['xcoord'] = self.xcoord
        f['ycoord'] = self.ycoord   # BUG FIX: previously wrote self.xcoord under 'ycoord'
        layer_grp = f.create_group('layers')
        for name, values in self.layers.items():
            layer_grp.create_dataset(name, data=values)
        f.close()

    @classmethod
    def load(cls, path):
        """Rebuild a Surface from a file written by save() (coordinates are restored verbatim)."""
        f = h5py.File(path, 'r')
        direction_path = f.attrs['dir_path']
        xcoord = f['xcoord'][:]
        ycoord = f['ycoord'][:]
        layer_grp = f['layers']
        layers = {}
        for name, values in layer_grp.items():
            layers[name] = values[:]
        # Dummy rect/resolution: the real coordinates are overwritten just below
        obj = cls(direction_path, (0, 0, 0, 0), (0, 0), path, layers)
        obj.xcoord = xcoord
        obj.ycoord = ycoord
        f.close()
        return obj

    def get_unplotted_indices(self, layer):
        """
        Args:
            layer: layer name, with value <= 0 where not yet calculated.
        Returns:
            - a list of flat indices into the layer for points not yet calculated
            - the corresponding coordinates, one (x, y) pair per row
        """
        vals = self.layers[layer]
        inds = np.array(range(vals.size))
        # Select the indices of the un-recorded entries, assuming un-recorded entries
        # are <= 0.  NOTE: any legitimately non-positive value (other than a loss)
        # would be selected and recalculated over and over.
        inds = inds[vals.ravel() <= 0]
        # Enumerate all coordinates in the 2D mesh and pick the unplotted ones
        xcoord_mesh, ycoord_mesh = self.mesh()
        s1 = xcoord_mesh.ravel()[inds]
        s2 = ycoord_mesh.ravel()[inds]
        return inds, np.c_[s1,s2]

    def open(self, mode):
        """Open the backing HDF5 file (mode as for h5py.File) for flush()/close()."""
        self.h5_file = h5py.File(self.path, mode)

    def flush(self):
        assert self.h5_file, 'Have yet open'
        self.h5_file.flush()

    def close(self):
        assert self.h5_file, 'Have yet open'
        self.h5_file.close()
class Sampler:
def __init__(self, model, surface, layer_names, device, rank=-1) -> None:
self.model = model
self.surface = surface
self.rank = rank
self.device = device
self.layer_names = layer_names
def prepair(self):
# if rank == 0: self.surface.open('r+')
self.surface.dirs.to_tensor(device=self.device)
# Generate a list of indices of 'losses' that need to be filled in.
# The coordinates of each unfilled index (with respect to the direction vectors
# stored in 'd') are stored in 'coords'.
# inds, coords, inds_nums = scheduler.get_job_indices(*surface.get_unplotted_indices(loss_key), rank, size)
self.layers_ts = [torch.tensor(self.surface.layers[name], device=self.device) for name in self.layer_names]
self.layers_fl_ts = [torch.ravel(layer) for layer in self.layers_ts]
model = self.model
model.eval()
model.to(self.device)
def reduce(self):
# Send updated plot data to the master node
if self.rank < 0: return 0
syc_start = time.time()
for layer in self.layers_fl_ts:
dist.reduce(layer, 0, op=dist.ReduceOp.MAX)
syc_time = time.time() - syc_start
return syc_time
def write(self):
    """Write the (reduced) layer tensors back into the surface's open HDF5 file.

    Requires self.surface.open(...) to have been called so h5_file is set.
    """
    # Only rank 0 (or a non-distributed run, rank=-1) writes - this avoids
    # concurrent write conflicts on the shared HDF5 file.
    if self.rank <= 0:
        for name, layer in zip(self.layer_names, self.layers_ts):
            self.surface.h5_file['layers'][name][:] = layer.cpu().numpy()
        self.surface.flush()
def run(self, evaluation, inds, coords, inds_nums):
"""
Calculate the loss values and accuracies of modified models in parallel
using MPI reduce.
"""
# dirs_tensor = (proj.tensorlist_to_tensor(directions[0]), proj.tensorlist_to_tensor(directions[1]))
print('Computing %d values for rank %d'% (len(inds), self.rank))
start_time = time.time()
total_sync = 0.0
with torch.no_grad():
model = self.model
weights = [torch.clone(p) for p in model.parameters()]
| |
# -*- coding: utf-8 -*-
# ******************************************************
# Filename : clientRun.py
# Author : <NAME>
# Email : <EMAIL>
# Blog : https://iyuanshuo.com
# Last modified: 2020-06-18 21:10
# Description :
# ******************************************************
import sys
from dagComps import transaction
import socket
import os
from dagSocket import dagClient
from torch.utils.tensorboard import SummaryWriter
import time
import threading
import shutil
import json
import random
import subprocess
import pickle
import pandas as pd
# Common Components
sys.path.append('../commonComponent')
import usefulTools
## FL related
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import copy
import numpy as np
from torchvision import datasets, transforms
import torch
sys.path.append('../federatedLearning')
from utils.sampling import mnist_iid, mnist_noniid, cifar_iid
from utils.options import args_parser
from models.Update import LocalUpdate
from models.Nets import MLP, CNNMnist, CNNCifar
from models.Fed import FedAvg
from models.test import test_img
import buildModels
import datetime
# DAG tip-selection parameters
# Number of tips selected by the leader of the shard blockchain
alpha = 3
# Minimum number of tips that must be kept in the DAG
beta = 3
# Number of tips confirmed (approved) by each new transaction
gamma = 2
# Index of this shard network
nodeNum = 1
# Rounds trained in shard (currently unused)
# shard_round = 4

# Shell environment exports for Hyperledger Fabric Org1 peer CLI
fabricLocation = "export FabricL=/home/shawn/Documents/fabric-samples/test-network"
shellEnv1 = "export PATH=${FabricL}/../bin:$PATH"
shellEnv2 = "export FABRIC_CFG_PATH=${FabricL}/../config/"
shellEnv3 = "export CORE_PEER_TLS_ENABLED=true"
shellEnv4 = "export CORE_PEER_LOCALMSPID=\"Org1MSP\""
shellEnv5 = "export CORE_PEER_TLS_ROOTCERT_FILE=${FabricL}/organizations/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls/ca.crt"
shellEnv6 = "export CORE_PEER_MSPCONFIGPATH=${FabricL}/organizations/peerOrganizations/org1.example.com/users/Admin@org1.example.com/msp"
shellEnv7 = "export CORE_PEER_ADDRESS=localhost:7051"
# Combined one-shot environment setup string.
# NOTE(review): oneKeyEnv does not include fabricLocation, so ${FabricL} may be
# unset in the spawned shell unless exported elsewhere - confirm against usage.
oneKeyEnv = shellEnv1 + " && " + shellEnv2 + " && " + shellEnv3 + " && " + shellEnv4 + " && " + shellEnv5 + " && " + shellEnv6 + " && " + shellEnv7

# Accuracy and loss of the shard-trained model on the test set
nodeTestAcc = []
nodeTestLoss = []
# Accuracy and loss on the training set
nodeTrainAcc = []
nodeTrainLoss = []
def main(aim_addr='172.16.17.32'):
if os.path.exists('./clientS'):
shutil.rmtree('./clientS')
os.mkdir('./clientS')
if os.path.exists('./clientS/paras'):
shutil.rmtree('./clientS/paras')
os.mkdir('./clientS/paras')
if os.path.exists('./clientS/paras/apvTrans'):
shutil.rmtree('./clientS/paras/apvTrans')
os.mkdir('./clientS/paras/apvTrans')
if os.path.exists('./clientS/paras/local'):
shutil.rmtree('./clientS/paras/local')
os.mkdir('./clientS/paras/local')
if os.path.exists('./clientS/tipsJson'):
shutil.rmtree('./clientS/tipsJson')
os.mkdir('./clientS/tipsJson')
if os.path.exists('./clientS/apvJson'):
shutil.rmtree('./clientS/apvJson')
os.mkdir('./clientS/apvJson')
# build model
net_glob, args, dataset_train, dataset_test, dict_users = buildModels.modelBuild()
net_glob.train()
## copy weights
w_glob = net_glob.state_dict()
iteration_count = 0
# selected device
## init the list of device name
allDeviceName = []
for i in range(args.num_users):
allDeviceName.append("device"+("{:0>5d}".format(i)))
deviceSelected = []
# Randomly select the devices
# m = max(int(args.frac * args.num_users), 1) # args.frac is the fraction of users
# idxs_users = np.random.choice(range(args.num_users), m, replace=False)
# print('\n**************************** Idxs of selected devices *****************************')
# print('The idxs of selected devices are\n', idxs_users)
# print('*************************************************************************************\n')
# ## Exchange the info of selected device with fabric
# with open('../commonComponent/selectedDeviceIdxs.txt', 'wb') as f:
# pickle.dump(idxs_users, f)
idxs_users = [ 5, 56, 76, 78, 68, 25, 47, 15, 61, 55]
# idxs_users = [60, 37, 27, 70, 79, 34, 18, 88, 57, 98]
# idxs_users = [48, 46, 33, 82, 4, 7, 6, 91, 92, 52]
dateNow = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
basic_acc_list = []
## tensorboard Part
# writer = SummaryWriter()
# writer.add_scalars('Acc', {'Acc_with_Check': 0}, 0)
# tenfig_data = []
# writer.add_scalars('Acc', {'Acc_without_Check': 0}, 0)
# AWOC_fileName = '/root/shard3/data/shard_3_round3_tenfig_data_cnn_20200821135421.csv'
# acc_wo_check_csv = pd.read_csv(AWOC_fileName)
# acc_wo_check_data = np.array(acc_wo_check_csv['shard_3'])
# writer.add_scalars('Loss', {'Loss_with_Check': 1}, 0)
# tenfig_data_loss = []
# writer.add_scalars('Loss', {'Loss_without_Check': 1}, 0)
# LWOC_fileName = '/root/shard3/data/shard_3_round3_tenfig_data_loss_cnn_20200821135421.csv'
# loss_wo_check_csv = pd.read_csv(LWOC_fileName)
# loss_wo_check_data = np.array(loss_wo_check_csv['shard_3'])[1:]
# tensor_iter = 1
## tensorboard part
## Exchange the info of selected device with fabric: dict_user_fileHash:QmTuvPRDGnLm95fL7uxxhvegoWL3Q9YyAUtEsTK5ZetN4W
dict_users_file = "../commonComponent/dict_users.pkl"
dict_userf_fileHash = "QmTuvPRDGnLm95fL7uxxhvegoWL3Q9YyAUtEsTK5ZetN4W"
while 1:
dictUserGetStatus, dictUsersttCodeGet = usefulTools.ipfsGetFile(dict_userf_fileHash, dict_users_file)
print('The filehash of this dict_user is ' + dict_userf_fileHash + ' , and the file is ' + dict_users_file + '!')
if dictUsersttCodeGet == 0:
print(dictUserGetStatus.strip())
print('The dict_user file ' + dict_users_file + ' has been downloaded!\n')
break
else:
print(dictUserGetStatus)
print('\nFailed to download the dict_user file ' + dict_users_file + ' !\n')
with open(dict_users_file, 'rb') as f:
dict_users = pickle.load(f)
for idx in idxs_users:
deviceSelected.append(allDeviceName[idx])
print('\n**************************** Selected devices *****************************')
print('The idxs of selected devices are\n', deviceSelected)
print('*****************************************************************************\n')
while 1:
print('\n\n\n**************************** Iteration %d *****************************'%iteration_count)
# init the task ID
taskID = 'task'+str(random.randint(1,10))+str(random.randint(1,10))+str(random.randint(1,10))+str(random.randint(1,10))
# Choose and require the apv trans
apv_trans_cands = []
if iteration_count == 0:
apv_trans_cands.append('GenesisBlock')
else:
tips_list = 'tip_list'
tips_file = './clientS/tipsJson/iteration-' + str(iteration_count) + '-' + tips_list + '.json'
dagClient.client_tips_require(aim_addr, tips_list, tips_file)
## try to fix the JSONDecodeError
try:
with open(tips_file, encoding='utf-8-sig', errors='ignore', mode='r') as f1:
tips_dict = json.load(f1)
f1.close()
except:
time.sleep(2)
dagClient.client_tips_require(aim_addr, tips_list, tips_file)
with open(tips_file, encoding='utf-8-sig', errors='ignore', mode='r') as f1:
tips_dict = json.load(f1)
f1.close()
if len(tips_dict) <= alpha:
apv_trans_cands = list(tips_dict.keys())
else:
apv_trans_cands = random.sample(tips_dict.keys(), alpha)
print('\n************************* Select Candidates Tips *****************************')
print('The candidates tips are ', apv_trans_cands)
print('********************************************************************************\n')
# Get the trans file and the model paras file
apv_trans_cands_dict = {}
for apvTrans in apv_trans_cands:
apvTransFile = './clientS/apvJson/' + apvTrans + '.json'
dagClient.client_trans_require(aim_addr, apvTrans, apvTransFile)
print('\nThis approved trans is ', apvTrans, ', and the file is ', apvTransFile)
apvTransInfo = transaction.read_transaction(apvTransFile)
apvParasFile = './clientS/paras/apvTrans/iteration-' + str(iteration_count) + '-' + apvTrans + '.pkl'
while 1:
fileGetStatus, sttCodeGet = usefulTools.ipfsGetFile(apvTransInfo.model_para, apvParasFile)
print('The filehash of this approved trans is ' + apvTransInfo.model_para + ', and the file is ' + apvParasFile + '!')
if sttCodeGet == 0:
print(fileGetStatus.strip())
print('The apv parasfile ' + apvParasFile + ' has been downloaded!\n')
break
else:
print(fileGetStatus)
print('\nFailed to download the apv parasfile ' + apvParasFile + ' !\n')
apv_trans_cands_dict[apvTransInfo.name] = float(apvTransInfo.model_acc)
# select tips for aggregation of basic model !!! key function
apv_trans_final = []
if len(apv_trans_cands_dict) == alpha:
sort_dict = sorted(apv_trans_cands_dict.items(),key=lambda x:x[1],reverse=True)
for i in range(gamma):
apv_trans_final.append(sort_dict[i][0])
else:
apv_trans_final = apv_trans_cands
# load the apv paras
w_apv = []
for item in apv_trans_final:
apvParasFile = './clientS/paras/apvTrans/iteration-' + str(iteration_count) + '-' + item + '.pkl'
net_glob.load_state_dict(torch.load(apvParasFile, map_location=torch.device('cpu')))
w_tmp = net_glob.state_dict()
w_apv.append(copy.deepcopy(w_tmp))
if len(w_apv) == 1:
w_glob = w_apv[0]
else:
w_glob = FedAvg(w_apv)
baseParasFile = './clientS/paras/baseModelParas-iter'+str(iteration_count)+'.pkl'
torch.save(w_glob, baseParasFile)
# evalute the acc of basic model obtain from DAG
basicModelAcc, basicModelLoss = buildModels.evalua(net_glob, w_glob, dataset_test, args)
basicModelAcc = basicModelAcc.cpu().numpy().tolist()
print("\n************************************")
print("Acc of the basic model in iteration "+str(iteration_count)+" is "+str(basicModelAcc))
print("************************************")
basic_acc_list.append(basicModelAcc)
basicAccDf = pd.DataFrame({'shard_{}'.format(nodeNum):basic_acc_list})
basicAccDf.to_csv("../data/shard_{}_round{}_basic_acc_{}_{}.csv".format(nodeNum, args.epochs, args.model, dateNow),index=False,sep=',')
# Add the paras file of base model to ipfs network for shard training
while 1:
basefileHash, baseSttCode = usefulTools.ipfsAddFile(baseParasFile)
if baseSttCode == 0:
print('\nThe base mode parasfile ' + baseParasFile + ' has been uploaded!')
print('And the fileHash is ' + basefileHash + '\n')
break
else:
print('Error: ' + basefileHash)
print('\nFailed to uploaded the aggregated parasfile ' + baseParasFile + ' !\n')
# Task release & model aggregation
## Task release
taskEpochs = args.epochs
taskInitStatus = "start"
taskUsersFrac = args.frac
while 1:
taskRelease = subprocess.Popen(args=['../commonComponent/interRun.sh release '+taskID+' '+str(taskEpochs)+' '+taskInitStatus+' '+str(taskUsersFrac)], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
trOuts, trErrs = taskRelease.communicate(timeout=10)
if taskRelease.poll() == 0:
print('*** ' + taskID + ' has been released! ***')
print('*** And the detail of this task is ' + trOuts.strip() + '! ***\n')
break
else:
print(trErrs)
print('*** Failed to release ' + taskID + ' ! ***\n')
time.sleep(2)
## Publish the init base model
### taskEpoch template {"Args":["set","taskID","{"epoch":1,"status":"training","paras":"fileHash"}"]}
while 1:
spcAggModelPublish = subprocess.Popen(args=['../commonComponent/interRun.sh aggregated '+taskID+' 0 training '+basefileHash], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
aggPubOuts, aggPubErrs = spcAggModelPublish.communicate(timeout=10)
if spcAggModelPublish.poll() == 0:
print('*** The init aggModel of ' + taskID + ' has been published! ***')
print('*** And the detail of the init aggModel is ' + aggPubOuts.strip() + ' ! ***\n')
break
else:
print(aggPubErrs)
print('*** Failed to publish the init aggModel of ' + taskID + ' ! ***\n')
## wait the local train
time.sleep(10)
## Aggregated the local model trained by the selected devices
currentEpoch = 1
aggEchoFileHash = ''
aggModelAcc = 50.0
while (currentEpoch <= args.epochs):
flagList = set(copy.deepcopy(deviceSelected))
w_locals = []
while (len(flagList) != 0):
flagSet = set()
ts = []
lock = threading.Lock()
for deviceID in flagList:
localFileName = './clientS/paras/local/' + taskID + '-' + deviceID + '-epoch-' + str(currentEpoch) + '.pkl'
t = threading.Thread(target=usefulTools.queryLocal,args=(lock,taskID,deviceID,currentEpoch,flagSet,localFileName,))
t.start()
ts.append(t)
for t in ts:
t.join()
time.sleep(2)
flagList = flagList - flagSet
for deviceID in deviceSelected:
localFileName = './clientS/paras/local/' + taskID + '-' + deviceID + '-epoch-' + str(currentEpoch) + '.pkl'
## no check
# net_glob.load_state_dict(torch.load(localFileName))
# tmpParas = net_glob.state_dict()
# w_locals.append(copy.deepcopy(tmpParas))
## check the acc of the models trained by selected device & drop the low quality model
canddts_dev_pas = torch.load(localFileName,map_location=torch.device('cpu'))
acc_canddts_dev, loss_canddts_dev = buildModels.evalua(net_glob, canddts_dev_pas, dataset_test, args)
acc_canddts_dev = acc_canddts_dev.cpu().numpy().tolist()
print("Test acc of the model trained | |
# Repo: emma-d-cotter/Hologram-Processing - file: code/reconstruction.py
import os
import time
import warnings

import cv2
import numpy as np
import torch

from detection import *
from params import *
from utils import *
# written by: <NAME>
# <EMAIL>
# functions for reconstruction of holograms
# includes:
# physical_to_optical and optical_to_physical - functions to convert between physical
# and optical pathlength given indices of refraction of water and air
# reconstruct - reconstruct a hologram at specified plane(s)
# holofft and propogate - worker functions for reconstruction
# bg_subtract - perform background subtraction (should be done before reconstruction)
# gsfocus - perform golden section search optimized auto-focus
# stdcorr - calculate standard deviation correlation focus metric
def physical_to_optical(L):
    """
    Convert a physical distance L (measured from the camera) to the optical
    path length (the propagation distance z to use in reconstruction),
    accounting for the air gap of length L_air (index n_air) followed by
    water of length L_water (index n_water); all constants come from params.

    e.g., if the physical distance is 500 mm from the camera, what is the
    propagation distance z to use in reconstruction?
    """
    if L < L_air:
        # Path lies entirely within the air gap.
        # BUG FIX: the original returned the constant L_air/n_air here,
        # ignoring the requested distance L.
        L_optical = L/n_air
    elif L <= (L_air + L_water):
        # Full air gap plus a partial water path
        L_optical = L_air/n_air + (L-L_air)/n_water
    else:
        warnings.warn('Warning! Requested path length is longer than separation distance')
        L_optical = L_air/n_air + (L-L_air)/n_water
    return L_optical
def optical_to_physical(L):
    """Convert an optical path length to a physical path length.

    e.g., if auto-focus says that the target is 500 mm from the camera,
    how far is it *really*?  Constants L_air, L_water, n_air, n_water
    come from params.

    NOTE(review): this function does not appear to be an exact algebraic
    inverse of physical_to_optical (the middle-branch boundary uses
    L_air*n_air + (L_water - L_air)*n_water, and the result uses
    (L - L_air)*n_water rather than (L - L_air/n_air)*n_water).  With
    n_air ~= 1 the difference is tiny, but the intended convention should
    be confirmed before relying on round-trips.
    """
    if L < L_air*n_air:
        # Entirely within the air gap: physical = optical * n_air
        L_physical = L*n_air
    elif L <= (L_air*n_air + (L_water-L_air)*n_water):
        # Air gap plus partial water path
        L_physical = L_air*n_air + (L-L_air)*n_water
    else:
        warnings.warn('Warning! Requested path length is longer than separation distance')
        L_physical = L_air*n_air + (L-L_air)*n_water
    return L_physical
def reconstruct(holo,zstack,useGPU=True,savedata=False,outdir=None,outputdata=True):
    """
    Reconstruct a hologram at one or more planes.

    All operations are performed as pytorch tensors so they can run on GPU.
    Input distances are PHYSICAL distances, not OPTICAL distances.

    Args:
        holo: background-subtracted hologram or hologram region (numpy array)
        zstack: reconstruction plane distance or distances (physical path length)
        useGPU: use GPU if available (boolean)
        savedata: save reconstructions to disk? (boolean)
        outdir: path to store reconstructions, if requested (str)
        outputdata: return reconstructions?  Set False when many planes are
                    saved to disk without keeping them in memory.

    Returns:
        (rstack, elapsed): reconstruction(s) at the requested plane(s)
        (None when outputdata=False and zstack is a sequence), and the
        elapsed wall-clock time in seconds.

    Raises:
        ValueError: if savedata is requested without an output directory.
    """
    # Validate/create the output directory before doing any work
    if (savedata) and (outdir is None):
        # BUG FIX: 'raise "..."' raises TypeError in Python 3 (exceptions must
        # derive from BaseException); raise a proper exception type instead.
        raise ValueError("Must specify out directory to store data")
    if (savedata) and (not os.path.isdir(outdir)):
        os.mkdir(outdir)

    # start timer
    t = time.time()

    # Calculate the FFT of the hologram once, regardless of how many
    # reconstruction planes are requested.
    A,params = holofft(holo,useGPU)

    # Indexing differs for a single reconstruction plane vs. multiple planes
    if isinstance(zstack, (int, float)):
        z = zstack
        rstack = propogate(physical_to_optical(z), A, params, useGPU)
    else:
        if outputdata:
            rstack = torch.zeros((holo.shape[0],holo.shape[1],len(zstack)))
        else:
            rstack = None
        for i, z in enumerate(zstack):
            im = propogate(physical_to_optical(z),A,params,useGPU)
            if savedata:
                # File name encodes the physical distance in 0.1 mm units
                zstr = str(np.round(optical_to_physical(z)*10000)).zfill(5)
                outfile = os.path.join(outdir,zstr+'.jpg')
                cv2.imwrite(outfile,im.cpu().numpy())
            if outputdata:
                rstack[:,:,i] = im

    elapsed = time.time() - t
    #print('Reconstruction took: %s' % (elapsed))
    return rstack, elapsed
def holofft(holo,useGPU=True):
    """First step in reconstruction: compute the hologram's 2-D FFT once,
    so it does not need to be recalculated for every reconstruction plane
    (e.g. during auto-focusing).

    Args:
        holo: hologram or hologram region (2-D numpy array)
        useGPU: move tensors to CUDA if available (boolean)
    Returns:
        (A, params): the hologram spectrum as a complex tensor, and a dict of
        padding/geometry values needed by propogate().

    NOTE(review): uses the pre-1.8 torch.fft(tensor, ndim) function API (removed
    in PyTorch 1.8) and complexto2D/tocomplex helpers from utils - this module
    requires an older torch; confirm the pinned version.
    Constants pix_size and wavelength come from params.
    """
    H = holo

    # get dimensions of hologram
    No = H.shape[0]
    Mo = H.shape[1]

    # Zero-pad the hologram to a square; p1 records whether an extra row/column
    # was added to make the padding symmetric, ar records the aspect case so
    # propogate() can trim the result back to the original dimensions.
    if Mo < No:
        d = int(np.ceil((No-Mo)/2))
        if (d-((No-Mo)/2)) != 0:
            # Odd difference: pad one extra row so both sides stay symmetric
            H = np.pad(H,((0,1),(d,d)))
            N = No + 1
            M = No + 1
            p1 = 1 # add one to pad
        else:
            H = np.pad(H,((0,0),(d,d)))
            N = No
            M = No
            p1 = 0 # add zero to pad
        ar = 0 # tall image
    elif No < Mo:
        d = int(np.ceil((Mo-No)/2))
        if (d-((Mo-No)/2)) != 0:
            H = np.pad(H,((d,d),(0,1)))
            N = Mo + 1
            M = Mo + 1
            p1 = 1 # add one to pad
        else:
            H = np.pad(H,((d,d),(0,0)))
            N = Mo
            M = Mo
            p1 = 0 # add zero to pad
        ar = 1 # squat image
    else:
        # Already square: no padding required
        ar = 0
        d = 0
        N = No
        M = Mo
        p1 = 0;

    # convert to tensor and send to GPU if available/requested
    if useGPU:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    else:
        device = "cpu"
    H = torch.from_numpy(H).type(torch.cfloat).to(device)

    # create grid of indices (1-based, matching the original formulation)
    u = torch.arange(1,N+1)
    v = torch.arange(1,M+1)
    V,U = torch.meshgrid(v,u)

    L = pix_size*N # side length
    alpha = wavelength*(U-N/2-1)/L
    beta = wavelength*(V-M/2-1)/L

    # Checkerboard phase factors that center the spectrum (fftshift equivalent)
    f1 = torch.exp(1j*np.pi*(U+V)).to(device)
    f2 = torch.exp(-1j*np.pi*(U+V)).to(device)
    alpha = wavelength*(U-N/2-1)/L
    beta = wavelength*(V-M/2-1)/L

    # calculate FFT of hologram (via the 2-channel real representation)
    A = complexto2D(f1*H,device)
    A = torch.fft(A,2)
    A = tocomplex(A, device)

    # store parameters needed for propogation
    params = {
        'M':M,
        'N':N,
        'd':d,
        'beta':beta,
        'alpha':alpha,
        'p1':p1,
        'ar':ar,
        'f1':f1,
        'f2':f2
        }
    return A, params
def propogate(z, A, params,useGPU=True):
    # propogate hologram to distance z from sensor (perform reconstruction)
    # inputs
    #    z - distance, in m
    #    A, params - output from "holofft"
    # returns: reconstructed intensity image, trimmed back to the original
    # hologram dimensions and normalized to a max of 255
    # extract params
    M = params['M']
    N = params['N']  # NOTE(review): extracted but unused in this function
    d = params['d']
    beta = params['beta']
    alpha = params['alpha']
    p1 = params['p1']
    ar = params['ar']
    f1 = params['f1']
    f2 = params['f2']
    # convert to tensor and send to GPU if available/requested
    if useGPU:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    else:
        device = "cpu"
    # calculate propogater (angular-spectrum transfer function for distance z)
    S = torch.exp(-(2*np.pi*z*1j/wavelength)*
                  torch.sqrt(1-torch.square(alpha)-torch.square(beta))).to(device)
    # propogate the image spectrum
    step = (f1*A)*S
    # inverse FFT (legacy torch.fft real/imag representation)
    step = complexto2D(f2*step, device)
    step = torch.ifft(step,2)
    step = tocomplex(step, device)
    step = torch.abs(f2*step)
    # normalize to [0, 255]
    m = torch.max(step)
    step = 255*step/m
    # trim back to original dimensions (undo the padding added in holofft;
    # p1 == 1 means one extra row/column was added for odd size differences)
    if ar == 1:
        # squat image: rows padded by d each side, possibly one extra column
        if p1 == 1:
            img = step[d:M-d,0:-1]
        else:
            img = step[d:M-d,:]
    else:
        # tall image: columns padded by d each side, possibly one extra row
        if p1 == 1:
            img = step[0:-1,d:M-d]
        else:
            img = step[:,d:M-d]
    return img
# background subtraction function
def bg_subtract(holoname, holometa):
    # Subtract a median background (built from the 5 holograms recorded just
    # before this one, at a similar depth) from the named hologram.
    #   holoname: name of hologram file (excluding .tif)
    #   holometa: holographic camera metadata structure for a dive
    #             (output of load_holometa: pandas data frame with paths and
    #             timestamps of all recorded holograms)
    # returns (holo, flag):
    #   flag = 0 -> holo is the background-subtracted image
    #   flag = 1 -> fewer than 3 usable background frames; holo is returned raw
    file_idx = int(np.where([[holoname in path] for path in holometa['file']])[0])
    imagepath = holometa['file'][file_idx]
    holo = cv2.imread(imagepath, cv2.IMREAD_GRAYSCALE)
    # find 5 previous files for background
    bg_idx = np.arange(file_idx - 5, file_idx)
    # keep only previous files at approximately the same depth (within 3 m);
    # don't process if there are fewer than 3 background files
    bg_idx = bg_idx[np.abs(holometa['z'][file_idx] - holometa['z'][bg_idx]) < 3]
    if bg_idx.size > 2:
        bg = np.zeros((holo.shape[0], holo.shape[1], bg_idx.shape[0]))
        for i, idx in enumerate(bg_idx):
            bg[:, :, i] = cv2.imread(holometa['file'][idx], cv2.IMREAD_GRAYSCALE)
        # per-pixel median rejects transient particles in the background frames
        bg = np.median(bg, axis=2)
        holo = holo - bg
        flag = 0
    else:
        print('Sufficient Background data not available')
        flag = 1
    return holo, flag
def gsfocus(H, reginds, precision,zhi=1.2536,zlo=0.230):
    # golden section search focus.
    # inputs:
    #    H - hologram or hologram region to be focused
    #    reginds - if the hologram region has been padded, the indices of the
    #              bounding box of the diffraction pattern within H
    #              (xmin,ymin,xmax,ymax) [output from extract_region, in utils.py]
    #    precision - desired precision (in m). Typical value 0.001 m
    #    zhi - lower limit of imaging volume (physical distance from camera)
    #    zlo - upper limit of imaging volume (physical distance from camera)
    # returns: focused reconstruction and physical focus distance (zf), in m
    t = time.time()
    # FFT is computed once and reused for every trial propagation
    A, params = holofft(H)
    phi = 1/(( 1 + 5**0.5)/2)  # inverse golden ratio, ~0.618
    dz = 999  # current bracket width; large start forces at least one iteration
    n = 1  # NOTE(review): counts reconstructions but is never used or returned
    (xmin,ymin,xmax,ymax) = reginds
    while dz > precision:
        # interior probe points at the golden-section split of [zlo, zhi]
        # (z1 < z2; z1 = zlo + 0.382*range, z2 = zlo + 0.618*range)
        z1 = -(phi*(zhi-zlo) - zhi)
        z2 = phi*(zhi-zlo) + zlo
        im1 = propogate(physical_to_optical(z1), A, params)
        im1 = im1[xmin:xmax,ymin:ymax]
        im2 = propogate(physical_to_optical(z2), A, params)
        im2 = im2[xmin:xmax,ymin:ymax]
        # negate the sharpness metric so the search minimizes
        y1 = -stdcorr(im1)
        y2 = -stdcorr(im2)
        n = n + 2
        if y2 > y1:
            zhi = z2  # minimum lies in [zlo, z2]
        else:
            zlo = z1  # minimum lies in [z1, zhi]
        dz = np.abs(zhi - zlo)
    # return whichever probe point scored better on the final iteration
    if y1 < y2:
        zf = z1
        im = im1
    else:
        zf = z2
        im = im2
    elapsed = time.time() - t  # NOTE(review): measured but not returned
    return im, zf
def stdcorr(img):
# calculate standard correlation focus metric for a given image/reconstruction
# works for numpy array | |
<filename>FlaskRESTFULAPITest_JE/venv/Lib/site-packages/werkzeug/debug/tbtools.py
# -*- coding: utf-8 -*-
"""
werkzeug.debug.tbtools
~~~~~~~~~~~~~~~~~~~~~~
This module provides various traceback related utility functions.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import codecs
import inspect
import json
import os
import re
import sys
import sysconfig
import traceback
from tokenize import TokenError
from .._compat import PY2
from .._compat import range_type
from .._compat import reraise
from .._compat import string_types
from .._compat import text_type
from .._compat import to_native
from .._compat import to_unicode
from ..filesystem import get_filesystem_encoding
from ..utils import cached_property
from ..utils import escape
from .console import Console
# regex for a PEP 263 "coding:" declaration in the first lines of a source file
_coding_re = re.compile(br"coding[:=]\s*([-\w.]+)")
# splits raw (bytes) source into individual lines
_line_re = re.compile(br"^(.*?)$", re.MULTILINE)
# matches the start of a def / lambda / decorator line
_funcdef_re = re.compile(r"^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)")
# UTF-8 byte order mark
UTF8_COOKIE = b"\xef\xbb\xbf"
# exceptions that should be re-raised rather than rendered by the debugger;
# GeneratorExit is added conditionally for very old interpreters lacking it
system_exceptions = (SystemExit, KeyboardInterrupt)
try:
    system_exceptions += (GeneratorExit,)
except NameError:
    pass
HEADER = u"""\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>%(title)s // Werkzeug Debugger</title>
<link rel="stylesheet" href="?__debugger__=yes&cmd=resource&f=style.css"
type="text/css">
<!-- We need to make sure this has a favicon so that the debugger does
not by accident trigger a request to /favicon.ico which might
change the application state. -->
<link rel="shortcut icon"
href="?__debugger__=yes&cmd=resource&f=console.png">
<script src="?__debugger__=yes&cmd=resource&f=jquery.js"></script>
<script src="?__debugger__=yes&cmd=resource&f=debugger.js"></script>
<script type="text/javascript">
var TRACEBACK = %(traceback_id)d,
CONSOLE_MODE = %(console)s,
EVALEX = %(evalex)s,
EVALEX_TRUSTED = %(evalex_trusted)s,
SECRET = "%(secret)s";
</script>
</head>
<body style="background-color: #fff">
<div class="debugger">
"""
FOOTER = u"""\
<div class="footer">
Brought to you by <strong class="arthur">DON'T PANIC</strong>, your
friendly Werkzeug powered traceback interpreter.
</div>
</div>
<div class="pin-prompt">
<div class="inner">
<h3>Console Locked</h3>
<p>
The console is locked and needs to be unlocked by entering the PIN.
You can find the PIN printed out on the standard output of your
shell that runs the server.
<form>
<p>PIN:
<input type=text name=pin size=14>
<input type=submit name=btn value="Confirm Pin">
</form>
</div>
</div>
</body>
</html>
"""
PAGE_HTML = (
HEADER
+ u"""\
<h1>%(exception_type)s</h1>
<div class="detail">
<p class="errormsg">%(exception)s</p>
</div>
<h2 class="traceback">Traceback <em>(most recent call last)</em></h2>
%(summary)s
<div class="plain">
<form action="/?__debugger__=yes&cmd=paste" method="post">
<p>
<input type="hidden" name="language" value="pytb">
This is the Copy/Paste friendly version of the traceback. <span
class="pastemessage">You can also paste this traceback into
a <a href="https://gist.github.com/">gist</a>:
<input type="submit" value="create paste"></span>
</p>
<textarea cols="50" rows="10" name="code" readonly>%(plaintext)s</textarea>
</form>
</div>
<div class="explanation">
The debugger caught an exception in your WSGI application. You can now
look at the traceback which led to the error. <span class="nojavascript">
If you enable JavaScript you can also use additional features such as code
execution (if the evalex feature is enabled), automatic pasting of the
exceptions and much more.</span>
</div>
"""
+ FOOTER
+ """
<!--
%(plaintext_cs)s
-->
"""
)
CONSOLE_HTML = (
HEADER
+ u"""\
<h1>Interactive Console</h1>
<div class="explanation">
In this console you can execute Python expressions in the context of the
application. The initial namespace was created by the debugger automatically.
</div>
<div class="console"><div class="inner">The Console requires JavaScript.</div></div>
"""
+ FOOTER
)
SUMMARY_HTML = u"""\
<div class="%(classes)s">
%(title)s
<ul>%(frames)s</ul>
%(description)s
</div>
"""
FRAME_HTML = u"""\
<div class="frame" id="frame-%(id)d">
<h4>File <cite class="filename">"%(filename)s"</cite>,
line <em class="line">%(lineno)s</em>,
in <code class="function">%(function_name)s</code></h4>
<div class="source %(library)s">%(lines)s</div>
</div>
"""
SOURCE_LINE_HTML = u"""\
<tr class="%(classes)s">
<td class=lineno>%(lineno)s</td>
<td>%(code)s</td>
</tr>
"""
def render_console_html(secret, evalex_trusted=True):
    """Render the standalone interactive-console page from CONSOLE_HTML."""
    context = {
        "evalex": "true",
        "console": "true",
        "title": "Console",
        "secret": secret,
        "traceback_id": -1,
    }
    # the console always has evalex on; only trust is configurable
    context["evalex_trusted"] = "true" if evalex_trusted else "false"
    return CONSOLE_HTML % context
def get_current_traceback(
    ignore_system_exceptions=False, show_hidden_frames=False, skip=0
):
    """Get the current exception info as `Traceback` object. Per default
    calling this method will reraise system exceptions such as generator exit,
    system exit or others. This behavior can be disabled by passing `False`
    to the function as first parameter.
    """
    exc_type, exc_value, tb = sys.exc_info()
    if ignore_system_exceptions and exc_type in system_exceptions:
        # propagate SystemExit / KeyboardInterrupt / GeneratorExit untouched
        reraise(exc_type, exc_value, tb)
    # drop the first `skip` frames, but never walk past the end of the tb
    for _ in range_type(skip):
        if tb.tb_next is None:
            break
        tb = tb.tb_next
    tb = Traceback(exc_type, exc_value, tb)
    if not show_hidden_frames:
        tb.filter_hidden_frames()
    return tb
class Line(object):
    """One rendered source line in the traceback view."""

    __slots__ = ("lineno", "code", "in_frame", "current")

    def __init__(self, lineno, code):
        self.lineno = lineno
        self.code = code
        self.in_frame = False
        self.current = False

    @property
    def classes(self):
        """CSS classes for this line, reflecting its highlight state."""
        css = ["line"]
        if self.in_frame:
            css.append("in-frame")
        if self.current:
            css.append("current")
        return css

    def render(self):
        """Return the HTML table row for this line."""
        context = {
            "classes": u" ".join(self.classes),
            "lineno": self.lineno,
            "code": escape(self.code),
        }
        return SOURCE_LINE_HTML % context
class Traceback(object):
    """Wraps a traceback."""

    def __init__(self, exc_type, exc_value, tb):
        self.exc_type = exc_type
        self.exc_value = exc_value
        self.tb = tb

        # qualify the exception name with its module unless it is a builtin
        exception_type = exc_type.__name__
        if exc_type.__module__ not in {"builtins", "__builtin__", "exceptions"}:
            exception_type = exc_type.__module__ + "." + exception_type
        self.exception_type = exception_type

        # Walk the __cause__/__context__ chain (Python 3 only), building one
        # Group per chained exception; `memo` guards against reference cycles.
        self.groups = []
        memo = set()
        while True:
            self.groups.append(Group(exc_type, exc_value, tb))
            memo.add(id(exc_value))
            if PY2:
                break
            exc_value = exc_value.__cause__ or exc_value.__context__
            if exc_value is None or id(exc_value) in memo:
                break
            exc_type = type(exc_value)
            tb = exc_value.__traceback__
        # oldest cause first, final exception last
        self.groups.reverse()
        self.frames = [frame for group in self.groups for frame in group.frames]

    def filter_hidden_frames(self):
        """Remove the frames according to the paste spec."""
        for group in self.groups:
            group.filter_hidden_frames()

        # rebuild the flat frame list from the filtered groups
        self.frames[:] = [frame for group in self.groups for frame in group.frames]

    @property
    def is_syntax_error(self):
        """Is it a syntax error?"""
        return isinstance(self.exc_value, SyntaxError)

    @property
    def exception(self):
        """String representation of the final exception."""
        return self.groups[-1].exception

    def log(self, logfile=None):
        """Log the ASCII traceback into a file object."""
        if logfile is None:
            logfile = sys.stderr
        tb = self.plaintext.rstrip() + u"\n"
        logfile.write(to_native(tb, "utf-8", "replace"))

    def paste(self):
        """Create a paste and return the paste id."""
        data = json.dumps(
            {
                "description": "Werkzeug Internal Server Error",
                "public": False,
                "files": {"traceback.txt": {"content": self.plaintext}},
            }
        ).encode("utf-8")
        # urllib2 on Python 2, urllib.request on Python 3
        try:
            from urllib2 import urlopen
        except ImportError:
            from urllib.request import urlopen
        rv = urlopen("https://api.github.com/gists", data=data)
        resp = json.loads(rv.read().decode("utf-8"))
        rv.close()
        return {"url": resp["html_url"], "id": resp["id"]}

    def render_summary(self, include_title=True):
        """Render the traceback for the interactive console."""
        title = ""
        classes = ["traceback"]
        if not self.frames:
            classes.append("noframe-traceback")
            frames = []
        else:
            # mark library frames only when the traceback mixes library
            # and application frames
            library_frames = sum(frame.is_library for frame in self.frames)
            mark_lib = 0 < library_frames < len(self.frames)
            frames = [group.render(mark_lib=mark_lib) for group in self.groups]

        if include_title:
            if self.is_syntax_error:
                title = u"Syntax Error"
            else:
                title = u"Traceback <em>(most recent call last)</em>:"

        if self.is_syntax_error:
            description_wrapper = u"<pre class=syntaxerror>%s</pre>"
        else:
            description_wrapper = u"<blockquote>%s</blockquote>"

        return SUMMARY_HTML % {
            "classes": u" ".join(classes),
            "title": u"<h3>%s</h3>" % title if title else u"",
            "frames": u"\n".join(frames),
            "description": description_wrapper % escape(self.exception),
        }

    def render_full(self, evalex=False, secret=None, evalex_trusted=True):
        """Render the Full HTML page with the traceback info."""
        exc = escape(self.exception)
        return PAGE_HTML % {
            "evalex": "true" if evalex else "false",
            "evalex_trusted": "true" if evalex_trusted else "false",
            "console": "false",
            "title": exc,
            "exception": exc,
            "exception_type": escape(self.exception_type),
            "summary": self.render_summary(include_title=False),
            "plaintext": escape(self.plaintext),
            # collapse dashes so the plaintext cannot close the HTML comment
            "plaintext_cs": re.sub("-{2,}", "-", self.plaintext),
            "traceback_id": self.id,
            "secret": secret,
        }

    @cached_property
    def plaintext(self):
        # plain-text rendering of every group, cached for reuse
        return u"\n".join([group.render_text() for group in self.groups])

    @property
    def id(self):
        # object identity doubles as the traceback id used by the debugger JS
        return id(self)
class Group(object):
    """A group of frames for an exception in a traceback. On Python 3,
    if the exception has a ``__cause__`` or ``__context__``, there are
    multiple exception groups.
    """

    def __init__(self, exc_type, exc_value, tb):
        self.exc_type = exc_type
        self.exc_value = exc_value
        # divider text shown above chained exceptions (Python 3 only)
        self.info = None
        if not PY2:
            if exc_value.__cause__ is not None:
                self.info = (
                    u"The above exception was the direct cause of the"
                    u" following exception"
                )
            elif exc_value.__context__ is not None:
                self.info = (
                    u"During handling of the above exception, another"
                    u" exception occurred"
                )
        # one Frame per traceback entry, oldest first
        self.frames = []
        while tb is not None:
            self.frames.append(Frame(exc_type, exc_value, tb))
            tb = tb.tb_next

    def filter_hidden_frames(self):
        # Interpret the per-frame __traceback_hide__ markers:
        #   "before"/"before_and_this"  - drop everything collected so far
        #   "reset"/"reset_and_this"    - stop hiding subsequent frames
        #   "after"/"after_and_this"    - hide all subsequent frames
        #   any other truthy value      - hide just this frame
        new_frames = []
        hidden = False
        for frame in self.frames:
            hide = frame.hide
            if hide in ("before", "before_and_this"):
                new_frames = []
                hidden = False
                if hide == "before_and_this":
                    continue
            elif hide in ("reset", "reset_and_this"):
                hidden = False
                if hide == "reset_and_this":
                    continue
            elif hide in ("after", "after_and_this"):
                hidden = True
                if hide == "after_and_this":
                    continue
            elif hide or hidden:
                continue
            new_frames.append(frame)

        # if we only have one frame and that frame is from the codeop
        # module, remove it.
        # NOTE(review): checks self.frames[0], not new_frames[0] -- this
        # matches upstream werkzeug; confirm before "fixing".
        if len(new_frames) == 1 and self.frames[0].module == "codeop":
            del self.frames[:]

        # if the last frame is missing something went terrible wrong :(
        elif self.frames[-1] in new_frames:
            self.frames[:] = new_frames

    @property
    def exception(self):
        """String representation of the exception."""
        buf = traceback.format_exception_only(self.exc_type, self.exc_value)
        rv = "".join(buf).strip()
        return to_unicode(rv, "utf-8", "replace")

    def render(self, mark_lib=True):
        # HTML rendering: optional chained-exception divider, then one <li>
        # per frame (with the hide-marker info as a tooltip when present)
        out = []
        if self.info is not None:
            out.append(u'<li><div class="exc-divider">%s:</div>' % self.info)
        for frame in self.frames:
            out.append(
                u"<li%s>%s"
                % (
                    u' title="%s"' % escape(frame.info) if frame.info else u"",
                    frame.render(mark_lib=mark_lib),
                )
            )
        return u"\n".join(out)

    def render_text(self):
        # plain-text rendering mirroring CPython's traceback format
        out = []
        if self.info is not None:
            out.append(u"\n%s:\n" % self.info)
        out.append(u"Traceback (most recent call last):")
        for frame in self.frames:
            out.append(frame.render_text())
        out.append(self.exception)
        return u"\n".join(out)
class Frame(object):
"""A single frame in a traceback."""
def __init__(self, exc_type, exc_value, tb):
self.lineno = tb.tb_lineno
self.function_name = tb.tb_frame.f_code.co_name
self.locals = tb.tb_frame.f_locals
self.globals = tb.tb_frame.f_globals
fn = inspect.getsourcefile(tb) or inspect.getfile(tb)
if fn[-4:] in (".pyo", ".pyc"):
fn = fn[:-1]
# if it's a file on the file system resolve the real filename.
if os.path.isfile(fn):
fn | |
<filename>snerg/snerg/rendering.py
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Functions that render a baked SNeRG model directly in python."""
import numpy as np
import tensorflow as tf
from snerg.nerf import datasets
from snerg.snerg import model_utils
#
# Fast raymarching functions to render on the CPU in parallel with tensorflow.
#
def rays_aabb_intersection_tf(aabb_min, aabb_max, origins, inv_directions):
  """Slab-method intersection of rays with axis aligned bounding boxes (AABBs).

  Each ray is paired with its own AABB, given by min/max corner coordinates.

  Args:
    aabb_min: A tf.tensor [..., 3] with each AABB's corner closest to the
      origin.
    aabb_max: A tf.tensor [..., 3] with each AABB's corner furthest from the
      origin.
    origins: A tf.tensor [..., 3] of ray origins.
    inv_directions: A tf.tensor [..., 3] of per-channel inverted ray
      directions, i.e. (1/dx, 1/dy, 1/dz).

  Returns:
    t_min: A [...] tensor of the smallest (signed) distance along each ray to
      its AABB entry point.
    t_max: A [...] tensor of the largest (signed) distance along each ray to
      its AABB exit point. The ray misses the AABB whenever t_max < t_min.
  """
  # Signed distances to both slabs on every axis.
  slab_a = (aabb_min - origins) * inv_directions
  slab_b = (aabb_max - origins) * inv_directions
  # Entry is the latest per-axis near crossing; exit the earliest far crossing.
  entry = tf.minimum(slab_a, slab_b)
  leave = tf.maximum(slab_a, slab_b)
  t_min = tf.math.reduce_max(entry, axis=-1)
  t_max = tf.math.reduce_min(leave, axis=-1)
  return t_min, t_max
@tf.function
def atlas_raymarch_rays_tf(origins, directions, atlas_t, atlas_block_indices_t,
atlas_params, scene_params, grid_params):
"""Ray marches rays through a SNeRG scene and returns accumulated RGBA.
Args:
origins: A tf.tensor [N, 3] of the ray origins.
directions: A tf.tensor [N, 3] of ray directions.
atlas_t: A tensorflow tensor containing the texture atlas.
atlas_block_indices_t: A tensorflow tensor containing the indirection grid.
atlas_params: A dict with params for building and rendering with
the 3D texture atlas.
scene_params: A dict for scene specific params (bbox, rotation, resolution).
grid_params: A dict with parameters describing the high-res voxel grid which
the atlas is representing.
Returns:
rgb: An [N, C] tf.tensor with the colors and features accumulated along each
ray.
alpha: An [N, 1] tf.tensor with the alpha value accumuated along each ray.
"""
# Extract the relevant parameters from the dictionaries.
worldspace_t_opengl = tf.cast(
scene_params['worldspace_T_opengl'], dtype=tf.float32)
min_xyz = tf.cast(scene_params['min_xyz'], dtype=tf.float32)
voxel_size = tf.cast(grid_params['_voxel_size'], dtype=tf.float32)
atlas_block_size = atlas_params['atlas_block_size']
grid_size = tf.cast(grid_params['_grid_size'], dtype=tf.float32)
data_block_size = atlas_params['_data_block_size']
num_channels = scene_params['_channels']
# Set up the rays and transform them to the voxel grid coordinate space.
num_rays = origins.shape[0]
opengl_t_worldspace = tf.linalg.inv(worldspace_t_opengl)
origins_hom = tf.concat([origins, tf.ones_like(origins[Ellipsis, 0:1])], axis=-1)
origins_hom = tf.reshape(origins_hom, (-1, 4))
origins_hom = tf.matmul(origins_hom, opengl_t_worldspace)
origins_opengl = tf.reshape(origins_hom[Ellipsis, 0:3], origins.shape)
directions_norm = tf.linalg.norm(directions, axis=-1, keepdims=True)
directions /= directions_norm
directions_opengl = tf.matmul(directions, opengl_t_worldspace[0:3, 0:3])
origins_grid = (origins_opengl - min_xyz) / voxel_size
directions_grid = directions_opengl
inv_directions_grid = 1.0 / directions_grid
# Now set the near and far distance of each ray to match the cube which
# the voxel grid is defined in.
min_distances, max_distances = rays_aabb_intersection_tf(
tf.zeros_like(min_xyz), tf.cast(grid_size, dtype=tf.float32),
origins_grid, inv_directions_grid)
invalid_mask = min_distances > max_distances
zero_fill = tf.zeros_like(min_distances)
min_distances = tf.where(invalid_mask, zero_fill, min_distances)
max_distances = tf.where(invalid_mask, zero_fill, max_distances)
# The NeRF near/far bounds have been set for unnormalized ray directions, so
# we need to scale our bounds here to compensate for normalizing.
near_in_voxels = directions_norm[Ellipsis, 0] * scene_params['near'] / voxel_size
far_in_voxels = directions_norm[Ellipsis, 0] * scene_params['far'] / voxel_size
min_distances = tf.maximum(near_in_voxels, min_distances)
max_distances = tf.maximum(near_in_voxels, max_distances)
max_distances = tf.minimum(far_in_voxels, max_distances)
current_distances = tf.expand_dims(min_distances, -1) + 0.5
# Finally, set up the accumulation buffers we need for ray marching.
total_rgb = tf.zeros((num_rays, num_channels), dtype=tf.float32)
total_visibility = tf.ones((num_rays, 1), dtype=tf.float32)
init_state = (0, current_distances, total_rgb, total_visibility)
max_num_steps = tf.math.ceil(tf.linalg.norm(grid_size))
def raymarch_condition(i, current_distances, _, total_visibility):
"""Proceed until each ray is fully opaque or has left the voxel grid.
Args:
i: A integer containing the iteration count.
current_distances: A [N, 1] tensor containing the t values for each ray.
_: Dummy parameter for the accumuated RGB color along each ray.
total_visibility: A [N, 1] tensor with the accumulated visibility
(1 - alpha) along each ray.
Returns:
False if all of the rays have finished ray marching (exited the scene,
or saturated alpha). Also returns false if a maximum iteration count
has been reached (computed as the diagonal of the dense high-res voxel
grid).
"""
visibility_mask = total_visibility >= 1.0 / 256.0
distance_mask = current_distances < tf.expand_dims(max_distances, -1)
active_mask = tf.math.logical_and(visibility_mask, distance_mask)[Ellipsis, 0]
return tf.cast(i, tf.float32) < max_num_steps and tf.reduce_max(
tf.cast(active_mask, dtype=tf.float32)) > 0.0
def raymarch_body(i, current_distances, total_rgb, total_visibility):
"""Performs a single ray marching step for every ray.
This is the main body of the ray marching loop factored into a nested
function, which allows it to be called iteratively inside a TF graph.
Args:
i: A integer containing the iteration count.
current_distances: A [N, 1] tensor containing the t values for each ray.
total_rgb: Dummy parameter for the accumuated RGB color along each ray.
total_visibility: A [N, 1] tensor with the accumulated visibility
(1 - alpha) along each ray.
Returns:
A tuple containing the updated parameters (i, current_distances,
total_rgb, total_visibility) after the ray marching step.
"""
positions_grid = origins_grid + tf.math.multiply(directions_grid,
current_distances)
positions_atlas_grid = tf.cast(
tf.floor(positions_grid / data_block_size), dtype=tf.int64)
# Speedup: Only process rays that are inside the voxel grid.
epsilon = 0.1
valid_mask = (positions_grid[Ellipsis, 0] < grid_size[Ellipsis, 0] - 0.5 - epsilon)
valid_mask = tf.math.logical_and(
valid_mask, positions_grid[Ellipsis, 1] < grid_size[Ellipsis, 1] - 0.5 - epsilon)
valid_mask = tf.math.logical_and(
valid_mask, positions_grid[Ellipsis, 2] < grid_size[Ellipsis, 2] - 0.5 - epsilon)
valid_mask = tf.math.logical_and(valid_mask,
positions_grid[Ellipsis, 0] > 0.5 + epsilon)
valid_mask = tf.math.logical_and(valid_mask,
positions_grid[Ellipsis, 1] > 0.5 + epsilon)
valid_mask = tf.math.logical_and(valid_mask,
positions_grid[Ellipsis, 2] > 0.5 + epsilon)
invalid_mask = tf.math.logical_not(valid_mask)
# Fetch the atlas indices from the indirection grid.
positions_atlas_grid = tf.where(
tf.expand_dims(invalid_mask, -1), tf.zeros_like(positions_atlas_grid),
positions_atlas_grid)
block_indices = tf.gather_nd(atlas_block_indices_t, positions_atlas_grid)
empty_atlas_mask = block_indices[Ellipsis, 0] < 0
# Compute where each ray intersects the current macroblock.
min_aabb_positions = tf.cast(
positions_atlas_grid * data_block_size, dtype=tf.float32)
max_aabb_positions = min_aabb_positions + data_block_size
_, max_distance_to_aabb = rays_aabb_intersection_tf(
min_aabb_positions, max_aabb_positions, origins_grid,
inv_directions_grid)
# And then skip past empty macroblocks.
skip_ahead_mask = tf.math.logical_and(empty_atlas_mask, valid_mask)
skip_ahead_delta = tf.expand_dims(max_distance_to_aabb,
-1) - current_distances
skip_ahead_delta = tf.where(
tf.expand_dims(tf.logical_not(skip_ahead_mask), -1),
tf.zeros_like(skip_ahead_delta), skip_ahead_delta)
current_distances += skip_ahead_delta
current_distances += 1.0
# Early out if all rays are outside the voxel grid.
if tf.reduce_max(tf.cast(valid_mask, dtype=tf.float32)) == 0.0:
return (i + 1, current_distances, total_rgb, total_visibility)
# For the rays that are 1) inside the voxel grid and 2) inside a non-empty
# macroblock, we fetch RGB, Features and alpha from the texture atlas.
block_indices = tf.where(
tf.expand_dims(empty_atlas_mask, -1), tf.zeros_like(block_indices),
block_indices)
block_indices = tf.where(
tf.expand_dims(invalid_mask, -1), tf.zeros_like(block_indices),
block_indices)
positions_atlas = positions_grid - min_aabb_positions
positions_atlas += tf.cast(
block_indices * atlas_block_size, dtype=tf.float32)
positions_atlas += 1.0 # Account for the one-voxel padding in the atlas.
positions_atlas -= 0.5
rgb = tf.zeros((num_rays, num_channels), dtype=tf.float32)
alpha = tf.zeros((num_rays, 1), dtype=tf.float32)
# Use trilinear interpolation for smoother results.
offsets_xyz = [(x, y, z) # pylint: disable=g-complex-comprehension
for x in [0.0, 1.0]
for y in [0.0, 1.0]
for z in [0.0, 1.0]]
for dx, dy, dz in offsets_xyz:
offset_t = tf.convert_to_tensor([dx, dy, dz], dtype=tf.float32)
atlas_indices = tf.cast(
tf.floor(positions_atlas + offset_t), dtype=tf.int64)
weights = 1 - tf.abs(
tf.cast(atlas_indices, dtype=tf.float32) - positions_atlas)
weights = tf.reshape(tf.math.reduce_prod(weights, axis=-1), alpha.shape)
atlas_indices = tf.where(
tf.expand_dims(invalid_mask, -1), tf.zeros_like(atlas_indices),
atlas_indices)
gathered_features = tf.cast(
tf.gather_nd(atlas_t, atlas_indices), tf.float32)
trilinear_features = gathered_features[Ellipsis, 0:num_channels]
trilinear_alpha = gathered_features[Ellipsis, num_channels:num_channels + 1]
rgb += weights * tf.reshape(trilinear_features, rgb.shape)
alpha += weights * tf.reshape(trilinear_alpha, alpha.shape)
# Finally, pad use dummy values for | |
cons228,
cons198,
cons358,
)
rule1388 = ReplacementRule(pattern1388, replacement1388)
# Auto-generated RUBI integration rule: matches integrals of the form
# (f*x)**m * (a + c*x**n2)**p * (d + e*x**n)**q, gated by the cons*
# constraint predicates (defined elsewhere in the generated module).
pattern1389 = Pattern(
    Integral(
        (x_ * WC("f", S(1))) ** m_
        * (a_ + x_ ** WC("n2", S(1)) * WC("c", S(1))) ** p_
        * (d_ + x_ ** n_ * WC("e", S(1))) ** WC("q", S(1)),
        x_,
    ),
    cons2,
    cons8,
    cons29,
    cons50,
    cons127,
    cons19,
    cons5,
    cons52,
    cons48,
    cons198,
    cons358,
)
# bind the pattern to its generated replacement function
rule1389 = ReplacementRule(pattern1389, replacement1389)
pattern1390 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (d_ + x_ ** n_ * WC("e", S(1))) ** WC("q", S(1))
* (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1)))
** p_,
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons19,
cons5,
cons52,
cons48,
cons228,
cons491,
)
rule1390 = ReplacementRule(pattern1390, With1390)
pattern1391 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (a_ + x_ ** WC("n2", S(1)) * WC("c", S(1))) ** p_
* (d_ + x_ ** n_ * WC("e", S(1))) ** WC("q", S(1)),
x_,
),
cons2,
cons8,
cons29,
cons50,
cons19,
cons5,
cons52,
cons48,
cons491,
)
rule1391 = ReplacementRule(pattern1391, With1391)
pattern1392 = Pattern(
Integral(
(f_ * x_) ** m_
* (d_ + x_ ** n_ * WC("e", S(1))) ** WC("q", S(1))
* (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1)))
** p_,
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons127,
cons19,
cons5,
cons52,
cons48,
cons228,
cons491,
)
rule1392 = ReplacementRule(pattern1392, replacement1392)
pattern1393 = Pattern(
Integral(
(f_ * x_) ** m_
* (a_ + x_ ** WC("n2", S(1)) * WC("c", S(1))) ** p_
* (d_ + x_ ** n_ * WC("e", S(1))) ** WC("q", S(1)),
x_,
),
cons2,
cons8,
cons29,
cons50,
cons127,
cons19,
cons5,
cons52,
cons48,
cons491,
)
rule1393 = ReplacementRule(pattern1393, replacement1393)
pattern1394 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (d_ + x_ ** n_ * WC("e", S(1))) ** WC("q", S(1))
* (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1)))
** p_,
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons19,
cons4,
cons5,
cons52,
cons48,
cons228,
cons543,
cons25,
)
rule1394 = ReplacementRule(pattern1394, replacement1394)
pattern1395 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (a_ + x_ ** WC("n2", S(1)) * WC("c", S(1))) ** p_
* (d_ + x_ ** n_ * WC("e", S(1))) ** WC("q", S(1)),
x_,
),
cons2,
cons8,
cons29,
cons50,
cons19,
cons4,
cons5,
cons52,
cons48,
cons543,
cons25,
)
rule1395 = ReplacementRule(pattern1395, replacement1395)
pattern1396 = Pattern(
Integral(
(f_ * x_) ** m_
* (d_ + x_ ** n_ * WC("e", S(1))) ** WC("q", S(1))
* (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1)))
** p_,
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons127,
cons19,
cons5,
cons52,
cons48,
cons228,
cons543,
cons25,
)
rule1396 = ReplacementRule(pattern1396, replacement1396)
pattern1397 = Pattern(
Integral(
(f_ * x_) ** m_
* (a_ + x_ ** WC("n2", S(1)) * WC("c", S(1))) ** p_
* (d_ + x_ ** n_ * WC("e", S(1))) ** WC("q", S(1)),
x_,
),
cons2,
cons8,
cons29,
cons50,
cons127,
cons19,
cons5,
cons52,
cons48,
cons543,
cons25,
)
rule1397 = ReplacementRule(pattern1397, replacement1397)
pattern1398 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** WC("m", S(1))
* (d_ + x_ ** n_ * WC("e", S(1))) ** q_
/ (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1))),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons127,
cons19,
cons4,
cons52,
cons48,
cons228,
)
rule1398 = ReplacementRule(pattern1398, With1398)
pattern1399 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** WC("m", S(1))
* (d_ + x_ ** n_ * WC("e", S(1))) ** q_
/ (a_ + x_ ** WC("n2", S(1)) * WC("c", S(1))),
x_,
),
cons2,
cons8,
cons29,
cons50,
cons127,
cons19,
cons4,
cons52,
cons48,
)
rule1399 = ReplacementRule(pattern1399, With1399)
pattern1400 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** WC("m", S(1))
* (d_ + x_ ** n_ * WC("e", S(1)))
* (a_ + x_ ** n2_ * WC("c", S(1)) + x_ ** n_ * WC("b", S(1))) ** p_,
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons127,
cons19,
cons4,
cons48,
cons228,
cons704,
)
rule1400 = ReplacementRule(pattern1400, replacement1400)
pattern1401 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** WC("m", S(1))
* (a_ + x_ ** n2_ * WC("c", S(1))) ** p_
* (d_ + x_ ** n_ * WC("e", S(1))),
x_,
),
cons2,
cons8,
cons29,
cons50,
cons127,
cons19,
cons4,
cons48,
cons704,
)
rule1401 = ReplacementRule(pattern1401, replacement1401)
pattern1402 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** WC("m", S(1))
* (d_ + x_ ** n_ * WC("e", S(1))) ** WC("q", S(1))
* (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1)))
** WC("p", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons127,
cons19,
cons4,
cons5,
cons52,
cons48,
cons228,
cons750,
)
rule1402 = ReplacementRule(pattern1402, replacement1402)
pattern1403 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** WC("m", S(1))
* (a_ + x_ ** WC("n2", S(1)) * WC("c", S(1))) ** WC("p", S(1))
* (d_ + x_ ** n_ * WC("e", S(1))) ** WC("q", S(1)),
x_,
),
cons2,
cons8,
cons29,
cons50,
cons127,
cons19,
cons4,
cons5,
cons52,
cons48,
cons750,
)
rule1403 = ReplacementRule(pattern1403, replacement1403)
pattern1404 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** WC("m", S(1))
* (a_ + x_ ** n2_ * WC("c", S(1))) ** p_
* (d_ + x_ ** n_ * WC("e", S(1))) ** q_,
x_,
),
cons2,
cons8,
cons29,
cons50,
cons127,
cons19,
cons4,
cons5,
cons52,
cons48,
cons566,
cons735,
)
rule1404 = ReplacementRule(pattern1404, replacement1404)
pattern1405 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** m_
* (a_ + x_ ** n2_ * WC("c", S(1))) ** p_
* (d_ + x_ ** n_ * WC("e", S(1))) ** q_,
x_,
),
cons2,
cons8,
cons29,
cons50,
cons127,
cons19,
cons4,
cons5,
cons52,
cons48,
cons566,
cons751,
)
rule1405 = ReplacementRule(pattern1405, replacement1405)
pattern1406 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** WC("m", S(1))
* (d_ + x_ ** n_ * WC("e", S(1))) ** WC("q", S(1))
* (a_ + x_ ** n_ * WC("b", S(1)) + x_ ** WC("n2", S(1)) * WC("c", S(1)))
** WC("p", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons127,
cons19,
cons4,
cons5,
cons52,
cons48,
)
rule1406 = ReplacementRule(pattern1406, replacement1406)
pattern1407 = Pattern(
Integral(
(x_ * WC("f", S(1))) ** WC("m", S(1))
* (a_ + x_ ** WC("n2", S(1)) * WC("c", S(1))) ** WC("p", S(1))
* (d_ + x_ ** n_ * WC("e", S(1))) ** WC("q", S(1)),
x_,
),
cons2,
cons8,
cons29,
cons50,
cons127,
cons19,
cons4,
cons5,
cons52,
cons48,
)
rule1407 = ReplacementRule(pattern1407, replacement1407)
pattern1408 = Pattern(
Integral(
u_ ** WC("m", S(1))
* (d_ + v_ ** n_ * WC("e", S(1))) ** WC("q", S(1))
* (a_ + v_ ** n_ * WC("b", S(1)) + v_ ** WC("n2", S(1)) * WC("c", S(1)))
** WC("p", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons19,
cons4,
cons5,
cons52,
cons48,
cons556,
)
rule1408 = ReplacementRule(pattern1408, replacement1408)
pattern1409 = Pattern(
Integral(
u_ ** WC("m", S(1))
* (a_ + v_ ** WC("n2", S(1)) * WC("c", S(1))) ** WC("p", S(1))
* (d_ + v_ ** n_ * WC("e", S(1))) ** WC("q", S(1)),
x_,
),
cons2,
cons8,
cons29,
cons50,
cons19,
cons4,
cons5,
cons48,
cons556,
)
rule1409 = ReplacementRule(pattern1409, replacement1409)
pattern1410 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (d_ + x_ ** WC("mn", S(1)) * WC("e", S(1))) ** WC("q", S(1))
* (
x_ ** WC("n", S(1)) * WC("b", S(1))
+ x_ ** WC("n2", S(1)) * WC("c", S(1))
+ WC("a", S(0))
)
** WC("p", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons19,
cons4,
cons5,
cons682,
cons587,
cons588,
)
rule1410 = ReplacementRule(pattern1410, replacement1410)
pattern1411 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (a_ + x_ ** WC("n2", S(1)) * WC("c", S(1))) ** WC("p", S(1))
* (d_ + x_ ** WC("mn", S(1)) * WC("e", S(1))) ** WC("q", S(1)),
x_,
),
cons2,
cons8,
cons29,
cons50,
cons19,
cons728,
cons5,
cons727,
cons588,
)
rule1411 = ReplacementRule(pattern1411, replacement1411)
pattern1412 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (d_ + x_ ** WC("mn", S(1)) * WC("e", S(1))) ** q_
* (
x_ ** | |
import tkinter
import opros
import param_module
import time
import threading
import start
general_shift = 50  # vertical offset applied to all widgets
flag_cheks = True  # next action of the "check/uncheck all modules" toggle
flag_cheks_on_51 = True  # same toggle, used when command 51 is activated on all modules
flag_vkl_vikl_active_module = False  # current state of the global ON/OFF button
number_of_modules = len(opros.list_modules)  # number of modules being polled
def all_vum_use():
    """Select every module checkbox on one call, clear them all on the next.

    Alternation is driven by the module-level flag_cheks flag; the
    opros.active_module list is kept in sync with the checkbox state.
    """
    global flag_cheks
    enable = flag_cheks
    for idx in range(1, number_of_modules + 1):
        box = globals()['check{}'.format(idx)]
        if enable:
            box.select()
        else:
            box.deselect()
        opros.active_module[idx - 1] = enable
    flag_cheks = not flag_cheks
def all_vum_on_51():
    """Toggle command 51 on every module.

    On the enabling pass, modules currently on command '47' or '49' are
    switched to '51' with the matching command word ('00000002'/'00000003').
    On the disabling pass, commands and command words are restored from
    the buffer lists kept in the opros module.
    """
    global flag_cheks_on_51
    for idx in range(1, number_of_modules + 1):
        pos = idx - 1
        box = globals()['check_51{}'.format(idx)]
        if flag_cheks_on_51:
            box.select()
            current = opros.list_command[pos]
            if current == '47':
                opros.list_command[pos] = '51'
                opros.list_modules_command_vkl[pos] = '00000002'
            elif current == '49':
                opros.list_command[pos] = '51'
                opros.list_modules_command_vkl[pos] = '00000003'
        else:
            box.deselect()
            opros.list_command[pos] = opros.list_command_buffer[pos]
            opros.list_modules_command_vkl[pos] = opros.list_modules_command_vkl_buffer[pos]
    flag_cheks_on_51 = not flag_cheks_on_51
# Label texts/colors for the two common states. Tuple layout matches the
# _set_module_status parameters: (work, work_fg, pitan, pitan_fg, mod, mod_fg).
_STATUS_NORM = ('норм', 'green', 'норм', 'green', 'норм', 'green')
_STATUS_OTKL = ('Откл', 'blue', 'норм', 'green', 'норм', 'green')


def _set_module_status(i, work, work_fg, pitan, pitan_fg, mod, mod_fg):
    # Update the three tkinter status labels of module i (0-based index);
    # the labels live in module globals as work<N>_opros / pitan<N>_opros /
    # mod<N>_opros with N = i + 1.
    globals()['work{}_opros'.format(i + 1)]['text'] = work
    globals()['work{}_opros'.format(i + 1)]['fg'] = work_fg
    globals()['pitan{}_opros'.format(i + 1)]['text'] = pitan
    globals()['pitan{}_opros'.format(i + 1)]['fg'] = pitan_fg
    globals()['mod{}_opros'.format(i + 1)]['text'] = mod
    globals()['mod{}_opros'.format(i + 1)]['fg'] = mod_fg


def update_window():
    """Poll opros.return_ansver() forever and refresh the module status labels.

    Answer strings are fixed-offset hex dumps: bytes [6:8] hold the command
    code ('47'/'49'/'51'), [8:10] a sub-code for command 51, and the
    subsequent 4-digit fields hold control words. Runs in its own thread;
    sleeps 0.1 s between rounds.
    """
    while True:
        ansvers = opros.return_ansver()
        for i in range(len(ansvers)):
            ans = ansvers[i]
            if len(ans) < 10:
                # Too short to be a valid answer: no exchange with the module.
                _set_module_status(i, 'обмена нет', 'red', '(.Y.)', 'red', '0_o', 'red')
            elif ans[6:8] == '47':
                if ans[18:22] == '0007':
                    _set_module_status(i, *_STATUS_NORM)
                elif int(ans[18:22], 16) < 7:
                    _set_module_status(i, *_STATUS_OTKL)
                else:
                    avaria_47(ans[18:22], i)
            elif ans[6:8] == '49':
                if ans[18:22] == '0007' and ans[36:40] == '0007':
                    _set_module_status(i, *_STATUS_NORM)
                elif int(ans[18:22], 16) < 7 and int(ans[36:40], 16) < 7:
                    _set_module_status(i, *_STATUS_OTKL)
                else:
                    avaria_49(ans[18:22], ans[36:40], i)
            elif ans[6:8] == '51' and ans[8:10] == '02':
                if ans[36:40] == '0007':
                    _set_module_status(i, *_STATUS_NORM)
                elif int(ans[36:40], 16) < 7:
                    _set_module_status(i, *_STATUS_OTKL)
                else:
                    avaria_51(ans[36:40], '0000', '0000', '0000', i)
            elif ans[6:8] == '51' and ans[8:10] == '03':
                if ans[36:40] == '0007' and ans[54:58] == '0000':
                    _set_module_status(i, *_STATUS_NORM)
                elif int(ans[36:40], 16) < 7:
                    _set_module_status(i, *_STATUS_OTKL)
                else:
                    avaria_51(ans[36:40], ans[54:58], '0000', '0000', i)
            elif ans[6:8] == '51' and ans[8:10] == '05':
                if (ans[36:40] == '0007' and ans[54:58] == '0000'
                        and ans[72:76] == '0000' and ans[90:94] == '0000'):
                    _set_module_status(i, *_STATUS_NORM)
                elif int(ans[36:40], 16) < 7:
                    _set_module_status(i, *_STATUS_OTKL)
                else:
                    avaria_51(ans[36:40], ans[54:58], ans[72:76], ans[90:94], i)
            else:
                # Unknown command code in the answer.
                globals()['work{}_opros'.format(i + 1)]['fg'] = 'orange'
                globals()['work{}_opros'.format(i + 1)]['text'] = 'аномалия'
        time.sleep(0.1)
def stop_obmen():
    # Invert opros.flag_on_opros — presumably gates the exchange loop in
    # the opros module; confirm against opros before relying on semantics.
    opros.flag_on_opros = not opros.flag_on_opros


def choise_active_module(b):
    # Toggle polling participation of the module with 0-based index b.
    opros.active_module[b] = not opros.active_module[b]


def vkl_pitan():
    # Toggle parameter flag 0 ("pitan" — power supply, per the name).
    opros.active_parametrs[0] = not opros.active_parametrs[0]


def vkl_ip():
    # Toggle parameter flag 1 ("ip", per the name).
    opros.active_parametrs[1] = not opros.active_parametrs[1]


def vkl_svch():
    # Toggle parameter flag 2 ("svch" — microwave, per the name).
    opros.active_parametrs[2] = not opros.active_parametrs[2]
def _set_cmd_bit(cmd, pos, on):
    # Return cmd with the single character at index pos replaced by
    # '1' (on) or '0' (off); cmd itself is not mutated.
    return cmd[:pos] + ('1' if on else '0') + cmd[pos + 1:]


def vkl_vikl_active_module():
    """Toggle the global ON/OFF state and rewrite active modules' command words.

    Character positions 1 / 3 / 5 of each command word (e.g. '00000002')
    carry the three enable flags tracked in opros.active_parametrs
    (toggled by vkl_pitan / vkl_ip / vkl_svch). When switching ON, each
    flag follows its opros.active_parametrs entry; when switching OFF,
    all three are cleared. Also updates the button caption/color.
    """
    global flag_vkl_vikl_active_module
    flag_vkl_vikl_active_module = not flag_vkl_vikl_active_module
    on = flag_vkl_vikl_active_module
    if on:
        button_vkl_vikl['text'] = 'ВЫКЛ'
        button_vkl_vikl['bg'] = 'green yellow'
    else:
        button_vkl_vikl['text'] = 'ВКЛ'
        button_vkl_vikl['bg'] = 'snow3'
    for i in range(number_of_modules):
        if opros.active_module[i] is True:
            cmd = opros.list_modules_command_vkl[i]
            for pos, par in ((1, 0), (3, 1), (5, 2)):
                cmd = _set_cmd_bit(cmd, pos, on and opros.active_parametrs[par] is True)
            opros.list_modules_command_vkl[i] = cmd
def avaria_47(ks, i):
    """Decode a command-47 control word and paint the status labels of module i.

    ks: four hex digits (two bytes) of the control word; i: 0-based module
    index. Bits 0-2 of the first byte signal failures (T/Q overload, no IM,
    power failure); higher bits of both bytes select warning texts.
    """
    byte1 = int(ks[:2], 16)
    byte2 = int(ks[2:], 16)
    work = globals()['work{}_opros'.format(i + 1)]
    pitan = globals()['pitan{}_opros'.format(i + 1)]
    mod = globals()['mod{}_opros'.format(i + 1)]
    if byte1 & 0b111:
        work['text'] = 'Авария'
        work['fg'] = 'red'
        if byte1 & 0b001:
            mod['text'] = 'перегрузка по T/Q'
            mod['fg'] = 'red'
        elif byte1 & 0b010:
            mod['text'] = 'Нет ИМ'
            mod['fg'] = 'red'
        else:
            mod['text'] = 'Норма'
            mod['fg'] = 'green'
        if byte1 & 0b100:
            pitan['text'] = 'Авария'
            pitan['fg'] = 'red'
        else:
            pitan['text'] = 'Норма'
            pitan['fg'] = 'green'
    else:
        pitan['text'] = 'Норма'
        pitan['fg'] = 'green'
        mod['text'] = 'Норма'
        mod['fg'] = 'green'
    # Warning conditions override the work label set above.
    if byte2 & 0b00001000:
        work['text'] = 'Рвх меньше\nнормы'
        work['fg'] = 'orange'
    elif byte2 & 0b00010000:
        work['text'] = 'Рвх больше\nнормы'
        work['fg'] = 'orange'
    elif byte2 & 0b00100000:
        work['text'] = 'Рвых меньше\nнормы'
        work['fg'] = 'orange'
    elif byte2 & 0b01000000:
        work['text'] = 'Ротр больше\nнормы'
        work['fg'] = 'orange'
    elif byte2 & 0b10000000:
        work['text'] = 'Перегрев'
        work['fg'] = 'orange'
    elif byte1 & 0b1000:
        work['text'] = 'Датчик темп.\nнеисправен'
        work['fg'] = 'orange'
def avaria_49(ks1, ks2, i):
ks1b1 = list(bin(int(ks1[:2], 16))[2:]) # превращение НЕХ значений в BIN значения
ks1b2 = list(bin(int(ks1[2:], 16))[2:]) # превращение НЕХ значений в BIN значения
ks2b1 = list(bin(int(ks2[:2], 16))[2:]) # превращение НЕХ значений в BIN значения
ks2b2 = list(bin(int(ks2[2:], 16))[2:]) # превращение НЕХ значений в BIN значения
ks1b1 = ['0' for i in range(8 - len(ks1b1))] + ks1b1 # превращение BIN значений в массив из 8 битов
ks1b2 = ['0' for i in range(8 - len(ks1b2))] + ks1b2 # превращение BIN значений в массив из 8 битов
ks2b1 = ['0' for i in range(8 - len(ks2b1))] + ks2b1 # превращение BIN значений в массив из 8 битов
ks2b2 = ['0' for i in range(8 - len(ks2b2))] + ks2b2 # превращение BIN значений в массив из 8 битов
if ks1b1[-1] == '1' or ks1b1[-2] == '1' or ks1b1[-3] == '1' or \
ks2b1[-1] == '1' or ks2b1[-2] == '1' or ks2b1[-3] == '1':
globals()['work{}_opros'.format(i + 1)]['text'] = 'Авария'
globals()['work{}_opros'.format(i + 1)]['fg'] = 'red'
if ks1b1[-1] == '1' or ks2b1[-1] == '1':
globals()['mod{}_opros'.format(i + 1)]['text'] = 'перегрузка\nпо T/Q'
globals()['mod{}_opros'.format(i + 1)]['fg'] = 'red'
else:
if ks1b1[-2] == '1' or ks2b1[-2] == '1':
globals()['mod{}_opros'.format(i + 1)]['text'] = 'Нет ИМ'
globals()['mod{}_opros'.format(i + 1)]['fg'] = 'red'
else:
globals()['mod{}_opros'.format(i + 1)]['text'] = 'Норма'
globals()['mod{}_opros'.format(i + 1)]['fg'] = 'green'
if ks1b1[-3] == '1' or ks2b1[-3] == '1':
globals()['pitan{}_opros'.format(i | |
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
# See the LICENSE file in the project root for more information.
"""A script to evaluate test values for special functions in high precision.
This script looks for .csv files in /test/Tests/Data/SpecialFunctionsValues.
These files are expected to contain sets of arguments and expected result values
for some special functions.
Whenever the script encounters a file for which it has a defined function,
it evaluates that function for every set of arguments present in that file
and replaces the expected result in the file with the one it computed,
except for Infinite or NaN results, which are preserved.
.csv files are expected to have the header of the form
arg0,arg1,...,argN,expectedresult
use comma as a value separator, dot as a decimal separator, and
"Infinity", "-Infinity", and "NaN" to designate the corresponding values.
The correspondence between file names and functions is set in the pair_info
dictionary within the script.
To add a new test case, add a new row to the csv file using zero for the expectedresult.
Then run this script to replace the dummy value.
"""
from __future__ import division
import os
import csv
from mpmath import *
import time
mp.pretty = True  # print mpf values in plain decimal form
mp.dps = 500  # internal working precision (decimal digits) for mpmath
output_dps = 50  # digits of the reported results; also used as the
                 # error-vs-result acceptance threshold for quadrature
def normal_cdf_moment_ratio(n, x):
    """Evaluate normal_cdf_moment_ratio(n, x).

    Uses the parabolic cylinder function pcfu for x >= 0 and Tricomi's
    confluent hypergeometric function hyperu for x < 0.
    """
    if x >= 0:
        return exp(x * x / 4) * pcfu(0.5 + n, -x)
    return power(2, -0.5 - n / 2) * hyperu(n / 2 + 0.5, 0.5, x * x / 2)
def normal_cdf2(x, y, r):
    """
    This function produces correct results for inputs currently present in /test/Tests/Data/SpecialFunctionsValues.
    Other inputs may fall into areas where currently present algorithms produce incorrect results and may require modifying this function.
    """
    # Bivariate standard-normal CDF with correlation r, via symmetry
    # reductions and quadrature. Degenerate limits first:
    if x == -inf or y == -inf:
        return mpf('0')
    if x == inf:
        return ncdf(y)
    if y == inf:
        return ncdf(x)
    if r == mpf('1'):
        return ncdf(min(x, y))
    if r == mpf('-1'):
        # Perfectly anti-correlated: all mass lies on the line Y = -X.
        if x <= -y:
            return mpf('0')
        elif x > y:
            return ncdf(y) - ncdf(-x)
        else:
            return ncdf(x) - ncdf(-y)
    # Symmetry in (x, y): make |x| the larger argument.
    if abs(y) > abs(x):
        z = x
        x = y
        y = z
    # Avoid quadrature with r < 0 since it is sometimes inaccurate.
    if r < 0 and x - y < 0:
        # phi(x,y,r) = phi(inf,y,r) - phi(-x,y,-r)
        # phi(x,y,r) = phi(x,inf,r) - phi(x,-y,-r)
        return ncdf(x) - normal_cdf2(x, -y, -r)
    if x > 0 and -x + y <= 0:
        return ncdf(y) - normal_cdf2(-x,y,-r)
    if x + y > 0:
        # phi(x,y,r) = phi(-x,-y,r) + phi(x,y,-1)
        return normal_cdf2(-x, -y, r) + normal_cdf2(x,y,-1)
    def f(t):
        # Integrand of Plackett's identity: d phi / d r integrated over t.
        if abs(t) == mpf('1'):
            # When t = -1, (x*x+y*y-2*t*x*y) = (x+y)^2 >= 0
            # When t = 1, (x*x+y*y-2*t*x*y) = (x-y)^2 >= 0
            return mpf('0')
        omt2 = (1 - t) * (1 + t)
        return 1 / (2 * pi * sqrt(omt2)) * exp(-(x * x + y * y - 2 * t * x * y) / (2 * omt2))
    omr2 = (1+r)*(1-r)
    ymrx = y - r*x
    def f2(t):
        # Fallback integrand over t in [0, inf).
        # NOTE(review): divides by omr2 = 1-r^2, not sqrt(1-r^2) — confirm
        # the intended scaling before reusing this integrand elsewhere.
        return npdf(t - x) * normal_cdf((ymrx + r*t)/omr2)
    # This integral excludes normal_cdf2(x,y,-1)
    # which will be zero when x+y <= 0
    result, err = safe_quad(f, [-1, r])
    if mpf(10)**output_dps * abs(err) > abs(result):
        # First quadrature too noisy; retry with the alternative integrand.
        result, err = safe_quad(f2, [0, inf])
        if mpf(10)**output_dps * abs(err) > abs(result):
            print(f"Suspiciously big error when evaluating an integral for normal_cdf2({nstr(x)}, {nstr(y)}, {nstr(r)}).")
            print(f"Integral: {nstr(result)}")
            print(f"Integral error estimate: {nstr(err)}")
    return result
def safe_quad(f, points):
    """Integrate f over points, rescaling the integrand to stabilize quad's
    error estimate.

    A cheap low-degree pass provides an initial scale; if the relative error
    of the scaled result is still too large, the result itself is used as
    the scale for one more attempt. Returns (result, error_estimate).
    """
    verbose = False

    def _quad_scaled(scale):
        if verbose:
            print(f"Rescaling integrand by {nstr(1/scale)}")
        res, e = quad(lambda x: f(x)/scale, points, error=True, verbose=verbose)
        return res * scale, e * scale

    # Quick, rough estimate of the result's magnitude.
    estimate = quad(f, points, maxdegree=1, verbose=verbose)
    result, err = _quad_scaled(estimate)
    if mpf(10)**output_dps * abs(err) > abs(result):
        result, err = _quad_scaled(result)
    return result, err
def normal_cdf2_ln(x, y, r):
    """Natural logarithm of normal_cdf2(x, y, r)."""
    return log(normal_cdf2(x, y, r))
def normal_cdf2_ratio_ln(x, y, r, sqrtomr2):
    """normal_cdf2_ln plus the bivariate-normal quadratic form and log(2*pi).

    sqrtomr2 is sqrt(1 - r*r); when it is small it is taken as the primary
    input and r is recomputed from it (keeping r's sign) for accuracy.
    """
    if sqrtomr2 < 0.618:
        one_minus_r2 = sqrtomr2 * sqrtomr2
        r = sign(r) * sqrt(1 - one_minus_r2)
    else:
        one_minus_r2 = 1 - r * r
    quad_form = (x * x + y * y - 2 * r * x * y) / 2 / one_minus_r2
    return normal_cdf2_ln(x, y, r) + quad_form + log(2 * pi)
def logistic_gaussian(m, v):
    """Gaussian average of the logistic function: the integral of
    exp(-(x-m)^2/(2v)) / (1+exp(-x)) / sqrt(2*pi*v) over the real line.

    m, v: mpmath-compatible mean and variance; handles the infinite limits
    explicitly and uses an asymptotic expansion when the mass lies far left.
    """
    if m == inf:
        if v == inf:
            return inf
        return mpf('1.0')
    if v == inf:
        return mpf('0.5')
    logEpsilon = log(eps)
    # Far-left regime: the expectation reduces to exponential moments of
    # the Gaussian (leading terms of the series expansion).
    if 2*m + 4*v < logEpsilon:
        return mpf(exp(m + v/2) * (1 - exp(m + 1.5 * v) * (1 - exp(m + 2.5 * v))))
    tanhm = tanh(m)
    # Not really a precise threshold, but fine for our data
    if tanhm == mpf('1.0'):
        return tanhm
    # The integration routine below is obtained by substituting x = atanh(t)*sqrt(v)
    # into the definition of logistic_gaussian
    #
    # f = lambda x: mpmath.exp(-(x - mmpf) * (x - mmpf) / (2 * vmpf)) / (1 + mpmath.exp(-x))
    # result = 1 / mpmath.sqrt(2 * mpmath.pi * vmpf) * mpmath.quad(f, [-mpmath.inf, mpmath.inf])
    #
    # Such substitution makes mpmath.quad call much faster.
    # mpmath.quad uses exponential spacing between quadrature points, so we want the transformation to grow like log(x).
    sqrtv = sqrt(v)
    misqrtv = m/sqrtv
    scale = max(10, m + sqrtv)/sqrtv
    def f(t):
        x = scale*atanh(t)
        return exp(-(x - misqrtv) ** 2 / 2) / (1 + exp(-x*sqrtv)) / (1 - t * t)
    coef = scale / sqrt(2 * pi)
    points = [-1, 0, 1]
    # Renamed from 'int' to avoid shadowing the builtin.
    integral, err = safe_quad(f, points)
    result = coef * integral
    if mpf(10)**output_dps * abs(err) > abs(integral):
        print(f"Suspiciously big error when evaluating an integral for logistic_gaussian({nstr(m)}, {nstr(v)}).")
        print(f"Integral: {nstr(integral)}")
        print(f"integral error estimate: {nstr(err)}")
        print(f"Coefficient: {nstr(coef)}")
        print(f"Result (Coefficient * Integral): {nstr(result)}")
    return result
def logistic_gaussian_deriv(m, v):
    """Gaussian-weighted average of the logistic function's derivative:
    integral of exp(-(x-m)^2/(2v)) / ((1+exp(-x))*(1+exp(x))) / sqrt(2*pi*v).
    """
    if m == inf or m == -inf or v == inf:
        return mpf('0.0')
    # The integration routine below is obtained by substituting x = atanh(t)
    # into the definition of logistic_gaussian'
    #
    # f = lambda x: mpmath.exp(-(x - mmpf) * (x - mmpf) / (2 * vmpf)) / ((1 + mpmath.exp(-x)) * (1 + mpmath.exp(x)))
    # result = 1 / mpmath.sqrt(2 * mpmath.pi * vmpf) * mpmath.quad(f, [-mpmath.inf, mpmath.inf])
    #
    # Such substitution makes mpmath.quad call much faster.
    def f(t):
        one_minus_t_squared = 1 - t * t
        return exp(-(atanh(t) - m) ** 2 / (2 * v)) / (one_minus_t_squared + sqrt(one_minus_t_squared))
    coef = 0.5 / sqrt(2 * pi * v)
    # Renamed from 'int' to avoid shadowing the builtin.
    integral, err = safe_quad(f, [-1, 1])
    result = coef * integral
    if mpf(10)**output_dps * abs(err) > abs(integral):
        print(f"Suspiciously big error when evaluating an integral for logistic_gaussian'({m}, {v}).")
        print(f"Integral: {integral}")
        print(f"integral error estimate: {err}")
        print(f"Coefficient: {coef}")
        print(f"Result (Coefficient * Integral): {result}")
    return result
def logistic_gaussian_deriv2(m, v):
    """Gaussian-weighted average of the logistic function's second derivative.

    Odd symmetry of the integrand makes the result exactly zero at m == 0,
    which is returned without quadrature (as are the infinite limits).
    """
    if m == inf or m == -inf or v == inf or m == mpf(0):
        return mpf(0)
    # The integration routine below is obtained by substituting x = atanh(t)
    # into the definition of logistic_gaussian''
    #
    # def f(x):
    #     expx = mpmath.exp(x)
    #     one_plus_expx = 1 + expx
    #     return mpmath.exp(-(x - mmpf) * (x - mmpf) / (2 * vmpf)) * (1 - expx) / ((1 + mpmath.exp(-x)) * one_plus_expx * one_plus_expx)
    # coef = 1 / mpmath.sqrt(2 * mpmath.pi * vmpf)
    # int = mpmath.quad(f, [-mpmath.inf, mpmath.inf])
    # result = coef * int
    #
    # Such substitution makes mpmath.quad call much faster.
    def f(t):
        one_minus_t = 1 - t
        one_minus_t_squared = 1 - t * t
        sqrt_one_minus_t_squared = sqrt(one_minus_t_squared)
        return exp(-(atanh(t) - m) ** 2 / (2 * v)) * (one_minus_t - sqrt_one_minus_t_squared) / ((one_minus_t_squared + sqrt_one_minus_t_squared) * (one_minus_t + sqrt_one_minus_t_squared))
    coef = 0.5 / sqrt(2 * pi * v)
    # Renamed from 'int' to avoid shadowing the builtin.
    integral, err = safe_quad(f, [-1, 1])
    result = coef * integral
    if mpf(10)**output_dps * abs(err) > abs(integral):
        print(f"Suspiciously big error when evaluating an integral for logistic_gaussian''({m}, {v}).")
        print(f"Integral: {nstr(integral)}")
        print(f"integral error estimate: {nstr(err)}")
        print(f"Coefficient: {nstr(coef)}")
        print(f"Result (Coefficient * Integral): {nstr(result)}")
    return result
def normal_cdf(x):
    """
    An alternate way of computing ncdf that avoids the bugs in ncdf
    (expressed through the upper incomplete gamma function).
    """
    upper_tail = gammainc(0.5, x * x / 2, inf)
    return 0.5 * upper_tail / gamma(0.5)
def normal_pdf_ln(x):
    """Logarithm of the standard normal density at x."""
    half_log_two_pi = log(sqrt(2 * pi))
    return -x * x / 2 - half_log_two_pi
def | |
self.result = None
self.content_related = True
self.channel = channel # type: TypeInputChannel
self.id = id # type: int
self.grouped = grouped # type: TypeBool
    def resolve(self, client, utils):
        # Convert whatever the caller passed into a proper InputChannel.
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))

    def to_dict(self):
        # Plain-dict view of the request (used for repr/debugging).
        return {
            '_': 'ExportMessageLinkRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'id': self.id,
            'grouped': self.grouped
        }

    def __bytes__(self):
        # TL wire format: constructor id, channel, message id, then the
        # boolTrue/boolFalse constructor for `grouped`.
        return b''.join((
            b'cq\xb7\xce',
            bytes(self.channel),
            struct.pack('<i', self.id),
            b'\xb5ur\x99' if self.grouped else b'7\x97y\xbc',
        ))

    @staticmethod
    def from_reader(reader):
        # Inverse of __bytes__; the reader is positioned just past the
        # constructor id.
        _channel = reader.tgread_object()
        _id = reader.read_int()
        _grouped = reader.tgread_bool()
        return ExportMessageLinkRequest(channel=_channel, id=_id, grouped=_grouped)
class GetAdminLogRequest(TLObject):
    """Generated request object for channels.getAdminLog."""
    CONSTRUCTOR_ID = 0x33ddf480
    SUBCLASS_OF_ID = 0x51f076bc

    def __init__(self, channel, q, max_id, min_id, limit, events_filter=None, admins=None):
        """
        :param InputChannel channel:
        :param str q:
        :param ChannelAdminLogEventsFilter | None events_filter:
        :param list[InputUser] | None admins:
        :param int max_id:
        :param int min_id:
        :param int limit:
        :returns channels.AdminLogResults: Instance of AdminLogResults.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.q = q  # type: str
        self.events_filter = events_filter  # type: Optional[TypeChannelAdminLogEventsFilter]
        self.admins = admins  # type: Optional[List[TypeInputUser]]
        self.max_id = max_id  # type: int
        self.min_id = min_id  # type: int
        self.limit = limit  # type: int

    def resolve(self, client, utils):
        # Normalize user-supplied entities into Input* forms.
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
        self.admins = [utils.get_input_user(client.get_input_entity(_x)) for _x in self.admins] if self.admins else None

    def to_dict(self):
        return {
            '_': 'GetAdminLogRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'q': self.q,
            'events_filter': None if self.events_filter is None else self.events_filter.to_dict(),
            'admins': [] if self.admins is None else [None if x is None else x.to_dict() for x in self.admins],
            'max_id': self.max_id,
            'min_id': self.min_id,
            'limit': self.limit
        }

    def __bytes__(self):
        # TL wire format: constructor id, flags word (bit 0 = events_filter,
        # bit 1 = admins), then the fields in schema order.
        return b''.join((
            b'\x80\xf4\xdd3',
            struct.pack('<I', (0 if self.events_filter is None or self.events_filter is False else 1) | (0 if self.admins is None or self.admins is False else 2)),
            bytes(self.channel),
            TLObject.serialize_bytes(self.q),
            b'' if self.events_filter is None or self.events_filter is False else (bytes(self.events_filter)),
            b'' if self.admins is None or self.admins is False else b''.join((b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.admins)),b''.join(bytes(x) for x in self.admins))),
            struct.pack('<q', self.max_id),
            struct.pack('<q', self.min_id),
            struct.pack('<i', self.limit),
        ))

    @staticmethod
    def from_reader(reader):
        flags = reader.read_int()
        _channel = reader.tgread_object()
        _q = reader.tgread_string()
        if flags & 1:
            _events_filter = reader.tgread_object()
        else:
            _events_filter = None
        if flags & 2:
            reader.read_int()  # skip the vector constructor id
            _admins = []
            for _ in range(reader.read_int()):
                _x = reader.tgread_object()
                _admins.append(_x)
        else:
            _admins = None
        _max_id = reader.read_long()
        _min_id = reader.read_long()
        _limit = reader.read_int()
        return GetAdminLogRequest(channel=_channel, q=_q, max_id=_max_id, min_id=_min_id, limit=_limit, events_filter=_events_filter, admins=_admins)
class GetAdminedPublicChannelsRequest(TLObject):
    """Generated request object for channels.getAdminedPublicChannels;
    carries no arguments.
    """
    CONSTRUCTOR_ID = 0x8d8d82d7
    SUBCLASS_OF_ID = 0x99d5cb14

    def __init__(self):
        super().__init__()
        self.result = None
        self.content_related = True

    def to_dict(self):
        return {'_': 'GetAdminedPublicChannelsRequest'}

    def __bytes__(self):
        # Only the constructor id — there are no fields to serialize.
        return b'\xd7\x82\x8d\x8d'

    @staticmethod
    def from_reader(reader):
        return GetAdminedPublicChannelsRequest()
class GetChannelsRequest(TLObject):
    """Generated request object for channels.getChannels."""
    CONSTRUCTOR_ID = 0xa7f6bbb
    SUBCLASS_OF_ID = 0x99d5cb14

    def __init__(self, id):
        """
        :param list[InputChannel] id:
        :returns messages.Chats: Instance of either Chats, ChatsSlice.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.id = id  # type: List[TypeInputChannel]

    def resolve(self, client, utils):
        # Normalize each entry into an InputChannel.
        self.id = [utils.get_input_channel(client.get_input_entity(_x)) for _x in self.id]

    def to_dict(self):
        return {
            '_': 'GetChannelsRequest',
            'id': [] if self.id is None else [None if x is None else x.to_dict() for x in self.id]
        }

    def __bytes__(self):
        # Constructor id, then a TL vector (0x1cb5c415) of serialized channels.
        return b''.join((
            b'\xbbk\x7f\n',
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.id)),b''.join(bytes(x) for x in self.id),
        ))

    @staticmethod
    def from_reader(reader):
        reader.read_int()  # skip the vector constructor id
        _id = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _id.append(_x)
        return GetChannelsRequest(id=_id)
class GetFullChannelRequest(TLObject):
    """Generated request object for channels.getFullChannel."""
    CONSTRUCTOR_ID = 0x8736a09
    SUBCLASS_OF_ID = 0x225a5109

    def __init__(self, channel):
        """
        :param InputChannel channel:
        :returns messages.ChatFull: Instance of ChatFull.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        channel = self.channel.to_dict() if self.channel is not None else None
        return {'_': 'GetFullChannelRequest', 'channel': channel}

    def __bytes__(self):
        # Constructor id followed by the serialized channel.
        return b'\tjs\x08' + bytes(self.channel)

    @staticmethod
    def from_reader(reader):
        return GetFullChannelRequest(channel=reader.tgread_object())
class GetMessagesRequest(TLObject):
    """Generated request object for channels.getMessages."""
    CONSTRUCTOR_ID = 0x93d7b347
    SUBCLASS_OF_ID = 0xd4b40b5e

    def __init__(self, channel, id):
        """
        :param InputChannel channel:
        :param list[int] id:
        :returns messages.Messages: Instance of either Messages, MessagesSlice, ChannelMessages, MessagesNotModified.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.id = id  # type: List[int]

    def resolve(self, client, utils):
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))

    def to_dict(self):
        return {
            '_': 'GetMessagesRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'id': [] if self.id is None else self.id[:]
        }

    def __bytes__(self):
        # Constructor id, channel, then a TL vector of int message ids.
        return b''.join((
            b'G\xb3\xd7\x93',
            bytes(self.channel),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.id)),b''.join(struct.pack('<i', x) for x in self.id),
        ))

    @staticmethod
    def from_reader(reader):
        _channel = reader.tgread_object()
        reader.read_int()  # skip the vector constructor id
        _id = []
        for _ in range(reader.read_int()):
            _x = reader.read_int()
            _id.append(_x)
        return GetMessagesRequest(channel=_channel, id=_id)
class GetParticipantRequest(TLObject):
    """Generated request object for channels.getParticipant."""
    CONSTRUCTOR_ID = 0x546dd7a6
    SUBCLASS_OF_ID = 0x6658151a

    def __init__(self, channel, user_id):
        """
        :param InputChannel channel:
        :param InputUser user_id:
        :returns channels.ChannelParticipant: Instance of ChannelParticipant.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.user_id = user_id  # type: TypeInputUser

    def resolve(self, client, utils):
        # Normalize both entities into their Input* forms.
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
        self.user_id = utils.get_input_user(client.get_input_entity(self.user_id))

    def to_dict(self):
        return {
            '_': 'GetParticipantRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'user_id': None if self.user_id is None else self.user_id.to_dict()
        }

    def __bytes__(self):
        return b''.join((
            b'\xa6\xd7mT',
            bytes(self.channel),
            bytes(self.user_id),
        ))

    @staticmethod
    def from_reader(reader):
        _channel = reader.tgread_object()
        _user_id = reader.tgread_object()
        return GetParticipantRequest(channel=_channel, user_id=_user_id)
class GetParticipantsRequest(TLObject):
    """Generated request object for channels.getParticipants."""
    CONSTRUCTOR_ID = 0x123e05e9
    SUBCLASS_OF_ID = 0xe60a6e64

    def __init__(self, channel, filter, offset, limit, hash):
        """
        :param InputChannel channel:
        :param ChannelParticipantsFilter filter:
        :param int offset:
        :param int limit:
        :param int hash:
        :returns channels.ChannelParticipants: Instance of either ChannelParticipants, ChannelParticipantsNotModified.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.filter = filter  # type: TypeChannelParticipantsFilter
        self.offset = offset  # type: int
        self.limit = limit  # type: int
        self.hash = hash  # type: int

    def resolve(self, client, utils):
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))

    def to_dict(self):
        return {
            '_': 'GetParticipantsRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'filter': None if self.filter is None else self.filter.to_dict(),
            'offset': self.offset,
            'limit': self.limit,
            'hash': self.hash
        }

    def __bytes__(self):
        # TL wire format: constructor id, then the fields in schema order.
        return b''.join((
            b'\xe9\x05>\x12',
            bytes(self.channel),
            bytes(self.filter),
            struct.pack('<i', self.offset),
            struct.pack('<i', self.limit),
            struct.pack('<i', self.hash),
        ))

    @staticmethod
    def from_reader(reader):
        _channel = reader.tgread_object()
        _filter = reader.tgread_object()
        _offset = reader.read_int()
        _limit = reader.read_int()
        _hash = reader.read_int()
        return GetParticipantsRequest(channel=_channel, filter=_filter, offset=_offset, limit=_limit, hash=_hash)
class InviteToChannelRequest(TLObject):
    """Generated request object for channels.inviteToChannel."""
    CONSTRUCTOR_ID = 0x199f3a6c
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, channel, users):
        """
        :param InputChannel channel:
        :param list[InputUser] users:
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel
        self.users = users  # type: List[TypeInputUser]

    def resolve(self, client, utils):
        # Normalize the channel and every user into their Input* forms.
        self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
        self.users = [utils.get_input_user(client.get_input_entity(_x)) for _x in self.users]

    def to_dict(self):
        return {
            '_': 'InviteToChannelRequest',
            'channel': None if self.channel is None else self.channel.to_dict(),
            'users': [] if self.users is None else [None if x is None else x.to_dict() for x in self.users]
        }

    def __bytes__(self):
        # Constructor id, channel, then a TL vector of serialized users.
        return b''.join((
            b'l:\x9f\x19',
            bytes(self.channel),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.users)),b''.join(bytes(x) for x in self.users),
        ))

    @staticmethod
    def from_reader(reader):
        _channel = reader.tgread_object()
        reader.read_int()  # skip the vector constructor id
        _users = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _users.append(_x)
        return InviteToChannelRequest(channel=_channel, users=_users)
class JoinChannelRequest(TLObject):
    """Generated request object for channels.joinChannel."""
    CONSTRUCTOR_ID = 0x24b524c5
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, channel):
        """
        :param InputChannel channel:
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel

    def resolve(self, client, utils):
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        channel = self.channel.to_dict() if self.channel is not None else None
        return {'_': 'JoinChannelRequest', 'channel': channel}

    def __bytes__(self):
        # Constructor id followed by the serialized channel.
        return b'\xc5$\xb5$' + bytes(self.channel)

    @staticmethod
    def from_reader(reader):
        return JoinChannelRequest(channel=reader.tgread_object())
class LeaveChannelRequest(TLObject):
    CONSTRUCTOR_ID = 0xf836aa95
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, channel):
        """Request to leave the given channel.

        :param InputChannel channel:
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        super().__init__()
        self.result = None
        self.content_related = True
        self.channel = channel  # type: TypeInputChannel

    def resolve(self, client, utils):
        # Convert whatever the caller passed into a proper InputChannel.
        entity = client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        channel = self.channel.to_dict() if self.channel is not None else None
        return {'_': 'LeaveChannelRequest', 'channel': channel}

    def __bytes__(self):
        # Constructor ID (little-endian 0xf836aa95) followed by the channel.
        return b'\x95\xaa6\xf8' + bytes(self.channel)

    @staticmethod
    def from_reader(reader):
        return LeaveChannelRequest(channel=reader.tgread_object())
class ReadHistoryRequest(TLObject):
CONSTRUCTOR_ID = 0xcc104937
SUBCLASS_OF_ID = 0xf5b399ac
def __init__(self, channel, max_id):
"""
:param InputChannel channel:
:param int max_id:
:returns Bool: This type has no constructors.
"""
super().__init__()
self.result = None
self.content_related = True
self.channel = channel # type: TypeInputChannel
self.max_id = max_id # type: int
def resolve(self, client, utils):
self.channel = utils.get_input_channel(client.get_input_entity(self.channel))
def to_dict(self):
return {
'_': 'ReadHistoryRequest',
'channel': None if self.channel is None else | |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Authors: <NAME> <<EMAIL>>
import unittest
from utils_py import const
from utils_py import time_seg_util
class TestTimeSegUtil(unittest.TestCase):
def test_time_list_create(self):
first_minute_of_day = 0
self.assertEqual(time_seg_util.gen_still_seg_list(
first_minute_of_day, const.MINUTES_IN_A_DAY, seg_duration=30),
['00:00:00', '00:30:00', '01:00:00', '01:30:00',
'02:00:00', '02:30:00', '03:00:00', '03:30:00',
'04:00:00', '04:30:00', '05:00:00', '05:30:00',
'06:00:00', '06:30:00', '07:00:00', '07:30:00',
'08:00:00', '08:30:00', '09:00:00', '09:30:00',
'10:00:00', '10:30:00', '11:00:00', '11:30:00',
'12:00:00', '12:30:00', '13:00:00', '13:30:00',
'14:00:00', '14:30:00', '15:00:00', '15:30:00',
'16:00:00', '16:30:00', '17:00:00', '17:30:00',
'18:00:00', '18:30:00', '19:00:00', '19:30:00',
'20:00:00', '20:30:00', '21:00:00', '21:30:00',
'22:00:00', '22:30:00', '23:00:00', '23:30:00'])
self.assertEqual(time_seg_util.gen_slide_seg_list(
first_minute_of_day, const.MINUTES_IN_A_DAY, seg_duration=30,
slide_step=1),
['00:00:00', '00:01:00', '00:02:00', '00:03:00',
'00:04:00', '00:05:00', '00:06:00', '00:07:00',
'00:08:00', '00:09:00', '00:10:00', '00:11:00',
'00:12:00', '00:13:00', '00:14:00', '00:15:00',
'00:16:00', '00:17:00', '00:18:00', '00:19:00',
'00:20:00', '00:21:00', '00:22:00', '00:23:00',
'00:24:00', '00:25:00', '00:26:00', '00:27:00',
'00:28:00', '00:29:00', '00:30:00', '00:31:00',
'00:32:00', '00:33:00', '00:34:00', '00:35:00',
'00:36:00', '00:37:00', '00:38:00', '00:39:00',
'00:40:00', '00:41:00', '00:42:00', '00:43:00',
'00:44:00', '00:45:00', '00:46:00', '00:47:00',
'00:48:00', '00:49:00', '00:50:00', '00:51:00',
'00:52:00', '00:53:00', '00:54:00', '00:55:00',
'00:56:00', '00:57:00', '00:58:00', '00:59:00',
'01:00:00', '01:01:00', '01:02:00', '01:03:00',
'01:04:00', '01:05:00', '01:06:00', '01:07:00',
'01:08:00', '01:09:00', '01:10:00', '01:11:00',
'01:12:00', '01:13:00', '01:14:00', '01:15:00',
'01:16:00', '01:17:00', '01:18:00', '01:19:00',
'01:20:00', '01:21:00', '01:22:00', '01:23:00',
'01:24:00', '01:25:00', '01:26:00', '01:27:00',
'01:28:00', '01:29:00', '01:30:00', '01:31:00',
'01:32:00', '01:33:00', '01:34:00', '01:35:00',
'01:36:00', '01:37:00', '01:38:00', '01:39:00',
'01:40:00', '01:41:00', '01:42:00', '01:43:00',
'01:44:00', '01:45:00', '01:46:00', '01:47:00',
'01:48:00', '01:49:00', '01:50:00', '01:51:00',
'01:52:00', '01:53:00', '01:54:00', '01:55:00',
'01:56:00', '01:57:00', '01:58:00', '01:59:00',
'02:00:00', '02:01:00', '02:02:00', '02:03:00',
'02:04:00', '02:05:00', '02:06:00', '02:07:00',
'02:08:00', '02:09:00', '02:10:00', '02:11:00',
'02:12:00', '02:13:00', '02:14:00', '02:15:00',
'02:16:00', '02:17:00', '02:18:00', '02:19:00',
'02:20:00', '02:21:00', '02:22:00', '02:23:00',
'02:24:00', '02:25:00', '02:26:00', '02:27:00',
'02:28:00', '02:29:00', '02:30:00', '02:31:00',
'02:32:00', '02:33:00', '02:34:00', '02:35:00',
'02:36:00', '02:37:00', '02:38:00', '02:39:00',
'02:40:00', '02:41:00', '02:42:00', '02:43:00',
'02:44:00', '02:45:00', '02:46:00', '02:47:00',
'02:48:00', '02:49:00', '02:50:00', '02:51:00',
'02:52:00', '02:53:00', '02:54:00', '02:55:00',
'02:56:00', '02:57:00', '02:58:00', '02:59:00',
'03:00:00', '03:01:00', '03:02:00', '03:03:00',
'03:04:00', '03:05:00', '03:06:00', '03:07:00',
'03:08:00', '03:09:00', '03:10:00', '03:11:00',
'03:12:00', '03:13:00', '03:14:00', '03:15:00',
'03:16:00', '03:17:00', '03:18:00', '03:19:00',
'03:20:00', '03:21:00', '03:22:00', '03:23:00',
'03:24:00', '03:25:00', '03:26:00', '03:27:00',
'03:28:00', '03:29:00', '03:30:00', '03:31:00',
'03:32:00', '03:33:00', '03:34:00', '03:35:00',
'03:36:00', '03:37:00', '03:38:00', '03:39:00',
'03:40:00', '03:41:00', '03:42:00', '03:43:00',
'03:44:00', '03:45:00', '03:46:00', '03:47:00',
'03:48:00', '03:49:00', '03:50:00', '03:51:00',
'03:52:00', '03:53:00', '03:54:00', '03:55:00',
'03:56:00', '03:57:00', '03:58:00', '03:59:00',
'04:00:00', '04:01:00', '04:02:00', '04:03:00',
'04:04:00', '04:05:00', '04:06:00', '04:07:00',
'04:08:00', '04:09:00', '04:10:00', '04:11:00',
'04:12:00', '04:13:00', '04:14:00', '04:15:00',
'04:16:00', '04:17:00', '04:18:00', '04:19:00',
'04:20:00', '04:21:00', '04:22:00', '04:23:00',
'04:24:00', '04:25:00', '04:26:00', '04:27:00',
'04:28:00', '04:29:00', '04:30:00', '04:31:00',
'04:32:00', '04:33:00', '04:34:00', '04:35:00',
'04:36:00', '04:37:00', '04:38:00', '04:39:00',
'04:40:00', '04:41:00', '04:42:00', '04:43:00',
'04:44:00', '04:45:00', '04:46:00', '04:47:00',
'04:48:00', '04:49:00', '04:50:00', '04:51:00',
'04:52:00', '04:53:00', '04:54:00', '04:55:00',
'04:56:00', '04:57:00', '04:58:00', '04:59:00',
'05:00:00', '05:01:00', '05:02:00', '05:03:00',
'05:04:00', '05:05:00', '05:06:00', '05:07:00',
'05:08:00', '05:09:00', '05:10:00', '05:11:00',
'05:12:00', '05:13:00', '05:14:00', '05:15:00',
'05:16:00', '05:17:00', '05:18:00', '05:19:00',
'05:20:00', '05:21:00', '05:22:00', '05:23:00',
'05:24:00', '05:25:00', '05:26:00', '05:27:00',
'05:28:00', '05:29:00', '05:30:00', '05:31:00',
'05:32:00', '05:33:00', '05:34:00', '05:35:00',
'05:36:00', '05:37:00', '05:38:00', '05:39:00',
'05:40:00', '05:41:00', '05:42:00', '05:43:00',
'05:44:00', '05:45:00', '05:46:00', '05:47:00',
'05:48:00', '05:49:00', '05:50:00', '05:51:00',
'05:52:00', '05:53:00', '05:54:00', '05:55:00',
'05:56:00', '05:57:00', '05:58:00', '05:59:00',
'06:00:00', '06:01:00', '06:02:00', '06:03:00',
'06:04:00', '06:05:00', '06:06:00', '06:07:00',
'06:08:00', '06:09:00', '06:10:00', '06:11:00',
'06:12:00', '06:13:00', '06:14:00', '06:15:00',
'06:16:00', '06:17:00', '06:18:00', '06:19:00',
'06:20:00', '06:21:00', '06:22:00', '06:23:00',
'06:24:00', '06:25:00', '06:26:00', '06:27:00',
'06:28:00', '06:29:00', '06:30:00', '06:31:00',
'06:32:00', '06:33:00', '06:34:00', '06:35:00',
'06:36:00', '06:37:00', '06:38:00', '06:39:00',
'06:40:00', '06:41:00', '06:42:00', '06:43:00',
'06:44:00', '06:45:00', '06:46:00', '06:47:00',
'06:48:00', '06:49:00', '06:50:00', '06:51:00',
'06:52:00', '06:53:00', '06:54:00', '06:55:00',
'06:56:00', '06:57:00', '06:58:00', '06:59:00',
'07:00:00', '07:01:00', '07:02:00', '07:03:00',
'07:04:00', '07:05:00', '07:06:00', '07:07:00',
'07:08:00', '07:09:00', '07:10:00', '07:11:00',
'07:12:00', '07:13:00', '07:14:00', '07:15:00',
'07:16:00', '07:17:00', '07:18:00', '07:19:00',
'07:20:00', '07:21:00', '07:22:00', '07:23:00',
'07:24:00', '07:25:00', '07:26:00', '07:27:00',
'07:28:00', '07:29:00', '07:30:00', '07:31:00',
'07:32:00', '07:33:00', '07:34:00', '07:35:00',
'07:36:00', '07:37:00', '07:38:00', '07:39:00',
'07:40:00', '07:41:00', '07:42:00', '07:43:00',
'07:44:00', '07:45:00', '07:46:00', '07:47:00',
'07:48:00', '07:49:00', '07:50:00', '07:51:00',
'07:52:00', '07:53:00', '07:54:00', '07:55:00',
'07:56:00', '07:57:00', '07:58:00', '07:59:00',
'08:00:00', '08:01:00', '08:02:00', '08:03:00',
'08:04:00', '08:05:00', '08:06:00', '08:07:00',
'08:08:00', '08:09:00', '08:10:00', '08:11:00',
'08:12:00', '08:13:00', '08:14:00', '08:15:00',
'08:16:00', '08:17:00', '08:18:00', '08:19:00',
'08:20:00', '08:21:00', '08:22:00', '08:23:00',
'08:24:00', '08:25:00', '08:26:00', '08:27:00',
'08:28:00', '08:29:00', '08:30:00', '08:31:00',
'08:32:00', '08:33:00', '08:34:00', '08:35:00',
'08:36:00', '08:37:00', '08:38:00', '08:39:00',
'08:40:00', '08:41:00', '08:42:00', '08:43:00',
'08:44:00', '08:45:00', '08:46:00', '08:47:00',
'08:48:00', '08:49:00', '08:50:00', '08:51:00',
'08:52:00', '08:53:00', '08:54:00', '08:55:00',
'08:56:00', '08:57:00', '08:58:00', '08:59:00',
'09:00:00', '09:01:00', '09:02:00', '09:03:00',
'09:04:00', '09:05:00', '09:06:00', '09:07:00',
'09:08:00', '09:09:00', '09:10:00', '09:11:00',
'09:12:00', '09:13:00', '09:14:00', '09:15:00',
'09:16:00', '09:17:00', '09:18:00', '09:19:00',
'09:20:00', '09:21:00', '09:22:00', '09:23:00',
'09:24:00', '09:25:00', '09:26:00', '09:27:00',
'09:28:00', '09:29:00', '09:30:00', '09:31:00',
'09:32:00', '09:33:00', '09:34:00', '09:35:00',
'09:36:00', '09:37:00', '09:38:00', '09:39:00',
'09:40:00', '09:41:00', '09:42:00', '09:43:00',
'09:44:00', '09:45:00', '09:46:00', '09:47:00',
'09:48:00', '09:49:00', '09:50:00', '09:51:00',
'09:52:00', '09:53:00', '09:54:00', '09:55:00',
'09:56:00', '09:57:00', '09:58:00', '09:59:00',
'10:00:00', '10:01:00', '10:02:00', '10:03:00',
'10:04:00', '10:05:00', '10:06:00', '10:07:00',
'10:08:00', '10:09:00', '10:10:00', '10:11:00',
'10:12:00', '10:13:00', '10:14:00', '10:15:00',
'10:16:00', '10:17:00', '10:18:00', '10:19:00',
'10:20:00', '10:21:00', '10:22:00', '10:23:00',
'10:24:00', '10:25:00', '10:26:00', '10:27:00',
'10:28:00', '10:29:00', '10:30:00', '10:31:00',
'10:32:00', '10:33:00', '10:34:00', '10:35:00',
'10:36:00', '10:37:00', '10:38:00', '10:39:00',
'10:40:00', '10:41:00', '10:42:00', '10:43:00',
'10:44:00', '10:45:00', '10:46:00', '10:47:00',
'10:48:00', '10:49:00', '10:50:00', '10:51:00',
'10:52:00', '10:53:00', '10:54:00', '10:55:00',
'10:56:00', '10:57:00', '10:58:00', '10:59:00',
'11:00:00', '11:01:00', '11:02:00', '11:03:00',
'11:04:00', '11:05:00', '11:06:00', '11:07:00',
'11:08:00', '11:09:00', '11:10:00', '11:11:00',
'11:12:00', '11:13:00', '11:14:00', '11:15:00',
'11:16:00', '11:17:00', '11:18:00', '11:19:00',
'11:20:00', '11:21:00', '11:22:00', '11:23:00',
'11:24:00', '11:25:00', '11:26:00', '11:27:00',
'11:28:00', '11:29:00', '11:30:00', '11:31:00',
'11:32:00', '11:33:00', '11:34:00', '11:35:00',
'11:36:00', '11:37:00', '11:38:00', '11:39:00',
'11:40:00', '11:41:00', '11:42:00', '11:43:00',
'11:44:00', '11:45:00', '11:46:00', '11:47:00',
'11:48:00', '11:49:00', '11:50:00', '11:51:00',
'11:52:00', '11:53:00', '11:54:00', '11:55:00',
'11:56:00', '11:57:00', '11:58:00', '11:59:00',
'12:00:00', '12:01:00', '12:02:00', '12:03:00',
'12:04:00', '12:05:00', '12:06:00', '12:07:00',
'12:08:00', '12:09:00', '12:10:00', '12:11:00',
'12:12:00', '12:13:00', '12:14:00', '12:15:00',
'12:16:00', '12:17:00', '12:18:00', '12:19:00',
'12:20:00', '12:21:00', '12:22:00', '12:23:00',
'12:24:00', '12:25:00', '12:26:00', '12:27:00',
'12:28:00', '12:29:00', '12:30:00', '12:31:00',
'12:32:00', '12:33:00', '12:34:00', '12:35:00',
'12:36:00', '12:37:00', '12:38:00', '12:39:00',
'12:40:00', '12:41:00', '12:42:00', '12:43:00',
'12:44:00', '12:45:00', '12:46:00', '12:47:00',
'12:48:00', '12:49:00', '12:50:00', '12:51:00',
'12:52:00', '12:53:00', '12:54:00', '12:55:00',
'12:56:00', '12:57:00', '12:58:00', '12:59:00',
'13:00:00', '13:01:00', '13:02:00', '13:03:00',
'13:04:00', '13:05:00', '13:06:00', '13:07:00',
'13:08:00', '13:09:00', '13:10:00', '13:11:00',
'13:12:00', '13:13:00', '13:14:00', '13:15:00',
'13:16:00', '13:17:00', '13:18:00', '13:19:00',
'13:20:00', '13:21:00', '13:22:00', '13:23:00',
'13:24:00', '13:25:00', '13:26:00', '13:27:00',
'13:28:00', '13:29:00', '13:30:00', '13:31:00',
'13:32:00', '13:33:00', '13:34:00', '13:35:00',
'13:36:00', '13:37:00', '13:38:00', '13:39:00',
'13:40:00', '13:41:00', '13:42:00', '13:43:00',
'13:44:00', '13:45:00', '13:46:00', '13:47:00',
'13:48:00', '13:49:00', '13:50:00', '13:51:00',
'13:52:00', '13:53:00', '13:54:00', '13:55:00',
'13:56:00', '13:57:00', '13:58:00', '13:59:00',
'14:00:00', '14:01:00', '14:02:00', '14:03:00',
'14:04:00', '14:05:00', '14:06:00', '14:07:00',
'14:08:00', '14:09:00', '14:10:00', '14:11:00',
'14:12:00', '14:13:00', '14:14:00', '14:15:00',
'14:16:00', '14:17:00', '14:18:00', '14:19:00',
'14:20:00', '14:21:00', '14:22:00', '14:23:00',
'14:24:00', '14:25:00', '14:26:00', '14:27:00',
'14:28:00', '14:29:00', '14:30:00', '14:31:00',
'14:32:00', '14:33:00', '14:34:00', '14:35:00',
'14:36:00', '14:37:00', '14:38:00', '14:39:00',
'14:40:00', '14:41:00', '14:42:00', '14:43:00',
'14:44:00', '14:45:00', '14:46:00', '14:47:00',
'14:48:00', '14:49:00', '14:50:00', '14:51:00',
'14:52:00', '14:53:00', '14:54:00', '14:55:00',
'14:56:00', '14:57:00', '14:58:00', '14:59:00',
'15:00:00', '15:01:00', '15:02:00', '15:03:00',
'15:04:00', '15:05:00', '15:06:00', '15:07:00',
'15:08:00', '15:09:00', '15:10:00', '15:11:00',
'15:12:00', '15:13:00', '15:14:00', '15:15:00',
'15:16:00', '15:17:00', '15:18:00', '15:19:00',
'15:20:00', '15:21:00', '15:22:00', '15:23:00',
'15:24:00', '15:25:00', '15:26:00', '15:27:00',
'15:28:00', '15:29:00', '15:30:00', '15:31:00',
'15:32:00', '15:33:00', '15:34:00', '15:35:00',
'15:36:00', '15:37:00', '15:38:00', '15:39:00',
'15:40:00', '15:41:00', '15:42:00', '15:43:00',
'15:44:00', '15:45:00', '15:46:00', '15:47:00',
'15:48:00', '15:49:00', '15:50:00', '15:51:00',
'15:52:00', '15:53:00', '15:54:00', '15:55:00',
'15:56:00', '15:57:00', '15:58:00', '15:59:00',
'16:00:00', '16:01:00', '16:02:00', '16:03:00',
'16:04:00', '16:05:00', '16:06:00', '16:07:00',
'16:08:00', '16:09:00', '16:10:00', '16:11:00',
'16:12:00', '16:13:00', '16:14:00', '16:15:00',
'16:16:00', '16:17:00', '16:18:00', '16:19:00',
'16:20:00', '16:21:00', '16:22:00', '16:23:00',
'16:24:00', '16:25:00', '16:26:00', '16:27:00',
'16:28:00', '16:29:00', '16:30:00', '16:31:00',
'16:32:00', '16:33:00', '16:34:00', '16:35:00',
'16:36:00', '16:37:00', '16:38:00', '16:39:00',
'16:40:00', '16:41:00', '16:42:00', '16:43:00',
'16:44:00', '16:45:00', '16:46:00', '16:47:00',
'16:48:00', '16:49:00', '16:50:00', '16:51:00',
'16:52:00', '16:53:00', '16:54:00', '16:55:00',
'16:56:00', '16:57:00', '16:58:00', '16:59:00',
'17:00:00', '17:01:00', '17:02:00', '17:03:00',
'17:04:00', '17:05:00', '17:06:00', '17:07:00',
'17:08:00', '17:09:00', '17:10:00', '17:11:00',
'17:12:00', '17:13:00', '17:14:00', '17:15:00',
'17:16:00', '17:17:00', '17:18:00', '17:19:00',
'17:20:00', '17:21:00', '17:22:00', '17:23:00',
'17:24:00', '17:25:00', '17:26:00', '17:27:00',
'17:28:00', '17:29:00', '17:30:00', '17:31:00',
'17:32:00', '17:33:00', '17:34:00', '17:35:00',
'17:36:00', '17:37:00', '17:38:00', '17:39:00',
'17:40:00', '17:41:00', '17:42:00', '17:43:00',
'17:44:00', '17:45:00', '17:46:00', '17:47:00',
'17:48:00', '17:49:00', '17:50:00', '17:51:00',
'17:52:00', '17:53:00', '17:54:00', '17:55:00',
'17:56:00', '17:57:00', '17:58:00', '17:59:00',
'18:00:00', '18:01:00', '18:02:00', '18:03:00',
'18:04:00', '18:05:00', '18:06:00', '18:07:00',
'18:08:00', '18:09:00', '18:10:00', '18:11:00',
'18:12:00', '18:13:00', '18:14:00', '18:15:00',
'18:16:00', '18:17:00', '18:18:00', '18:19:00',
'18:20:00', '18:21:00', '18:22:00', '18:23:00',
'18:24:00', '18:25:00', '18:26:00', '18:27:00',
'18:28:00', '18:29:00', '18:30:00', '18:31:00',
'18:32:00', '18:33:00', '18:34:00', '18:35:00',
'18:36:00', '18:37:00', '18:38:00', '18:39:00',
'18:40:00', '18:41:00', '18:42:00', '18:43:00',
'18:44:00', '18:45:00', '18:46:00', '18:47:00',
'18:48:00', '18:49:00', '18:50:00', '18:51:00',
'18:52:00', '18:53:00', '18:54:00', '18:55:00',
'18:56:00', '18:57:00', '18:58:00', '18:59:00',
'19:00:00', '19:01:00', '19:02:00', '19:03:00',
'19:04:00', '19:05:00', '19:06:00', '19:07:00',
'19:08:00', '19:09:00', '19:10:00', '19:11:00',
'19:12:00', '19:13:00', '19:14:00', '19:15:00',
'19:16:00', '19:17:00', '19:18:00', '19:19:00',
'19:20:00', '19:21:00', '19:22:00', '19:23:00',
'19:24:00', '19:25:00', '19:26:00', '19:27:00',
'19:28:00', '19:29:00', '19:30:00', '19:31:00',
'19:32:00', '19:33:00', '19:34:00', '19:35:00',
'19:36:00', '19:37:00', '19:38:00', '19:39:00',
'19:40:00', '19:41:00', '19:42:00', '19:43:00',
'19:44:00', '19:45:00', '19:46:00', '19:47:00',
'19:48:00', '19:49:00', '19:50:00', '19:51:00',
'19:52:00', '19:53:00', '19:54:00', '19:55:00',
'19:56:00', '19:57:00', '19:58:00', '19:59:00',
'20:00:00', '20:01:00', | |
than alat!!
info['celldm(1)'] = float(line.split()[1]) * units['Bohr']
info['alat'] = info['celldm(1)']
elif 'number of atoms/cell' in line:
info['nat'] = int(line.split()[-1])
elif 'number of atomic types' in line:
info['ntyp'] = int(line.split()[-1])
elif 'crystal axes:' in line:
info['cell'] = info['celldm(1)'] * np.array([
[float(x) for x in lines[idx + 1].split()[3:6]],
[float(x) for x in lines[idx + 2].split()[3:6]],
[float(x) for x in lines[idx + 3].split()[3:6]]])
elif 'positions (alat units)' in line:
info['symbols'] = [
label_to_symbol(at_line.split()[1])
for at_line in lines[idx + 1:idx + 1 + info['nat']]]
info['positions'] = [
[float(x) * info['celldm(1)'] for x in at_line.split()[6:9]]
for at_line in lines[idx + 1:idx + 1 + info['nat']]]
# This should be the end of interesting info.
# Break here to avoid dealing with large lists of kpoints.
# Will need to be extended for DFTCalculator info.
break
# Make atoms for convenience
info['atoms'] = Atoms(symbols=info['symbols'],
positions=info['positions'],
cell=info['cell'], pbc=True)
return info
def read_espresso_in(fileobj):
    """Parse a Quantum ESPRESSO input files, '.in', '.pwi'.

    ESPRESSO inputs are generally a fortran-namelist format with custom
    blocks of data. The namelist is parsed as a dict and an atoms object
    is constructed from the included information.

    Parameters
    ----------
    fileobj : file | str
        A file-like object that supports line iteration with the contents
        of the input file, or a filename.

    Returns
    -------
    atoms : Atoms
        Structure defined in the input file.

    Raises
    ------
    KeyError
        Raised for missing keys that are required to process the file
    """
    # TODO: use ase opening mechanisms
    if isinstance(fileobj, basestring):
        fileobj = open(fileobj, 'rU')

    # Namelist sections become `data`; the remaining card lines hold the
    # structural blocks (CELL_PARAMETERS, ATOMIC_POSITIONS, ...).
    data, card_lines = read_fortran_namelist(fileobj)

    if 'system' not in data:
        raise KeyError('Required section &SYSTEM not found.')
    system = data['system']
    if 'ibrav' not in system:
        raise KeyError('ibrav is required in &SYSTEM')

    if system['ibrav'] == 0:
        # Explicit cell from the CELL_PARAMETERS card. celldm(1) is in
        # Bohr, A is in angstrom; celldm(1) wins if both are present.
        if 'celldm(1)' in system:
            alat = system['celldm(1)'] * units['Bohr']
        elif 'A' in system:
            alat = system['A']
        else:
            alat = None
        cell, _cell_alat = get_cell_parameters(card_lines, alat=alat)
    else:
        # Bravais-lattice shorthand.
        alat, cell = ibrav_to_cell(system)

    positions_card = get_atomic_positions(
        card_lines, n_atoms=system['nat'], cell=cell, alat=alat)
    symbols = [label_to_symbol(entry[0]) for entry in positions_card]
    positions = [entry[1] for entry in positions_card]

    # TODO: put more info into the atoms object
    # e.g magmom, force constraints
    return Atoms(symbols=symbols, positions=positions, cell=cell, pbc=True)
def ibrav_to_cell(system):
    """
    Convert a value of ibrav to a cell. Any unspecified lattice dimension
    is set to 0.0, but will not necessarily raise an error. Also return the
    lattice parameter.

    Parameters
    ----------
    system : dict
        The &SYSTEM section of the input file, containing the 'ibrav' setting,
        and either celldm(1)..(6) or a, b, c, cosAB, cosAC, cosBC.

    Returns
    -------
    alat, cell : float, np.array
        Cell parameter in Angstrom, and
        The 3x3 array representation of the cell.

    Raises
    ------
    KeyError
        Raise an error if any required keys are missing.
    NotImplementedError
        Only a limited number of ibrav settings can be parsed. An error
        is raised if the ibrav interpretation is not implemented.
    """
    if 'celldm(1)' in system and 'a' in system:
        raise KeyError('do not specify both celldm and a,b,c!')
    elif 'celldm(1)' in system:
        # celldm(x) in bohr; convert the lattice parameter to Angstrom.
        alat = system['celldm(1)'] * units['Bohr']
        b_over_a = system.get('celldm(2)', 0.0)
        c_over_a = system.get('celldm(3)', 0.0)
        cosab = system.get('celldm(4)', 0.0)
        cosac = system.get('celldm(5)', 0.0)
        cosbc = 0.0
        if system['ibrav'] == 14:
            # Triclinic uses all three angles: celldm(4..6) are
            # cosBC, cosAC, cosAB respectively.
            cosbc = system.get('celldm(4)', 0.0)
            cosac = system.get('celldm(5)', 0.0)
            cosab = system.get('celldm(6)', 0.0)
    elif 'a' in system:
        # a, b, c, cosAB, cosAC, cosBC in Angstrom
        alat = system['a']
        b_over_a = system.get('b', 0.0) / alat
        c_over_a = system.get('c', 0.0) / alat
        cosab = system.get('cosab', 0.0)
        cosac = system.get('cosac', 0.0)
        cosbc = system.get('cosbc', 0.0)
    else:
        raise KeyError("Missing celldm(1) or a cell parameter.")

    if system['ibrav'] == 1:
        # simple cubic
        cell = np.identity(3) * alat
    elif system['ibrav'] == 2:
        # face-centred cubic
        cell = np.array([[-1.0, 0.0, 1.0],
                         [0.0, 1.0, 1.0],
                         [-1.0, 1.0, 0.0]]) * (alat / 2)
    elif system['ibrav'] == 3:
        # body-centred cubic
        cell = np.array([[1.0, 1.0, 1.0],
                         [-1.0, 1.0, 1.0],
                         [-1.0, -1.0, 1.0]]) * (alat / 2)
    elif system['ibrav'] == -3:
        # body-centred cubic, alternative axes
        cell = np.array([[-1.0, 1.0, 1.0],
                         [1.0, -1.0, 1.0],
                         [1.0, 1.0, -1.0]]) * (alat / 2)
    elif system['ibrav'] == 4:
        # hexagonal
        cell = np.array([[1.0, 0.0, 0.0],
                         [-0.5, 0.5*3**0.5, 0.0],
                         [0.0, 0.0, c_over_a]]) * alat
    elif system['ibrav'] == 5:
        # trigonal, threefold axis along c
        tx = ((1.0 - cosab) / 2.0)**0.5
        ty = ((1.0 - cosab) / 6.0)**0.5
        tz = ((1 + 2 * cosab) / 3.0)**0.5
        cell = np.array([[tx, -ty, tz],
                         [0, 2*ty, tz],
                         [-tx, -ty, tz]]) * alat
    elif system['ibrav'] == -5:
        # trigonal, threefold axis along <111>
        ty = ((1.0 - cosab) / 6.0)**0.5
        tz = ((1 + 2 * cosab) / 3.0)**0.5
        a_prime = alat / 3**0.5
        u = tz - 2 * 2**0.5 * ty
        v = tz + 2**0.5 * ty
        cell = np.array([[u, v, v],
                         [v, u, v],
                         [v, v, u]]) * a_prime
    elif system['ibrav'] == 6:
        # simple tetragonal
        cell = np.array([[1.0, 0.0, 0.0],
                         [0.0, 1.0, 0.0],
                         [0.0, 0.0, c_over_a]]) * alat
    elif system['ibrav'] == 7:
        # body-centred tetragonal
        cell = np.array([[1.0, -1.0, c_over_a],
                         [1.0, 1.0, c_over_a],
                         [-1.0, -1.0, c_over_a]]) * (alat / 2)
    elif system['ibrav'] == 8:
        # simple orthorhombic
        cell = np.array([[1.0, 0.0, 0.0],
                         [0.0, b_over_a, 0.0],
                         [0.0, 0.0, c_over_a]]) * alat
    elif system['ibrav'] == 9:
        # base-centred orthorhombic
        cell = np.array([[1.0 / 2.0, b_over_a / 2.0, 0.0],
                         [-1.0 / 2.0, b_over_a / 2.0, 0.0],
                         [0.0, 0.0, c_over_a]]) * alat
    elif system['ibrav'] == -9:
        # base-centred orthorhombic, alternative description
        cell = np.array([[1.0 / 2.0, -b_over_a / 2.0, 0.0],
                         [1.0 / 2.0, b_over_a / 2.0, 0.0],
                         [0.0, 0.0, c_over_a]]) * alat
    elif system['ibrav'] == 10:
        # face-centred orthorhombic
        cell = np.array([[1.0 / 2.0, 0.0, c_over_a/2.0],
                         [1.0 / 2.0, b_over_a / 2.0, 0.0],
                         [0.0, b_over_a / 2.0, c_over_a / 2.0]]) * alat
    elif system['ibrav'] == 11:
        # body-centred orthorhombic.
        # BUGFIX: the third lattice vector was previously a malformed
        # 4-element row [-1.0, 2.0, -b/2, c/2]; it must be (-a/2, -b/2, c/2).
        cell = np.array([[1.0 / 2.0, b_over_a / 2.0, c_over_a / 2.0],
                         [-1.0 / 2.0, b_over_a / 2.0, c_over_a / 2.0],
                         [-1.0 / 2.0, -b_over_a / 2.0, c_over_a / 2.0]]) * alat
    elif system['ibrav'] == 12:
        # monoclinic, unique axis c
        sinab = (1.0 - cosab**2)**0.5
        cell = np.array([[1.0, 0.0, 0.0],
                         [b_over_a * cosab, b_over_a * sinab, 0.0],
                         [0.0, 0.0, c_over_a]]) * alat
    elif system['ibrav'] == -12:
        # monoclinic, unique axis b
        sinac = (1.0 - cosac**2)**0.5
        cell = np.array([[1.0, 0.0, 0.0],
                         [0.0, b_over_a, 0.0],
                         [c_over_a * cosac, 0.0, c_over_a * sinac]]) * alat
    elif system['ibrav'] == 13:
        # base-centred monoclinic
        sinab = (1.0 - cosab**2)**0.5
        cell = np.array([[1.0 / 2.0, 0.0, -c_over_a / 2.0],
                         [b_over_a * cosab, b_over_a * sinab, 0.0],
                         [1.0 / 2.0, 0.0, c_over_a / 2.0]]) * alat
    elif system['ibrav'] == 14:
        # triclinic
        sinab = (1.0 - cosab**2)**0.5
        v3 = [c_over_a * cosac,
              c_over_a * (cosbc - cosac * cosab) / sinab,
              c_over_a * ((1 + 2 * cosbc * cosac * cosab
                           - cosbc**2 - cosac**2 - cosab**2)**0.5) / sinab]
        cell = np.array([[1.0, 0.0, 0.0],
                         [b_over_a * cosab, b_over_a * sinab, 0.0],
                         v3]) * alat
    else:
        raise NotImplementedError('ibrav = {0} is not implemented'
                                  ''.format(system['ibrav']))
    return alat, cell
def get_atomic_positions(lines, n_atoms, cell=None, alat=None):
"""Parse atom positions from ATOMIC_POSITIONS card.
Parameters
----------
lines : list[str]
A list of lines containing the ATOMIC_POSITIONS card.
n_atoms : int
Expected number of atoms. Only this many lines will be parsed.
cell : np.array
Unit cell of the crystal. Only used with crystal coordinates.
alat : float
Lattice parameter for atomic coordinates. Only used for alat case.
Returns
-------
positions : list[(str, (float, float, float), (float, float, float))]
A list of the ordered atomic positions in the format:
label, (x, y, z), (if_x, if_y, if_z)
Force multipliers are set to None if not present.
Raises
------
ValueError
Any problems parsing the data result in ValueError
"""
positions = None
# no blanks or comment lines, can the consume n_atoms lines for positions
trimmed_lines = (line for line in lines
if line.strip() and not line[0] == '#')
for line in trimmed_lines:
if line.strip().startswith('ATOMIC_POSITIONS'):
if positions is not None:
raise ValueError('Multiple ATOMIC_POSITIONS specified')
# Priority and behaviour tested with QE 5.3
if 'crystal_sg' in line.lower():
raise NotImplementedError('CRYSTAL_SG not implemented')
elif 'crystal' in line.lower():
cell = cell
elif | |
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import logging
import math
from unittest import TestCase
import pytest
import torch
from torch.distributions import constraints
import pyro
import pyro.contrib.gp.kernels as kernels
import pyro.distributions as dist
import pyro.optim as optim
from pyro import poutine
from pyro.distributions.testing import fakes
from pyro.distributions.testing.rejection_gamma import ShapeAugmentedGamma
from pyro.infer import (SVI, EnergyDistance, JitTrace_ELBO, JitTraceEnum_ELBO, JitTraceGraph_ELBO, RenyiELBO,
ReweightedWakeSleep, Trace_ELBO, Trace_MMD, TraceEnum_ELBO, TraceGraph_ELBO,
TraceMeanField_ELBO, TraceTailAdaptive_ELBO)
from pyro.infer.autoguide import AutoDelta
from pyro.infer.reparam import LatentStableReparam
from pyro.infer.util import torch_item
from tests.common import assert_close, assert_equal, xfail_if_not_implemented, xfail_param
logger = logging.getLogger(__name__)
def param_mse(name, target):
    """Sum of squared differences between pyro param `name` and `target`."""
    residual = target - pyro.param(name)
    return (residual ** 2).sum().item()
def param_abs_error(name, target):
    """Sum of absolute differences between pyro param `name` and `target`."""
    return (target - pyro.param(name)).abs().sum().item()
@pytest.mark.stage("integration", "integration_batch_1")
class NormalNormalTests(TestCase):
    """SVI inference on a conjugate normal-normal model with known precision.

    The exact posterior is available in closed form (computed in setUp), so
    each test checks that the fitted variational parameters converge to the
    analytic posterior location / log-scale.
    """
    def setUp(self):
        # normal-normal; known covariance
        self.lam0 = torch.tensor([0.1, 0.1]) # precision of prior
        self.loc0 = torch.tensor([0.0, 0.5]) # prior mean
        # known precision of observation noise
        self.lam = torch.tensor([6.0, 4.0])
        self.data = torch.tensor([[-0.1, 0.3],
                                  [0.00, 0.4],
                                  [0.20, 0.5],
                                  [0.10, 0.7]])
        self.n_data = torch.tensor([float(len(self.data))])
        self.data_sum = self.data.sum(0)
        # Closed-form posterior of the conjugate model: precisions add, and
        # the mean is a precision-weighted average of data and prior.
        self.analytic_lam_n = self.lam0 + self.n_data.expand_as(self.lam) * self.lam
        self.analytic_log_sig_n = -0.5 * torch.log(self.analytic_lam_n)
        self.analytic_loc_n = self.data_sum * (self.lam / self.analytic_lam_n) +\
            self.loc0 * (self.lam0 / self.analytic_lam_n)
        self.batch_size = 4
        self.sample_batch_size = 2

    def test_elbo_reparameterized(self):
        self.do_elbo_test(True, 5000, Trace_ELBO())

    def test_elbo_analytic_kl(self):
        self.do_elbo_test(True, 3000, TraceMeanField_ELBO())

    def test_elbo_tail_adaptive(self):
        self.do_elbo_test(True, 3000, TraceTailAdaptive_ELBO(num_particles=10, vectorize_particles=True))

    def test_elbo_nonreparameterized(self):
        # Score-function gradients are noisier, hence more steps.
        self.do_elbo_test(False, 15000, Trace_ELBO())

    def test_renyi_reparameterized(self):
        self.do_elbo_test(True, 2500, RenyiELBO(num_particles=3, vectorize_particles=False))

    def test_renyi_nonreparameterized(self):
        self.do_elbo_test(False, 7500, RenyiELBO(num_particles=3, vectorize_particles=True))

    def test_rws_reparameterized(self):
        self.do_elbo_test(True, 2500, ReweightedWakeSleep(num_particles=3))

    def test_rws_nonreparameterized(self):
        self.do_elbo_test(False, 7500, ReweightedWakeSleep(num_particles=3))

    def test_mmd_vectorized(self):
        # RBF kernel lengthscale scaled as sqrt(dim) of the latent.
        z_size = self.loc0.shape[0]
        self.do_fit_prior_test(
            True, 1000, Trace_MMD(
                kernel=kernels.RBF(
                    z_size,
                    lengthscale=torch.sqrt(torch.tensor(z_size, dtype=torch.float))
                ), vectorize_particles=True, num_particles=100
            )
        )

    def test_mmd_nonvectorized(self):
        z_size = self.loc0.shape[0]
        self.do_fit_prior_test(
            True, 100, Trace_MMD(
                kernel=kernels.RBF(
                    z_size,
                    lengthscale=torch.sqrt(torch.tensor(z_size, dtype=torch.float))
                ), vectorize_particles=False, num_particles=100
            ), lr=0.0146
        )

    def do_elbo_test(self, reparameterized, n_steps, loss):
        """Fit a mean-field normal guide with SVI for `n_steps` and compare the
        learned loc/log-scale to the analytic posterior from setUp."""
        pyro.clear_param_store()

        def model():
            loc_latent = pyro.sample("loc_latent",
                                     dist.Normal(self.loc0, torch.pow(self.lam0, -0.5))
                                     .to_event(1))
            with pyro.plate('data', self.batch_size):
                pyro.sample("obs",
                            dist.Normal(loc_latent, torch.pow(self.lam, -0.5)).to_event(1),
                            obs=self.data)
            return loc_latent

        def guide():
            # Initialized slightly off the analytic optimum so that
            # convergence to it is non-trivial.
            loc_q = pyro.param("loc_q", self.analytic_loc_n.detach() + 0.134)
            log_sig_q = pyro.param("log_sig_q", self.analytic_log_sig_n.data.detach() - 0.14)
            sig_q = torch.exp(log_sig_q)
            Normal = dist.Normal if reparameterized else fakes.NonreparameterizedNormal
            pyro.sample("loc_latent", Normal(loc_q, sig_q).to_event(1))

        adam = optim.Adam({"lr": .001})
        svi = SVI(model, guide, adam, loss=loss)
        for k in range(n_steps):
            svi.step()
        loc_error = param_mse("loc_q", self.analytic_loc_n)
        log_sig_error = param_mse("log_sig_q", self.analytic_log_sig_n)
        assert_equal(0.0, loc_error, prec=0.05)
        assert_equal(0.0, log_sig_error, prec=0.05)

    def do_fit_prior_test(self, reparameterized, n_steps, loss, debug=False, lr=0.001):
        """Fit the guide against the prior alone (no observations) and check
        the learned parameters recover the prior's loc/log-scale."""
        pyro.clear_param_store()

        def model():
            with pyro.plate('samples', self.sample_batch_size):
                pyro.sample(
                    "loc_latent", dist.Normal(
                        torch.stack([self.loc0]*self.sample_batch_size, dim=0),
                        torch.stack([torch.pow(self.lam0, -0.5)]*self.sample_batch_size, dim=0)
                    ).to_event(1)
                )

        def guide():
            loc_q = pyro.param("loc_q", self.loc0.detach() + 0.134)
            log_sig_q = pyro.param("log_sig_q", -0.5*torch.log(self.lam0).data.detach() - 0.14)
            sig_q = torch.exp(log_sig_q)
            Normal = dist.Normal if reparameterized else fakes.NonreparameterizedNormal
            with pyro.plate('samples', self.sample_batch_size):
                pyro.sample(
                    "loc_latent", Normal(
                        torch.stack([loc_q]*self.sample_batch_size, dim=0),
                        torch.stack([sig_q]*self.sample_batch_size, dim=0)
                    ).to_event(1)
                )

        adam = optim.Adam({"lr": lr})
        svi = SVI(model, guide, adam, loss=loss)
        alpha = 0.99  # EMA decay for the debug-only loss diagnostics below
        for k in range(n_steps):
            svi.step()
            if debug:
                loc_error = param_mse("loc_q", self.loc0)
                log_sig_error = param_mse("log_sig_q", -0.5*torch.log(self.lam0))
                with torch.no_grad():
                    if k == 0:
                        avg_loglikelihood, avg_penalty = loss._differentiable_loss_parts(model, guide)
                        avg_loglikelihood = torch_item(avg_loglikelihood)
                        avg_penalty = torch_item(avg_penalty)
                    loglikelihood, penalty = loss._differentiable_loss_parts(model, guide)
                    avg_loglikelihood = alpha * avg_loglikelihood + (1-alpha) * torch_item(loglikelihood)
                    avg_penalty = alpha * avg_penalty + (1-alpha) * torch_item(penalty)
                if k % 100 == 0:
                    print(loc_error, log_sig_error)
                    print(avg_loglikelihood, avg_penalty)
                    print()
        loc_error = param_mse("loc_q", self.loc0)
        log_sig_error = param_mse("log_sig_q", -0.5 * torch.log(self.lam0))
        assert_equal(0.0, loc_error, prec=0.05)
        assert_equal(0.0, log_sig_error, prec=0.05)
class TestFixedModelGuide(TestCase):
    """Check that per-parameter lr=0.0 really freezes model and/or guide params."""
    def setUp(self):
        self.data = torch.tensor([2.0])
        # Initial (log-space) values for guide (q_) and model (p_) parameters.
        self.alpha_q_log_0 = 0.17 * torch.ones(1)
        self.beta_q_log_0 = 0.19 * torch.ones(1)
        self.alpha_p_log_0 = 0.11 * torch.ones(1)
        self.beta_p_log_0 = 0.13 * torch.ones(1)

    def do_test_fixedness(self, fixed_parts):
        """Run a few SVI steps with lr=0 on the parts named in `fixed_parts`
        ('model' / 'guide'); return True iff the frozen parts did not move."""
        pyro.clear_param_store()

        def model():
            alpha_p_log = pyro.param(
                "alpha_p_log", self.alpha_p_log_0.clone())
            beta_p_log = pyro.param(
                "beta_p_log", self.beta_p_log_0.clone())
            alpha_p, beta_p = torch.exp(alpha_p_log), torch.exp(beta_p_log)
            lambda_latent = pyro.sample("lambda_latent", dist.Gamma(alpha_p, beta_p))
            pyro.sample("obs", dist.Poisson(lambda_latent), obs=self.data)
            return lambda_latent

        def guide():
            alpha_q_log = pyro.param(
                "alpha_q_log", self.alpha_q_log_0.clone())
            beta_q_log = pyro.param(
                "beta_q_log", self.beta_q_log_0.clone())
            alpha_q, beta_q = torch.exp(alpha_q_log), torch.exp(beta_q_log)
            pyro.sample("lambda_latent", dist.Gamma(alpha_q, beta_q))

        def per_param_args(module_name, param_name):
            # lr=0.0 freezes matching parameters; 'p_' marks model params,
            # 'q_' marks guide params (see setUp naming).
            if 'model' in fixed_parts and 'p_' in param_name:
                return {'lr': 0.0}
            if 'guide' in fixed_parts and 'q_' in param_name:
                return {'lr': 0.0}
            return {'lr': 0.01}

        adam = optim.Adam(per_param_args)
        svi = SVI(model, guide, adam, loss=Trace_ELBO())
        for _ in range(3):
            svi.step()

        model_unchanged = (torch.equal(pyro.param("alpha_p_log").data, self.alpha_p_log_0)) and\
            (torch.equal(pyro.param("beta_p_log").data, self.beta_p_log_0))
        guide_unchanged = (torch.equal(pyro.param("alpha_q_log").data, self.alpha_q_log_0)) and\
            (torch.equal(pyro.param("beta_q_log").data, self.beta_q_log_0))
        model_changed = not model_unchanged
        guide_changed = not guide_unchanged
        # It is an error only if a part that was requested fixed moved.
        error = ('model' in fixed_parts and model_changed) or ('guide' in fixed_parts and guide_changed)
        return (not error)

    def test_model_fixed(self):
        assert self.do_test_fixedness(fixed_parts=["model"])

    def test_guide_fixed(self):
        assert self.do_test_fixedness(fixed_parts=["guide"])

    def test_guide_and_model_both_fixed(self):
        assert self.do_test_fixedness(fixed_parts=["model", "guide"])

    def test_guide_and_model_free(self):
        # No matching tag: nothing is frozen, so nothing can be in error.
        assert self.do_test_fixedness(fixed_parts=["bogus_tag"])
@pytest.mark.stage("integration", "integration_batch_2")
class PoissonGammaTests(TestCase):
    """Poisson likelihood with a conjugate Gamma prior: the exact posterior
    is Gamma(alpha_n, beta_n), so SVI results can be checked against the
    analytic answer."""

    def setUp(self):
        # poisson-gamma model
        # gamma prior hyperparameter
        self.alpha0 = torch.tensor(1.0)
        # gamma prior hyperparameter
        self.beta0 = torch.tensor(1.0)
        self.data = torch.tensor([1.0, 2.0, 3.0])
        self.n_data = len(self.data)
        data_sum = self.data.sum(0)
        # Conjugacy: alpha_n = alpha0 + sum(data), beta_n = beta0 + N.
        self.alpha_n = self.alpha0 + data_sum  # posterior alpha
        self.beta_n = self.beta0 + torch.tensor(float(self.n_data))  # posterior beta
        self.sample_batch_size = 2

    def test_elbo_reparameterized(self):
        self.do_elbo_test(True, 10000, Trace_ELBO())

    def test_elbo_nonreparameterized(self):
        # Score-function gradients are noisier, hence more steps.
        self.do_elbo_test(False, 25000, Trace_ELBO())

    def test_renyi_reparameterized(self):
        self.do_elbo_test(True, 5000, RenyiELBO(num_particles=2))

    def test_renyi_nonreparameterized(self):
        self.do_elbo_test(False, 12500, RenyiELBO(alpha=0.2, num_particles=2))

    def test_rws_reparameterized(self):
        self.do_elbo_test(True, 5000, ReweightedWakeSleep(num_particles=2))

    def test_rws_nonreparameterized(self):
        self.do_elbo_test(False, 12500, ReweightedWakeSleep(num_particles=2))

    def test_mmd_vectorized(self):
        z_size = 1
        self.do_fit_prior_test(
            True, 500, Trace_MMD(
                kernel=kernels.RBF(
                    z_size,
                    lengthscale=torch.sqrt(torch.tensor(z_size, dtype=torch.float))
                ), vectorize_particles=True, num_particles=100
            ), debug=True, lr=0.09
        )

    def do_elbo_test(self, reparameterized, n_steps, loss):
        """Fit a Gamma(alpha_q, beta_q) guide by SVI and assert convergence
        to the analytic posterior within tolerance."""
        pyro.clear_param_store()
        Gamma = dist.Gamma if reparameterized else fakes.NonreparameterizedGamma

        def model():
            lambda_latent = pyro.sample("lambda_latent", Gamma(self.alpha0, self.beta0))
            with pyro.plate("data", self.n_data):
                pyro.sample("obs", dist.Poisson(lambda_latent), obs=self.data)
            return lambda_latent

        def guide():
            # Initialise slightly off the analytic posterior so convergence
            # is non-trivial.
            alpha_q = pyro.param("alpha_q", self.alpha_n.detach() + math.exp(0.17),
                                 constraint=constraints.positive)
            beta_q = pyro.param("beta_q", self.beta_n.detach() / math.exp(0.143),
                                constraint=constraints.positive)
            pyro.sample("lambda_latent", Gamma(alpha_q, beta_q))

        adam = optim.Adam({"lr": .0002, "betas": (0.97, 0.999)})
        svi = SVI(model, guide, adam, loss)
        for k in range(n_steps):
            svi.step()
        assert_equal(pyro.param("alpha_q"), self.alpha_n, prec=0.2, msg='{} vs {}'.format(
            pyro.param("alpha_q").detach().cpu().numpy(), self.alpha_n.detach().cpu().numpy()))
        assert_equal(pyro.param("beta_q"), self.beta_n, prec=0.15, msg='{} vs {}'.format(
            pyro.param("beta_q").detach().cpu().numpy(), self.beta_n.detach().cpu().numpy()))

    def do_fit_prior_test(self, reparameterized, n_steps, loss, debug=False, lr=0.0002):
        """With no observations the guide should recover the prior; fit it
        and assert (alpha_q, beta_q) converge to (alpha0, beta0)."""
        pyro.clear_param_store()
        Gamma = dist.Gamma if reparameterized else fakes.NonreparameterizedGamma

        def model():
            # Batched prior draws; stacking broadcasts the scalar
            # hyperparameters to (sample_batch_size, 1).
            with pyro.plate('samples', self.sample_batch_size):
                pyro.sample(
                    "lambda_latent", Gamma(
                        torch.stack([torch.stack([self.alpha0])]*self.sample_batch_size),
                        torch.stack([torch.stack([self.beta0])]*self.sample_batch_size)
                    ).to_event(1)
                )

        def guide():
            alpha_q = pyro.param("alpha_q", self.alpha0.detach() + math.exp(0.17),
                                 constraint=constraints.positive)
            beta_q = pyro.param("beta_q", self.beta0.detach() / math.exp(0.143),
                                constraint=constraints.positive)
            with pyro.plate('samples', self.sample_batch_size):
                pyro.sample(
                    "lambda_latent", Gamma(
                        torch.stack([torch.stack([alpha_q])]*self.sample_batch_size),
                        torch.stack([torch.stack([beta_q])]*self.sample_batch_size)
                    ).to_event(1)
                )

        adam = optim.Adam({"lr": lr, "betas": (0.97, 0.999)})
        svi = SVI(model, guide, adam, loss)
        alpha = 0.99  # exponential smoothing factor for the debug diagnostics
        for k in range(n_steps):
            svi.step()
            if debug:
                alpha_error = param_mse("alpha_q", self.alpha0)
                beta_error = param_mse("beta_q", self.beta0)
                with torch.no_grad():
                    # Seed the running averages on the first step, then
                    # update them with an exponential moving average.
                    if k == 0:
                        avg_loglikelihood, avg_penalty = loss._differentiable_loss_parts(model, guide, (), {})
                        avg_loglikelihood = torch_item(avg_loglikelihood)
                        avg_penalty = torch_item(avg_penalty)
                    loglikelihood, penalty = loss._differentiable_loss_parts(model, guide, (), {})
                    avg_loglikelihood = alpha * avg_loglikelihood + (1-alpha) * torch_item(loglikelihood)
                    avg_penalty = alpha * avg_penalty + (1-alpha) * torch_item(penalty)
                if k % 100 == 0:
                    print(alpha_error, beta_error)
                    print(avg_loglikelihood, avg_penalty)
                    print()
        assert_equal(pyro.param("alpha_q"), self.alpha0, prec=0.2, msg='{} vs {}'.format(
            pyro.param("alpha_q").detach().cpu().numpy(), self.alpha0.detach().cpu().numpy()))
        assert_equal(pyro.param("beta_q"), self.beta0, prec=0.15, msg='{} vs {}'.format(
            pyro.param("beta_q").detach().cpu().numpy(), self.beta0.detach().cpu().numpy()))
@pytest.mark.stage("integration", "integration_batch_1")
@pytest.mark.parametrize('elbo_impl', [
    xfail_param(JitTrace_ELBO, reason="incorrect gradients", run=False),
    xfail_param(JitTraceGraph_ELBO, reason="incorrect gradients", run=False),
    xfail_param(JitTraceEnum_ELBO, reason="incorrect gradients", run=False),
    Trace_ELBO,
    TraceGraph_ELBO,
    TraceEnum_ELBO,
    RenyiELBO,
    ReweightedWakeSleep
])
@pytest.mark.parametrize('gamma_dist,n_steps', [
    (dist.Gamma, 5000),
    (fakes.NonreparameterizedGamma, 10000),
    (ShapeAugmentedGamma, 5000),
], ids=['reparam', 'nonreparam', 'rsvi'])
def test_exponential_gamma(gamma_dist, n_steps, elbo_impl):
    """Exponential likelihood with a conjugate Gamma prior: SVI under each
    ELBO implementation must recover the analytic posterior
    Gamma(alpha_n, beta_n) within tolerance."""
    pyro.clear_param_store()
    # gamma prior hyperparameter
    alpha0 = torch.tensor(1.0)
    # gamma prior hyperparameter
    beta0 = torch.tensor(1.0)
    n_data = 2
    data = torch.tensor([3.0, 2.0])  # two observations
    # Conjugacy: alpha_n = alpha0 + N, beta_n = beta0 + sum(data).
    alpha_n = alpha0 + torch.tensor(float(n_data))  # posterior alpha
    beta_n = beta0 + torch.sum(data)  # posterior beta
    # Score-function gradients are noisier, so allow a looser tolerance.
    prec = 0.2 if gamma_dist.has_rsample else 0.25

    def model(alpha0, beta0, alpha_n, beta_n):
        lambda_latent = pyro.sample("lambda_latent", gamma_dist(alpha0, beta0))
        with pyro.plate("data", n_data):
            pyro.sample("obs", dist.Exponential(lambda_latent), obs=data)
        return lambda_latent

    def guide(alpha0, beta0, alpha_n, beta_n):
        # Initialise slightly away from the analytic posterior.
        alpha_q = pyro.param("alpha_q", alpha_n * math.exp(0.17), constraint=constraints.positive)
        beta_q = pyro.param("beta_q", beta_n / math.exp(0.143), constraint=constraints.positive)
        pyro.sample("lambda_latent", gamma_dist(alpha_q, beta_q))

    adam = optim.Adam({"lr": .0003, "betas": (0.97, 0.999)})
    if elbo_impl is RenyiELBO:
        elbo = elbo_impl(alpha=0.2, num_particles=3, max_plate_nesting=1, strict_enumeration_warning=False)
    elif elbo_impl is ReweightedWakeSleep:
        if gamma_dist is ShapeAugmentedGamma:
            # Fixed typo in the reason message ("suported" -> "supported").
            pytest.xfail(reason="ShapeAugmentedGamma not supported for ReweightedWakeSleep")
        else:
            elbo = elbo_impl(num_particles=3, max_plate_nesting=1, strict_enumeration_warning=False)
    else:
        elbo = elbo_impl(max_plate_nesting=1, strict_enumeration_warning=False)
    svi = SVI(model, guide, adam, loss=elbo)
    with xfail_if_not_implemented():
        for k in range(n_steps):
            svi.step(alpha0, beta0, alpha_n, beta_n)
    assert_equal(pyro.param("alpha_q"), alpha_n, prec=prec, msg='{} vs {}'.format(
        pyro.param("alpha_q").detach().cpu().numpy(), alpha_n.detach().cpu().numpy()))
    assert_equal(pyro.param("beta_q"), beta_n, prec=prec, msg='{} vs {}'.format(
        pyro.param("beta_q").detach().cpu().numpy(), beta_n.detach().cpu().numpy()))
@pytest.mark.stage("integration", "integration_batch_2")
class BernoulliBetaTests(TestCase):
def setUp(self):
    # bernoulli-beta model
    # beta prior hyperparameter
    self.alpha0 = torch.tensor(1.0)
    self.beta0 = torch.tensor(1.0)  # beta prior hyperparameter
    self.data = torch.tensor([0.0, 1.0, 1.0, 1.0])
    self.n_data = len(self.data)
    self.batch_size = 4
    data_sum = self.data.sum()
    # Conjugate Beta posterior: alpha_n = alpha0 + #successes,
    # beta_n = beta0 + #failures.
    self.alpha_n = self.alpha0 + data_sum  # posterior alpha
    self.beta_n = self.beta0 - data_sum + torch.tensor(float(self.n_data))
    # posterior beta
    self.log_alpha_n = torch.log(self.alpha_n)
    self.log_beta_n = torch.log(self.beta_n)
    self.sample_batch_size = 2
def test_elbo_reparameterized(self):
    # Pathwise (reparameterized) gradients.
    self.do_elbo_test(True, 10000, Trace_ELBO())
def test_elbo_nonreparameterized(self):
    # Score-function gradients (non-reparameterized sampling).
    self.do_elbo_test(False, 10000, Trace_ELBO())
# this is used to detect bugs related to https://github.com/pytorch/pytorch/issues/9521
def test_elbo_reparameterized_vectorized(self):
    # Vectorized particles exercise broadcasting paths in the ELBO.
    self.do_elbo_test(True, 5000, Trace_ELBO(num_particles=2, vectorize_particles=True,
                                             max_plate_nesting=1))
# this is used to detect bugs | |
<filename>pybind/slxos/v16r_1_00b/brocade_fcoe_ext_rpc/fcoe_get_login/input/__init__.py<gh_stars>0
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class input(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-fcoe-ext - based on the path /brocade_fcoe_ext_rpc/fcoe-get-login/input. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__fcoe_login_interface','__fcoe_login_vfid','__fcoe_login_vlan','__fcoe_login_rbridge_id',)
_yang_name = 'input'
_rest_name = 'input'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
    """Initialise the container; with one positional argument, copy-construct
    from a compatible object exposing the same pyangbind elements."""
    # Resolve the XPath helper: explicit kwarg wins, then the parent's,
    # otherwise disabled (False).
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
        self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
        self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
        path_helper_ = getattr(self._parent, "_path_helper", False)
        self._path_helper = path_helper_
    else:
        self._path_helper = False
    # Same resolution scheme for extension methods.
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
        self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
        self._extmethods = extmethods
    elif hasattr(self, "_parent"):
        extmethods = getattr(self._parent, "_extmethods", None)
        self._extmethods = extmethods
    else:
        self._extmethods = False
    # Leaf value holders, generated from the YANG schema (restricted types).
    self.__fcoe_login_vlan = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), restriction_dict={'range': [u'2..4090']}), is_leaf=True, yang_name="fcoe-login-vlan", rest_name="fcoe-login-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='fcoe:fcoe-vlan-type', is_config=True)
    self.__fcoe_login_rbridge_id = YANGDynClass(base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..239']}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'all'}),], is_leaf=True, yang_name="fcoe-login-rbridge-id", rest_name="fcoe-login-rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='common-def:rbridge-id-all-type', is_config=True)
    self.__fcoe_login_vfid = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), restriction_dict={'range': [u'1..4096']}), is_leaf=True, yang_name="fcoe-login-vfid", rest_name="fcoe-login-vfid", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='fcoe:fcoe-vfid-type', is_config=True)
    self.__fcoe_login_interface = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'3..32']}), is_leaf=True, yang_name="fcoe-login-interface", rest_name="fcoe-login-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='fcoe:interface-fcoe-type', is_config=True)
    load = kwargs.pop("load", None)
    if args:
        if len(args) > 1:
            raise TypeError("cannot create a YANG container with >1 argument")
        # Copy-construction: the source object must expose every element.
        all_attr = True
        for e in self._pyangbind_elements:
            if not hasattr(args[0], e):
                all_attr = False
                break
        if not all_attr:
            raise ValueError("Supplied object did not have the correct attributes")
        # Only copy elements that were actually changed from their defaults.
        for e in self._pyangbind_elements:
            nobj = getattr(args[0], e)
            if nobj._changed() is False:
                continue
            setmethod = getattr(self, "_set_%s" % e)
            if load is None:
                setmethod(getattr(args[0], e))
            else:
                setmethod(getattr(args[0], e), load=load)
def _path(self):
    """Return this node's absolute YANG schema path as a list of names."""
    if not hasattr(self, "_parent"):
        # Detached node: fall back to the generated absolute path.
        return [u'brocade_fcoe_ext_rpc', u'fcoe-get-login', u'input']
    return self._parent._path() + [self._yang_name]
def _rest_path(self):
    """Return the REST URI path; nodes without a rest name are skipped."""
    if not hasattr(self, "_parent"):
        # Detached node: use the generated default REST path.
        return [u'fcoe-get-login', u'input']
    parent_path = self._parent._rest_path()
    return parent_path + [self._rest_name] if self._rest_name else parent_path
def _get_fcoe_login_interface(self):
    """
    Getter method for fcoe_login_interface, mapped from YANG variable /brocade_fcoe_ext_rpc/fcoe_get_login/input/fcoe_login_interface (fcoe:interface-fcoe-type)

    YANG Description: This specifies the fcoe interface for which this
    rpc function is invoked. In response to this
    request, the managed device returns the list of all
    FCoE devices that have logged in on this
    interface.
    """
    # Returns the YANGDynClass wrapper (restricted string), not a plain str.
    return self.__fcoe_login_interface
def _set_fcoe_login_interface(self, v, load=False):
    """
    Setter method for fcoe_login_interface, mapped from YANG variable /brocade_fcoe_ext_rpc/fcoe_get_login/input/fcoe_login_interface (fcoe:interface-fcoe-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_fcoe_login_interface is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_fcoe_login_interface() directly.

    YANG Description: This specifies the fcoe interface for which this
    rpc function is invoked. In response to this
    request, the managed device returns the list of all
    FCoE devices that have logged in on this
    interface.
    """
    # Unwrap values that carry their own type-conversion hook.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    # Re-wrap in the generated restricted type; invalid input surfaces as
    # a ValueError describing the expected YANG type.
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'3..32']}), is_leaf=True, yang_name="fcoe-login-interface", rest_name="fcoe-login-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='fcoe:interface-fcoe-type', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """fcoe_login_interface must be of a type compatible with fcoe:interface-fcoe-type""",
            'defined-type': "fcoe:interface-fcoe-type",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'3..32']}), is_leaf=True, yang_name="fcoe-login-interface", rest_name="fcoe-login-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='fcoe:interface-fcoe-type', is_config=True)""",
        })
    self.__fcoe_login_interface = t
    # Notify the parent tree of the change, if a hook is registered.
    if hasattr(self, '_set'):
        self._set()
def _unset_fcoe_login_interface(self):
    # Reset the leaf to a fresh (empty) instance of its generated type.
    self.__fcoe_login_interface = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'3..32']}), is_leaf=True, yang_name="fcoe-login-interface", rest_name="fcoe-login-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='fcoe:interface-fcoe-type', is_config=True)
def _get_fcoe_login_vfid(self):
    """
    Getter method for fcoe_login_vfid, mapped from YANG variable /brocade_fcoe_ext_rpc/fcoe_get_login/input/fcoe_login_vfid (fcoe:fcoe-vfid-type)

    YANG Description: This specifies the virtual fabric id for which
    this rpc function is invoked. In response to this
    request, the managed device returns a list of all
    FCoE devices that have logged in to this virtual
    fabric.
    """
    # Returns the YANGDynClass wrapper (range-restricted int), not an int.
    return self.__fcoe_login_vfid
def _set_fcoe_login_vfid(self, v, load=False):
    """
    Setter method for fcoe_login_vfid, mapped from YANG variable /brocade_fcoe_ext_rpc/fcoe_get_login/input/fcoe_login_vfid (fcoe:fcoe-vfid-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_fcoe_login_vfid is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_fcoe_login_vfid() directly.

    YANG Description: This specifies the virtual fabric id for which
    this rpc function is invoked. In response to this
    request, the managed device returns a list of all
    FCoE devices that have logged in to this virtual
    fabric.
    """
    # Unwrap values that carry their own type-conversion hook.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    # Re-wrap in the generated restricted type (range 1..4096); invalid
    # input surfaces as a ValueError describing the expected YANG type.
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), restriction_dict={'range': [u'1..4096']}), is_leaf=True, yang_name="fcoe-login-vfid", rest_name="fcoe-login-vfid", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='fcoe:fcoe-vfid-type', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """fcoe_login_vfid must be of a type compatible with fcoe:fcoe-vfid-type""",
            'defined-type': "fcoe:fcoe-vfid-type",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), restriction_dict={'range': [u'1..4096']}), is_leaf=True, yang_name="fcoe-login-vfid", rest_name="fcoe-login-vfid", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='fcoe:fcoe-vfid-type', is_config=True)""",
        })
    self.__fcoe_login_vfid = t
    # Notify the parent tree of the change, if a hook is registered.
    if hasattr(self, '_set'):
        self._set()
def _unset_fcoe_login_vfid(self):
    # Reset the leaf to a fresh (empty) instance of its generated type.
    self.__fcoe_login_vfid = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), restriction_dict={'range': [u'1..4096']}), is_leaf=True, yang_name="fcoe-login-vfid", rest_name="fcoe-login-vfid", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='fcoe:fcoe-vfid-type', is_config=True)
def _get_fcoe_login_vlan(self):
    """
    Getter method for fcoe_login_vlan, mapped from YANG variable /brocade_fcoe_ext_rpc/fcoe_get_login/input/fcoe_login_vlan (fcoe:fcoe-vlan-type)

    YANG Description: This specifies the vlan id for which this rpc
    function is invoked. In response to this request,
    the managed device returns the list of all FCoE
    devices that have logged in on this vlan.
    """
    # Returns the YANGDynClass wrapper (range-restricted int), not an int.
    return self.__fcoe_login_vlan
def _set_fcoe_login_vlan(self, v, load=False):
    """
    Setter method for fcoe_login_vlan, mapped from YANG variable /brocade_fcoe_ext_rpc/fcoe_get_login/input/fcoe_login_vlan (fcoe:fcoe-vlan-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_fcoe_login_vlan is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_fcoe_login_vlan() directly.

    YANG Description: This specifies the vlan id for which this rpc
    function is invoked. In response to this request,
    the managed device returns the list of all FCoE
    devices that have logged in on this vlan.
    """
    # Unwrap values that carry their own type-conversion hook.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    # Re-wrap in the generated restricted type (range 2..4090); invalid
    # input surfaces as a ValueError describing the expected YANG type.
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), restriction_dict={'range': [u'2..4090']}), is_leaf=True, yang_name="fcoe-login-vlan", rest_name="fcoe-login-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='fcoe:fcoe-vlan-type', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """fcoe_login_vlan must be of a type compatible with fcoe:fcoe-vlan-type""",
            'defined-type': "fcoe:fcoe-vlan-type",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), restriction_dict={'range': [u'2..4090']}), is_leaf=True, yang_name="fcoe-login-vlan", rest_name="fcoe-login-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='fcoe:fcoe-vlan-type', is_config=True)""",
        })
    self.__fcoe_login_vlan = t
    # Notify the parent tree of the change, if a hook is registered.
    if hasattr(self, '_set'):
        self._set()
def _unset_fcoe_login_vlan(self):
    # Reset the leaf to a fresh (empty) instance of its generated type.
    self.__fcoe_login_vlan = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), restriction_dict={'range': [u'2..4090']}), is_leaf=True, yang_name="fcoe-login-vlan", rest_name="fcoe-login-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='fcoe:fcoe-vlan-type', is_config=True)
def _get_fcoe_login_rbridge_id(self):
    """
    Getter method for fcoe_login_rbridge_id, mapped from YANG variable /brocade_fcoe_ext_rpc/fcoe_get_login/input/fcoe_login_rbridge_id (common-def:rbridge-id-all-type)

    YANG Description: This specifies the rbridge-id for which this rpc
    function is invoked. In response to this request,
    the managed device returns a list of all FCoE
    devices that have logged in to given rbridge or
    all rbridges if rbridge-id value is 'all'
    """
    # Returns the YANGDynClass wrapper (union of int 1..239 and 'all').
    return self.__fcoe_login_rbridge_id
def _set_fcoe_login_rbridge_id(self, v, load=False):
    """
    Setter method for fcoe_login_rbridge_id, mapped from YANG variable /brocade_fcoe_ext_rpc/fcoe_get_login/input/fcoe_login_rbridge_id (common-def:rbridge-id-all-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_fcoe_login_rbridge_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_fcoe_login_rbridge_id() directly.

    YANG Description: This specifies the rbridge-id for which this rpc
    function is invoked. In response to this request,
    the managed device returns a list of all FCoE
    devices that have logged in to given rbridge or
    all rbridges if rbridge-id value is 'all'
    """
    # Unwrap values that carry their own type-conversion hook.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    # Re-wrap in the generated union type (int 1..239 or the string 'all');
    # invalid input surfaces as a ValueError describing the expected type.
    try:
        t = YANGDynClass(v,base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..239']}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'all'}),], is_leaf=True, yang_name="fcoe-login-rbridge-id", rest_name="fcoe-login-rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='common-def:rbridge-id-all-type', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """fcoe_login_rbridge_id must be of a type compatible with common-def:rbridge-id-all-type""",
            'defined-type': "common-def:rbridge-id-all-type",
            'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..239']}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'all'}),], is_leaf=True, yang_name="fcoe-login-rbridge-id", rest_name="fcoe-login-rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='common-def:rbridge-id-all-type', is_config=True)""",
        })
    self.__fcoe_login_rbridge_id = t
    # Notify the parent tree of the change, if a hook is registered.
    if hasattr(self, '_set'):
        self._set()
def _unset_fcoe_login_rbridge_id(self):
    # Reset the leaf to a fresh (empty) instance of its generated type.
    self.__fcoe_login_rbridge_id = YANGDynClass(base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..239']}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'all'}),], is_leaf=True, yang_name="fcoe-login-rbridge-id", rest_name="fcoe-login-rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='common-def:rbridge-id-all-type', is_config=True)
fcoe_login_interface = __builtin__.property(_get_fcoe_login_interface, _set_fcoe_login_interface)
fcoe_login_vfid = __builtin__.property(_get_fcoe_login_vfid, _set_fcoe_login_vfid)
fcoe_login_vlan = __builtin__.property(_get_fcoe_login_vlan, _set_fcoe_login_vlan)
fcoe_login_rbridge_id = __builtin__.property(_get_fcoe_login_rbridge_id, _set_fcoe_login_rbridge_id)
_pyangbind_elements = {'fcoe_login_interface': | |
'''
/‾/ /‾/‾‾‾‾‾‾‾‾/‾| /‾| /‾/
/ /___/ /‾‾‾/ /‾‾/ |/ | / /
/ ___ / / / / | | / |/ /
/ / / / / / / /|__/| | /____
/_/___/_/_ /_/ / / _|_|_____/
/ _____/‾/ /‾//‾| /‾/ / |\ /‾/
/ <_____\ \/ // |/ /‾‾/ /‾/ / | \/ /
\______ \\ // | | / / / / /| |> <
______/ // // /| / / / / /_| | /\ \
/________//_//_/_|_/__/_/ /_//___|/ \_\_______ _______
/‾/ /‾/‾/ _____/‾/ /‾/ / /‾/ _____/ / /‾/‾‾‾‾‾‾/ ____/‾‾‾‾\
/ /___/ / / /____/ /___/ / / / / /____/ /___/ /‾‾/ /‾/ /__ / /‾) /
/ ___ / / //_ / ___ / / / / //_ / ___ / / / / ___// ‾‾ ⁄
/ / / / / /__/ / / / / /___/ / /__/ / / / / / / / /___/ /\ \
/_/ /_/_/______/_/ /_/_____/_/______/_/ /_/ /_/ /______/__/ \__\
'''
import re
import os
##try:
## from pip import main as pipmain
##except:
## from pip._internal.main import main as pipmain
import subprocess
from subprocess import *
##import requests
##import io
##import zipfile
from urllib import request
import requests
def download_extract_zip(url):
    """
    Download a ZIP file and extract its contents in memory
    yields (filename, file-like object) pairs
    """
    # BUG FIX: 'io' and 'zipfile' are not imported at module level (those
    # imports are commented out above), so the original raised NameError on
    # first use. Import them locally to keep this helper self-contained.
    import io
    import zipfile

    response = requests.get(url)
    with zipfile.ZipFile(io.BytesIO(response.content)) as thezip:
        for zipinfo in thezip.infolist():
            with thezip.open(zipinfo) as thefile:
                yield zipinfo.filename, thefile
import sys

# Path of the Python interpreter used to drive "python -m pip" below.
# sys.executable is correct on every platform and inside virtualenvs; the
# original Windows-only guess (exec_prefix + python.exe) is kept as a
# fallback for embedded builds where sys.executable can be empty.
py = sys.executable or os.path.join(sys.exec_prefix, 'python.exe')
def downloadZip(url, savePath):
    """Download *url* and write the raw bytes to *savePath*.

    Context managers guarantee the HTTP response and the output file are
    closed even if the transfer fails partway through (the original leaked
    both handles on error).
    """
    with request.urlopen(url) as remote_zip:
        with open(savePath, 'wb') as local_file:
            local_file.write(remote_zip.read())
def install(package):
    """Install *package* (one pip package name, or a list of names) into the
    current interpreter via ``python -m pip install``.

    Prints "success" per package, or pip's stderr text on failure; never
    raises. The list and scalar branches of the original were duplicated
    verbatim, so they are unified here.
    """
    packages = package if isinstance(package, list) else [package]
    for name in packages:
        _pip_install_one(name)


def _pip_install_one(name):
    # Install a single package and report the outcome.
    print(f"Installing {name}")
    proc = subprocess.Popen([py, '-m', 'pip', 'install', name],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # communicate() both waits for the process and drains the pipes; the
    # original called wait() first, which can deadlock once a pipe buffer
    # fills up (see subprocess docs).
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        # Decode so the user sees readable text, not a bytes repr (b'...').
        print(stderr.decode(errors='replace'))
    else:
        print("success")
from tkinter import *
##import requests
##import io
##import zipfile
##def download_extract_zip(url):
## """
## Download a ZIP file and extract its contents in memory
## yields (filename, file-like object) pairs
## """
## response = requests.get(url)
## with zipfile.ZipFile(io.BytesIO(response.content)) as thezip:
## for zipinfo in thezip.infolist():
## with thezip.open(zipinfo) as thefile:
## yield zipinfo.filename, thefile
# Import the GUI helper libraries, installing them with pip on first run.
try:
    from tkhtmlview import HTMLLabel, HTMLScrolledText
    import pyperclip as pc
except ModuleNotFoundError:
    print('Running some initialization...')
    # Packages to bootstrap on first launch.
    pack = ['tkhtmlview', 'pyperclip']  # ['requests','Pillow','tkhtmlview',]
    install(pack)
    # py -m pip install requests
    # py -m pip install Pillow
    # pip install tkhtmlview
    # py -m pip install tkhtmlview
    # Retry now that pip has (hopefully) installed them; a second failure
    # propagates, which is the desired behavior.
    from tkhtmlview import HTMLLabel, HTMLScrolledText
    import pyperclip as pc
##THEMEpath = os.path.join(os.path.expanduser('~'),'Downloads','tcl-awthemes.zip')
##if not os.path.exists(THEMEpath):
## print('Retreving data...')
## #download_extract_zip('https://sourceforge.net/projects/tcl-awthemes/files/latest/download')
## downloadZip('https://sourceforge.net/projects/tcl-awthemes/files/latest/download',THEMEpath)
##try:
## from tkinterhtml import HTMLLabel
##except:
## print('Running some initialization...')
## print(' installing tkhtmlview...')
## #pipmain(['install','--upgrade','--force-reinstall','pip'])
## #install('Pillow')
## #install('tk_html_widgets')
##
## path = os.path.join(os.path.expanduser('~'),'Downloads','tkhtmlview-master.zip')
## #f = open(path,'w+')
## #call(["curl", "-O", "https://github.com/pozzolana93/tkhtmlview/archive/refs/heads/master.zip"], stdout=f)
## #call(["curl -O https://github.com/pozzolana93/tkhtmlview/archive/refs/heads/master.zip > tkhtmlview-master.zip"], shell=True)
## downloadZip("https://github.com/pozzolana93/tkhtmlview/archive/refs/heads/master.zip",path)
## #f.close()
## try:
## a = open(path)
## a.close()
## print('Created file at:',path)
## except:
## print('installation failure')
## install(os.path.join(os.path.expanduser('~'),'Downloads','tkhtmlview-master.zip'))
## #print('Installed')
## #print(' installing tkinter...')
## #install('tkinter')
## #print( help('modules'))
## from tkinterhtml import HTMLLabel
##
# Set the default input termination code
DEFAULT_INPUT_TERMINATION_CODE = '--Terminate--'
version='2.0.0'
print(f'v {version}')
Bold = ';font-weight: 900'
# Define the default colors
Border = '#eeeeee'
Background = '#ffffff'
Foreground = '#000000'
Comment = '#DD0000'
String = '#00aa00'
Keywords = '#ff7700'
Builtins = '#900090'
Definitions = '#0000ff'
Numbers = '#0000ff'
Errors = '#ff0000'
Output = '#0000ff'
lThemes = {'Python Default':{
'Border':'#555555',
'Background':'#ffffff',
'Foreground':'#000000',
'Comment':'#DD0000',
'String':'#00aa00',
'Keywords':'#ff7700',
'Builtins':'#900090',
'Definitions':'#0000ff',
'Numbers':'#000000',
'Errors':'#ff0000'
},
'Python Bolded':{
'Border':'#555555',
'Background':'#ffffff',
'Foreground':'#000000',
'Comment':'#DD0000',
'String':'#00aa00',
'Keywords':'#ff7700'+Bold,
'Builtins':'#900090',
'Definitions':'#0000ff'+Bold,
'Numbers':'#0000ff'+Bold,
'Errors':'#ff0000'
},
'Royal Blue':{
'Border':'#555566',
'Background':'#ffffff',
'Foreground':'#000000',
'Comment':'#00aa00',
'String':'#DD0000',
'Keywords':'#0000ff'+Bold,
'Builtins':'#000099',
'Definitions':'#000099'+Bold,
'Numbers':'#000099'+Bold,
'Errors':'#ff0000'
},
'Sea':{
'Border':'#445566',
'Background':'#ffffff',
'Foreground':'#000000',
'Comment':'#445a71',
'String':'#229922',
'Keywords':'#0077aa'+Bold,
'Builtins':'#D86149',
'Definitions':'#04B7BD'+Bold,
'Numbers':'#0077aa'+Bold,
'Errors':'#ff0000'
},
'Red':{
'Border':'#665555',
'Background':'#ffffff',
'Foreground':'#000000',
'Comment':'#445a71',
'String':'#22863A',
'Keywords':'#D73A49'+Bold,
'Builtins':'#6F42C1',
'Definitions':'#AB0112'+Bold,
'Numbers':'#AB0112'+Bold,
'Errors':'#ff0000'
},
'Grey':{
'Border':'#555555',
'Background':'#eeeeee',
'Foreground':'#7B776F',
'Comment':'#445a71',
'String':'#C05726',
'Keywords':'#3080B5'+Bold,
'Builtins':'#C05726',
'Definitions':'#ff7700'+Bold,
'Numbers':'#000077'+Bold,
'Errors':'#ff0000'
},
'Glisten':{
'Border':'#808080',
'Background':'#ffffff',
'Foreground':'#333333',
'Comment':'#445a71',
'String':'#DD4422',
'Keywords':'#008800'+Bold,
'Builtins':'#008800',
'Definitions':'#0066BB'+Bold,
'Numbers':'#0000dd'+Bold,
'Errors':'#ff0000'
}
}
dThemes = {'Default Dark':{
'Border':'#555555',
'Background':'#222222',
'Foreground':'#dddddd',
'Comment':'#ff3333',
'String':'#00ff00',
'Keywords':'#ffaa22'+Bold,
'Builtins':'#ff40c0',
'Definitions':'#00ccff'+Bold,
'Numbers':'#55aaff'+Bold,
'Errors':'#ff0000'
},
'Rainglow':{
'Border':'#5298c4',
'Background':'#040507',
'Foreground':'#ffffff',
'Comment':'#6f809f',
'String':'#64aeb3',
'Keywords':'#508aaa'+Bold,
'Builtins':'#6ab0a3',
'Definitions':'#00838C'+Bold,
'Numbers':'#529CA8'+Bold,
'Errors':'#ff0000'
},
'Bold':{
'Border':'#3d8e91',
'Background':'#0f0d0d',
'Foreground':'#ffffff',
'Comment':'#6f809f',
'String':'#F7A21B',
'Keywords':'#F0624B'+Bold,
'Builtins':'#3D8E91',
'Definitions':'#B4B7AD'+Bold,
'Numbers':'#F7A21B'+Bold,
'Errors':'#ff0000'
},
'Dark+':{
'Border':'#555555',
'Background':'#1e1e1e',
'Foreground':'#D4D4D4',
'Comment':'#6A9955',
'String':'#CE9178',
'Keywords':'#569CD6'+Bold,
'Builtins':'#dcdcaa',
'Definitions':'#9CDCFE'+Bold,
'Numbers':'#B5CEA8'+Bold,
'Errors':'#ff0000'
},
'Citylights':{
'Border':'#171d23',
'Background':'#1d252c',
'Foreground':'#718CA1',
'Comment':'#72899e',
'String':'#68A1F0',
'Keywords':'#508aaa'+Bold,
'Builtins':'#70E1E8',
'Definitions':'#24A5AF'+Bold,
'Numbers':'#E27E8D'+Bold,
'Errors':'#ff0000'
},
'Panda':{
'Border':'#676B79',
'Background':'#292a2b',
'Foreground':'#E6E6E6',
'Comment':'#737787',
'String':'#19F9D8',
'Keywords':'#FFB86C'+Bold,
'Builtins':'#FF75B5',
'Definitions':'#6FC1FF'+Bold,
'Numbers':'#FFB86C'+Bold,
'Errors':'#ff0000'
},
'Rose':{
'Border':'#f52277',
'Background':'#141322',
'Foreground':'#B4DAE9',
'Comment':'#45898C',
'String':'#C01B5D',
'Keywords':'#FB4293'+Bold,
'Builtins':'#F37AB0',
'Definitions':'#8CE1E7'+Bold,
'Numbers':'#E5FCA6'+Bold,
'Errors':'#ff0000'
},
'Sea Green':{
'Border':'#242b38',
'Background':'#0a1018',
'Foreground':'#EEFFFF',
'Comment':'#708394',
'String':'#28735E',
'Keywords':'#25A2A6'+Bold,
'Builtins':'#5CB4DE',
'Definitions':'#4785bd'+Bold,
'Numbers':'#28735E'+Bold,
'Errors':'#ff0000'
},
'Firefly':{
'Border':'#333333',
'Background':'#0a0a0a',
'Foreground':'#a8aebd',
'Comment':'#626a73',
'String':'#a4bd00',
'Keywords':'#ff0066'+Bold,
'Builtins':'#ff8533',
'Definitions':'#827db5'+Bold,
'Numbers':'#E6E600'+Bold,
'Errors':'#ff0000'
},
'Monikai':{
'Border':'#333333',
'Background':'#121212',
'Foreground':'#bbbbbb',
'Comment':'#5C6370',
'String':'#E5C07B',
'Keywords':'#56B6C2'+Bold,
'Builtins':'#E06C75',
'Definitions':'#98C379'+Bold,
'Numbers':'#C678DD'+Bold,
'Errors':'#ff0000'
},
'Black Ocean':{
'Border':'#0c5271',
'Background':'#101316',
'Foreground':'#DFDFDF',
'Comment':'#60778c',
'String':'#7ebea0',
'Keywords':'#007aae'+Bold,
'Builtins':'#019d76',
'Definitions':'#15b8ae'+Bold,
'Numbers':'#15b8ae'+Bold,
'Errors':'#ff0000'
},
'CodePen':{
'Border':'#9CA0B1',
'Background':'#1e1f27',
'Foreground':'#D5D7DE',
'Comment':'#88AFBF',
'String':'#2BC7B9',
'Keywords':'#47CF73'+Bold,
'Builtins':'#5E91F2',
'Definitions':'#9CA0B1'+Bold,
'Numbers':'#2BC7B9'+Bold,
'Errors':'#ff0000'
},
'Acme':{
'Border':'#a2a797',
'Background':'#0f0e0d',
'Foreground':'#EDEBE6',
'Comment':'#988e86',
'String':'#E0A84C',
'Keywords':'#CF433E'+Bold,
'Builtins':'#CD122C',
'Definitions':'#d96972'+Bold,
'Numbers':'#a5e3d0'+Bold,
'Errors':'#ff0000'
},
'Arc':{
'Border':'#3c3c3c',
'Background':'#111111',
'Foreground':'#EDEBE6',
'Comment':'#888888',
'String':'#ff1ba9',
'Keywords':'#f28888'+Bold,
'Builtins':'#a80000',
'Definitions':'#A5E3D0'+Bold,
'Numbers':'#a5e3d0'+Bold,
'Errors':'#ff0000'
}
}
# Define the syntax separators
separators = [',','(','[',']',')','{','}','/','*','+','-','.','|',':',';','⋞','⋟','=']
# Create a splitting function that takes a list of separators
# Code courtesy of DelftStack: https://www.delftstack.com/howto/python/how-to-split-string-with-multiple-delimiters-in-python/#make-it-a-function - I edited it some though
def custom_split(sepr_list, str_to_split):
    """Split *str_to_split* on any of the separators in *sepr_list*.

    Longer separators are tried first so that multi-character
    delimiters are not shadowed by their single-character prefixes.
    Returns the list of pieces produced by re.split().
    """
    # Longest-first ordering, then build an alternation of escaped separators.
    ordered = sorted(sepr_list, key=len, reverse=True)
    pattern = '|'.join(re.escape(sep) for sep in ordered)
    return re.split(pattern, str_to_split)
def Input(prompt=None, ml=False, Terminate=DEFAULT_INPUT_TERMINATION_CODE):
    """Read user input, optionally spanning multiple lines.

    With ``ml`` false this is just ``input(prompt)``.  With ``ml`` true,
    additional lines are collected until the ``Terminate`` string is
    entered on its own line or EOF is reached; collected lines are
    joined with newlines.
    """
    contents = input(prompt)
    if ml:
        while True:
            try:
                extra = input()
            except EOFError:
                break
            if extra == Terminate:
                break
            contents = contents + '\n' + extra
    return contents
# create a list of instances that would set off a string with single quotes
str1 = ['\'', 'f\'','u\'','b\'','r\'','\'\'\'','f\'\'\'','u\'\'\'','b\'\'\'','r\'\'\'']
# create a list of instances that would set off a string with double quotes
str2 = ['"', 'r"','f"','u"','b"','"""','f"""','u"""','b"""','r"""']
#Create a dictionary with keywords and the like
Colors = {
#Comment : ['#','##'],
#String : ['\'', '"','f\'','u\'','b\'','r\'','r"','f"','u"','b"'],
'Keywords' : ['and','as','assert','break','class','continue','def','del','elif','else','except','False','finally','for','from','global','if','import','in','is','lambda','None','nonlocal',
'not','or','pass','raise','return','True','try','while','with','yield'],
'Builtins' : ['ValueError', 'ArithmeticError', 'AssertionError', 'AttributeError', 'BaseException', 'BlockingIOError', 'BrokenPipeError', 'BufferError', 'BytesWarning', 'ChildProcessError',
'ConnectionAbortedError', 'ConnectionError', 'ConnectionRefusedError', 'ConnectionResetError', 'DeprecationWarning', 'EOFError', 'Ellipsis', 'EnvironmentError', 'Exception',
'FileExistsError', 'FileNotFoundError', 'FloatingPointError', 'FutureWarning', 'GeneratorExit', 'IOError', 'ImportError', 'ImportWarning', 'IndentationError', 'IndexError',
'InterruptedError', 'IsADirectoryError', 'KeyError', 'KeyboardInterrupt', 'LookupError', 'MemoryError', 'ModuleNotFoundError', 'NameError', 'NotADirectoryError', 'NotImplemented',
'NotImplementedError', 'OSError', 'OverflowError', 'PendingDeprecationWarning', 'PermissionError', 'ProcessLookupError', 'ReferenceError', 'RecursionError', 'ResourceWarning',
'RuntimeError', 'RuntimeWarning', 'StopAsyncIteration', 'StopIteration', 'SystemError', 'SyntaxError', 'SyntaxWarning', 'SystemExit', 'TabError', 'TimeoutError', 'TypeError',
'UnicodeDecodeError', 'UnboundLocalError', 'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError', 'UnicodeWarning', 'UserWarning', 'ValueError', 'Warning', 'WindowsError',
'ZeroDivisionError', 'abs', 'all', 'any', 'ascii', 'bin', 'bool', 'breakpoint', 'bytearray', 'callable', 'chr', 'classmethod', 'compile', 'complex', 'copyright', 'credits', 'delattr',
'dict', 'dir', 'divmod', 'enumerate', 'eval', 'exec', 'exit', 'filter', 'float', 'format', 'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'help', 'hex', 'id', 'input', 'int',
'isinstance', 'issubclass', 'iter', 'len', 'license', 'list', 'locals', 'map', 'max', 'memoryview', 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'print', 'property','quit', 'range',
'repr', 'reversed', 'round', 'setattr', 'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type', 'vars', 'zip'],
'Numbers' : [],
'Error' : [],
'Output' : []
}
# Create a dictionary with syntax ends - blank for continuous highlighting
ends = {
'Comment': '',
'String': '',
'Keywords': '</span>',
'Builtins': '</span>',
'Numbers': '</span>'
}
# Create a dictionary with string to be replaced
Replacers = {'<':'⋞',
'>':'⋟',
'<p>':'',
'</p>':' <br> ',
'\n':' <br> '}
ColorToType = {
"Border" : Border,
"Background" : Background,
"Foreground" : Foreground,
"Comment" : Comment,
"String" : String,
"Keywords" : Keywords,
"Builtins" : Builtins,
"Definitions" : Definitions,
"Numbers" : Numbers,
"Error" : Errors,
"Output" : Output
}
# Create A Theme setter
def setColorScheme(Color):
    """Activate a color theme by 1-based index or by name.

    ``Color`` may be an int (or numeric string) indexing into ``Themes``
    in definition order, or a theme-name key of ``Themes``.  On success
    the module-level color globals are rebound; on an unknown theme
    'Invalid Theme' is printed.
    """
    try:
        Color = int(Color)
    except (TypeError, ValueError):
        pass  # not numeric: treat it as a theme name
    try:
        if isinstance(Color, int):
            theme = Themes[list(Themes)[Color - 1]]
        else:
            theme = Themes[Color]
        global Border, Background, Foreground, Comment, String, Keywords, Builtins, Definitions, Colors, Numbers, Errors, Output
        Border = theme['Border']
        Background = theme['Background']
        Foreground = theme['Foreground']
        Comment = theme['Comment']
        String = theme['String']
        Keywords = theme['Keywords']
        Builtins = theme['Builtins']
        Definitions = theme['Definitions']
        Numbers = theme['Numbers']
        Errors = theme['Errors']
        # BUG FIX: the original read theme['Definitons'] (misspelled), which
        # raised KeyError and made every theme selection print 'Invalid
        # Theme' after partially applying the theme.  The theme dicts carry
        # no 'Output' entry, so Output mirrors Definitions (their module
        # defaults are both '#0000ff').
        Output = theme['Definitions']
    except (KeyError, IndexError):
        print('Invalid Theme')
#OLD
def Preview(code):
    """Write *code* to 'Code Snippet.html' under the user's Downloads
    folder and open it in the default web browser."""
    import webbrowser
    Path = os.path.join(os.path.expanduser('~'), 'Downloads', 'Html - CodeSnippets')
    # exist_ok avoids the race between the original exists() check and
    # the directory creation.
    os.makedirs(Path, exist_ok=True)
    Name = ('Code Snippet.html')  # - '+str(datetime.datetime.now())[:-10].replace(':','.')+'.html')
    fn = os.path.join(Path, Name)
    # 'with' guarantees the file is flushed and closed before the
    # browser tries to open it (the original leaked the handle on error).
    with open(fn, 'w+', errors='replace') as f:
        f.write(code)
    webbrowser.open_new(fn)
#NEW
def Compute(Code,color,inline):
    """Highlight *Code* and push the result into the GUI.

    *color* is a theme name ('' selects the first theme); it is converted
    to a 1-based index into ``Themes`` before being handed to htmlCode().
    *inline* selects inline-styled HTML output.  Updates the raw-HTML
    text box and the rendered OUTPUT widget (module-level globals).
    """
    print(color)
    if color=='':
        color = list(Themes)[0]
    # Theme name -> 1-based position in the Themes dict.
    color = list(Themes).index(color)+1
    print(color)
    inline = bool(inline)
    Raw = htmlCode(Code,color=color,inline=inline)
    global root,clicked,drop,text_box,OutText_box,OUTPUT
    # The raw-HTML box is normally read-only; enable it just long enough
    # to replace its contents.
    OutText_box.config(state='normal')
    OutText_box.delete(1.0, 'end')
    OutText_box.insert(1.0, Raw.code)
    OutText_box.config(state='disabled')
    if inline:
        a = Raw.code
        # NOTE(review): magic offsets — presumably these slice the 7-char
        # background color hex out of the generated HTML header; confirm
        # against htmlCode()'s exact output format before touching.
        bg = Raw.code[106:113]
    else:
        # Skip a fixed-length prefix of the non-inline output — same
        # caveat as above about the hard-coded offsets.
        a = Raw.code[8:]
        bg = Raw.code[184:191]
    OUTPUT['bg'] = bg
    OUTPUT.set_html(a)
    print(bg)
import random
from tkinter import scrolledtext,ttk
from functools import partial
import win32gui
#from io | |
<reponame>jacobrask/critic<gh_stars>0
# -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2012 <NAME>, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import dbutils
import diff
import diff.parse
import gitutils
import itertools
import changeset.load as changeset_load
import htmlutils
import re
import page.utils
from htmlutils import jsify
from time import strftime
from reviewing.filters import Filters
from operation import OperationFailure
class Comment:
    """A single comment within a comment chain, as loaded from the
    'comments' table (Python 2 codebase)."""
    def __init__(self, chain, batch_id, id, state, user, time, when, comment, code, unread):
        # chain: owning CommentChain; batch_id: batch the comment was
        # submitted in; state: e.g. 'draft' or 'current' (see the SQL in
        # CommentChain.loadComments); time: timestamp already adjusted
        # for the viewing user; when: its formatted form; code: optional
        # code payload; unread: True if the viewing user has this comment
        # in commentstoread.
        self.chain = chain
        self.batch_id = batch_id
        self.id = id
        self.state = state
        self.user = user
        self.time = time
        self.when = when
        self.comment = comment
        self.code = code
        self.unread = unread
    def __repr__(self):
        return "Comment(%r)" % self.comment
    def getJSConstructor(self):
        """Return a JavaScript 'new Comment(...)' expression for this comment."""
        return "new Comment(%d, %s, %s, %s, %s)" % (self.id, self.user.getJSConstructor(), jsify(strftime("%Y-%m-%d %H:%M", self.time.timetuple())), jsify(self.state), jsify(self.comment))
    @staticmethod
    def fromId(db, id, user):
        """Load a Comment by id, or return None if no such row exists.

        Timestamps are adjusted/formatted for *user*; the unread flag
        reflects whether *user* still has the comment marked to read.
        Also loads the owning chain via CommentChain.fromId().
        """
        cursor = db.cursor()
        cursor.execute("SELECT chain, batch, uid, time, state, comment, code FROM comments WHERE id=%s", (id,))
        row = cursor.fetchone()
        if not row: return None
        else:
            chain_id, batch_id, author_id, time, state, comment, code = row
            author = dbutils.User.fromId(db, author_id)
            adjusted_time = user.adjustTimestamp(db, time)
            when = user.formatTimestamp(db, time)
            cursor.execute("SELECT 1 FROM commentstoread WHERE uid=%s AND comment=%s", (user.id, id))
            return Comment(CommentChain.fromId(db, chain_id, user), batch_id, id, state, author, adjusted_time, when, comment, code, cursor.fetchone() is not None)
class CommentChain:
    """A chain of comments attached to a review, optionally anchored to
    lines of one or more file versions.

    Mirrors a row of the 'commentchains' table, with the viewing user's
    draft changes (type/state/last-commit/addressed-by) folded in; the
    *_is_draft flags record which attributes were overridden by drafts.
    (Python 2 codebase: see __nonzero__ and countUnread.)
    """
    def __init__(self, id, user, review, batch_id, type, state, origin=None, file_id=None, first_commit=None, last_commit=None, closed_by=None, addressed_by=None, type_is_draft=False, state_is_draft=False, last_commit_is_draft=False, addressed_by_is_draft=False, leader=None, count=None, unread=None):
        self.id = id
        self.user = user
        self.review = review
        self.batch_id = batch_id
        self.type = type
        self.type_is_draft = type_is_draft
        self.state = state
        self.state_is_draft = state_is_draft
        self.origin = origin
        self.file_id = file_id
        self.first_commit = first_commit
        self.last_commit = last_commit
        self.last_commit_is_draft = last_commit_is_draft
        self.closed_by = closed_by
        self.addressed_by = addressed_by
        self.addressed_by_is_draft = addressed_by_is_draft
        # Line anchors: list of (sha1, offset, count) plus a lookup keyed
        # by sha1; populated lazily via setLines().
        self.lines = None
        self.lines_by_sha1 = None
        # Lazily computed caches used by leader()/countComments()/countUnread().
        self.__leader = leader
        self.__count = count
        self.__unread = unread
        self.comments = []
    def setLines(self, sha1, offset, count):
        """Record that this chain covers *count* lines starting at line
        *offset* in the file version identified by *sha1*; returns self."""
        if not self.lines:
            self.lines = []
            self.lines_by_sha1 = {}
        # NOTE(review): this tests membership in the list of (sha1, offset,
        # count) tuples, so a repeated bare sha1 would NOT trip the assert;
        # presumably 'sha1 not in self.lines_by_sha1' was intended — confirm.
        assert sha1 not in self.lines
        self.lines.append((sha1, offset, count))
        self.lines_by_sha1[sha1] = (offset, count)
        return self
    def loadComments(self, db, user, include_draft_comments=True):
        """Populate self.comments from the database.

        Draft comments are visible only to their author: the chain
        owner's drafts when the chain itself is a draft, otherwise the
        viewing user's.  A draft comment, if any, is appended last.
        """
        if include_draft_comments:
            if self.state == "draft":
                draft_user_id = self.user.id
            else:
                draft_user_id = user.id
        else:
            # NOTE(review): draft_user_id is still interpolated into the
            # query below; None simply matches no uid.
            draft_user_id = None
        cursor = db.cursor()
        cursor.execute("""SELECT comments.id,
                                 comments.batch,
                                 comments.state,
                                 comments.uid,
                                 comments.time,
                                 comments.comment,
                                 comments.code,
                                 commentstoread.uid IS NOT NULL AS unread
                            FROM comments
                 LEFT OUTER JOIN commentstoread ON (comments.id=commentstoread.comment AND commentstoread.uid=%s)
                           WHERE comments.chain=%s
                             AND ((comments.state='draft' AND comments.uid=%s) OR comments.state='current')
                        ORDER BY comments.batch ASC""",
                       (user.id, self.id, draft_user_id))
        last = None
        for comment_id, batch_id, comment_state, author_id, time, comment, code, unread in cursor.fetchall():
            author = dbutils.User.fromId(db, author_id)
            adjusted_time = user.adjustTimestamp(db, time)
            when = user.formatTimestamp(db, time)
            comment = Comment(self, batch_id, comment_id, comment_state, author,
                              adjusted_time, when, comment, code, unread)
            # Hold the draft comment back so it always sorts last.
            if comment_state == 'draft': last = comment
            else: self.comments.append(comment)
        if last: self.comments.append(last)
    def when(self):
        """Formatted timestamp of the first comment (requires loadComments)."""
        return self.comments[0].when
    def countComments(self):
        """Number of comments in the chain (cached; may come from SQL in fromReview)."""
        if self.__count is None:
            self.__count = len(self.comments)
        return self.__count
    def countUnread(self):
        """Number of comments unread by the viewing user (cached)."""
        if self.__unread is None:
            # NOTE: len(filter(...)) only works on Python 2, where filter
            # returns a list.
            self.__unread = len(filter(lambda comment: comment.unread, self.comments))
        return self.__unread
    def title(self, include_time=True):
        """Human-readable heading, e.g. 'Issue raised by Jane Doe at ...'."""
        if self.type == "issue":
            result = "Issue raised by %s" % (self.user.fullname)
        else:
            result = "Note by %s" % (self.user.fullname)
        if include_time:
            result += " at %s" % self.when()
        return result
    def leader(self, max_length=80, text=False):
        """First line of the first comment, truncated to *max_length*.

        With text=True returns plain text (ASCII '[...]' marker);
        otherwise returns HTML-escaped text with an ellipsis.
        """
        if self.__leader is None: self.__leader = self.comments[0].comment.split("\n", 1)[0]
        if len(self.__leader) > max_length:
            if text: return self.__leader[:max_length - 5] + "[...]"
            else: return htmlutils.htmlify(self.__leader[:max_length - 3]) + "[…]"
        else:
            if text: return self.__leader
            else: return htmlutils.htmlify(self.__leader)
    def getJSConstructor(self, sha1=None):
        """Return a JavaScript 'new CommentChain(...)' expression.

        When *sha1* is given, includes a CommentLines object for the
        chain's anchor in that file version (requires setLines data).
        """
        if self.closed_by: closed_by = self.closed_by.getJSConstructor()
        else: closed_by = "null"
        if self.addressed_by: addressed_by = jsify(self.addressed_by.sha1)
        else: addressed_by = "null"
        comments = ", ".join(map(Comment.getJSConstructor, self.comments))
        if sha1:
            offset, count = self.lines_by_sha1[sha1]
            if self.file_id:
                lines = "new CommentLines(%d, %s, %d, %d)" % (self.file_id, jsify(sha1), offset, offset + count - 1)
            else:
                lines = "new CommentLines(null, %s, %d, %d)" % (jsify(sha1), offset, offset + count - 1)
        else:
            lines = "null"
        return "new CommentChain(%d, %s, %s, %s, %s, %s, %s, [%s], %s)" % (self.id, self.user.getJSConstructor(), jsify(self.type), "true" if self.type_is_draft else "false", jsify(self.state), closed_by, addressed_by, comments, lines)
    def __nonzero__(self):
        # Python 2 truth hook: a chain is truthy once comments are loaded.
        return bool(self.comments)
    def __eq__(self, other):
        return other is not None and self.id == other.id
    def __ne__(self, other):
        return other is None or self.id != other.id
    def __repr__(self):
        return "CommentChain(%d)" % self.id
    def __len__(self):
        return len(self.comments)
    def __getitem__(self, index):
        return self.comments[index]
    @staticmethod
    def fromReview(db, review, user):
        """Load all chains of *review* visible to *user* in a single query.

        Draft type/state changes by *user* are folded in; leader text,
        comment count and unread count are fetched up front so the
        returned chains don't need loadComments() for list views.
        """
        cursor = db.cursor()
        cursor.execute("""SELECT commentchains.id, commentchains.batch,
                                 users.id, users.name, users.fullname, users.status,
                                 useremails.email, useremails.verified,
                                 commentchains.type, drafttype.to_type,
                                 commentchains.state, draftstate.to_state,
                                 SUBSTR(comments.comment, 1, 81),
                                 chaincomments(commentchains.id),
                                 chainunread(commentchains.id, %s)
                            FROM commentchains
                            JOIN users ON (users.id=commentchains.uid)
                            JOIN useremails ON (useremails.id=users.email)
                            JOIN comments ON (comments.id=commentchains.first_comment)
                 LEFT OUTER JOIN commentchainchanges AS drafttype ON (drafttype.chain=commentchains.id
                                                                  AND drafttype.uid=%s
                                                                  AND drafttype.to_type IS NOT NULL
                                                                  AND drafttype.state='draft')
                 LEFT OUTER JOIN commentchainchanges AS draftstate ON (draftstate.chain=commentchains.id
                                                                   AND draftstate.uid=%s
                                                                   AND draftstate.to_state IS NOT NULL
                                                                   AND draftstate.state='draft')
                           WHERE commentchains.review=%s
                             AND (commentchains.state!='draft' or commentchains.uid=%s)
                        ORDER BY commentchains.id ASC""",
                       (user.id, user.id, user.id, review.id, user.id,))
        chains = []
        for chain_id, batch_id, user_id, user_name, user_fullname, user_status, user_email, user_email_verified, chain_type, draft_type, chain_state, draft_state, leader, count, unread in cursor:
            if draft_type is not None: chain_type = draft_type
            if draft_state is not None: chain_state = draft_state
            # Leader was fetched as the first 81 chars; trim to its first line.
            if "\n" in leader: leader = leader[:leader.index("\n")]
            chains.append(CommentChain(chain_id, dbutils.User(user_id, user_name, user_fullname, user_status, user_email, user_email_verified), review, batch_id, chain_type, chain_state, leader=leader, count=count, unread=unread))
        return chains
    @staticmethod
    def fromId(db, id, user, review=None, skip=None):
        """Load one chain by id, or return None if it does not exist.

        *user* (may be None) selects whose draft changes are applied.
        *review* can be passed to avoid a re-fetch (asserted to match).
        *skip* is an optional collection: include 'commits' to skip
        loading commit objects, 'lines' to skip line anchors.
        """
        cursor = db.cursor()
        cursor.execute("SELECT review, batch, uid, type, state, origin, file, first_commit, last_commit, closed_by, addressed_by FROM commentchains WHERE id=%s", [id])
        row = cursor.fetchone()
        if not row:
            return None
        else:
            review_id, batch_id, user_id, type, state, origin, file_id, first_commit_id, last_commit_id, closed_by_id, addressed_by_id = row
            type_is_draft = False
            state_is_draft = False
            last_commit_is_draft = False
            addressed_by_is_draft = False
            if user is not None:
                # Fold in this user's pending draft changes to the chain.
                cursor.execute("""SELECT from_type, to_type,
                                         from_state, to_state,
                                         from_last_commit, to_last_commit,
                                         from_addressed_by, to_addressed_by
                                    FROM commentchainchanges
                                   WHERE chain=%s
                                     AND uid=%s
                                     AND state='draft'""",
                               [id, user.id])
                for from_type, to_type, from_state, to_state, from_last_commit_id, to_last_commit_id, from_addressed_by_id, to_addressed_by_id in cursor:
                    if from_state == state:
                        state = to_state
                        state_is_draft = True
                        # A draft close/resolve is attributed to the drafting user.
                        if to_state != "open":
                            closed_by_id = user.id
                    if from_type == type:
                        type = to_type
                        type_is_draft = True
                    if from_last_commit_id == last_commit_id:
                        # NOTE(review): assigning from_last_commit_id here is a
                        # no-op (they are equal); by analogy with the
                        # addressed_by branch below, to_last_commit_id was
                        # presumably intended — confirm before changing.
                        last_commit_id = from_last_commit_id
                        last_commit_is_draft = True
                    if from_addressed_by_id == addressed_by_id:
                        addressed_by_id = to_addressed_by_id
                        addressed_by_is_draft = True
            if review is None:
                review = dbutils.Review.fromId(db, review_id)
            else:
                assert review.id == review_id
            first_commit = last_commit = addressed_by = None
            if not skip or 'commits' not in skip:
                if first_commit_id: first_commit = gitutils.Commit.fromId(db, review.repository, first_commit_id)
                if last_commit_id: last_commit = gitutils.Commit.fromId(db, review.repository, last_commit_id)
                if addressed_by_id: addressed_by = gitutils.Commit.fromId(db, review.repository, addressed_by_id)
            if closed_by_id: closed_by = dbutils.User.fromId(db, closed_by_id)
            else: closed_by = None
            chain = CommentChain(id, dbutils.User.fromId(db, user_id), review,
                                 batch_id, type, state, origin, file_id,
                                 first_commit, last_commit, closed_by, addressed_by,
                                 type_is_draft=type_is_draft,
                                 state_is_draft=state_is_draft,
                                 last_commit_is_draft=last_commit_is_draft,
                                 addressed_by_is_draft=addressed_by_is_draft)
            if not skip or 'lines' not in skip:
                # Drafted line anchors are visible to the chain owner while
                # the chain is a draft, otherwise to the viewing user.
                if chain.state == "draft":
                    draft_user_id = chain.user.id
                elif user is not None:
                    draft_user_id = user.id
                else:
                    draft_user_id = None
                if draft_user_id is not None:
                    cursor.execute("""SELECT sha1, first_line, last_line
                                        FROM commentchainlines
                                       WHERE chain=%s
                                         AND (state='current' OR uid=%s)""",
                                   (id, draft_user_id))
                    for sha1, first_line, last_line in cursor.fetchall():
                        chain.setLines(sha1, first_line, last_line - first_line + 1)
            return chain
def loadCommentChains(db, review, user, file=None, changeset=None, commit=None, local_comments_only=False):
result = []
cursor = db.cursor()
chain_ids = None
if file is None and changeset is None and commit is None:
cursor.execute("SELECT id FROM commentchains WHERE review=%s AND file IS NULL", [review.id])
elif commit is not None:
cursor.execute("""SELECT DISTINCT id
FROM commentchains
WHERE review=%s
AND file IS NULL
AND first_commit=%s
AND ((state!='draft' OR uid=%s)
AND state!='empty')
GROUP BY id""",
[review.id, commit.getId(db), user.id])
elif local_comments_only:
cursor.execute("""SELECT DISTINCT commentchains.id
FROM commentchains
JOIN commentchainlines ON (commentchainlines.chain=commentchains.id)
JOIN fileversions ON (fileversions.file=commentchains.file)
WHERE commentchains.review=%s
AND commentchains.file=%s
AND commentchains.state!='empty'
AND ((commentchains.first_commit=%s AND commentchains.last_commit=%s)
OR commentchains.addressed_by=%s)
AND fileversions.changeset=%s
AND (commentchainlines.sha1=fileversions.old_sha1
OR commentchainlines.sha1=fileversions.new_sha1)
AND (commentchainlines.state='current'
OR commentchainlines.uid=%s)
ORDER BY commentchains.id ASC""",
(review.id, file.id, changeset.parent.getId(db), changeset.child.getId(db), changeset.child.getId(db), changeset.id, user.id))
else:
chain_ids = set()
if | |
5127, 5128, 5134, 5133)
model.createElement(3440, 5368, 3038, 3039, 5374, 5128, 2998, 2999, 5134)
model.createElement(3441, 2362, 5369, 5375, 2361, 2322, 5129, 5135, 2321)
model.createElement(3442, 5369, 5370, 5376, 5375, 5129, 5130, 5136, 5135)
model.createElement(3443, 5370, 5371, 5377, 5376, 5130, 5131, 5137, 5136)
model.createElement(3444, 5371, 5372, 5378, 5377, 5131, 5132, 5138, 5137)
model.createElement(3445, 5372, 5373, 5379, 5378, 5132, 5133, 5139, 5138)
model.createElement(3446, 5373, 5374, 5380, 5379, 5133, 5134, 5140, 5139)
model.createElement(3447, 5374, 3039, 3040, 5380, 5134, 2999, 3000, 5140)
model.createElement(3448, 2361, 5375, 5381, 2360, 2321, 5135, 5141, 2320)
model.createElement(3449, 5375, 5376, 5382, 5381, 5135, 5136, 5142, 5141)
model.createElement(3450, 5376, 5377, 5383, 5382, 5136, 5137, 5143, 5142)
model.createElement(3451, 5377, 5378, 5384, 5383, 5137, 5138, 5144, 5143)
model.createElement(3452, 5378, 5379, 5385, 5384, 5138, 5139, 5145, 5144)
model.createElement(3453, 5379, 5380, 5386, 5385, 5139, 5140, 5146, 5145)
model.createElement(3454, 5380, 3040, 3041, 5386, 5140, 3000, 3001, 5146)
model.createElement(3455, 2360, 5381, 5387, 2359, 2320, 5141, 5147, 2319)
model.createElement(3456, 5381, 5382, 5388, 5387, 5141, 5142, 5148, 5147)
model.createElement(3457, 5382, 5383, 5389, 5388, 5142, 5143, 5149, 5148)
model.createElement(3458, 5383, 5384, 5390, 5389, 5143, 5144, 5150, 5149)
model.createElement(3459, 5384, 5385, 5391, 5390, 5144, 5145, 5151, 5150)
model.createElement(3460, 5385, 5386, 5392, 5391, 5145, 5146, 5152, 5151)
model.createElement(3461, 5386, 3041, 3042, 5392, 5146, 3001, 3002, 5152)
model.createElement(3462, 2359, 5387, 5393, 2358, 2319, 5147, 5153, 2318)
model.createElement(3463, 5387, 5388, 5394, 5393, 5147, 5148, 5154, 5153)
model.createElement(3464, 5388, 5389, 5395, 5394, 5148, 5149, 5155, 5154)
model.createElement(3465, 5389, 5390, 5396, 5395, 5149, 5150, 5156, 5155)
model.createElement(3466, 5390, 5391, 5397, 5396, 5150, 5151, 5157, 5156)
model.createElement(3467, 5391, 5392, 5398, 5397, 5151, 5152, 5158, 5157)
model.createElement(3468, 5392, 3042, 3043, 5398, 5152, 3002, 3003, 5158)
model.createElement(3469, 2358, 5393, 5399, 2357, 2318, 5153, 5159, 2317)
model.createElement(3470, 5393, 5394, 5400, 5399, 5153, 5154, 5160, 5159)
model.createElement(3471, 5394, 5395, 5401, 5400, 5154, 5155, 5161, 5160)
model.createElement(3472, 5395, 5396, 5402, 5401, 5155, 5156, 5162, 5161)
model.createElement(3473, 5396, 5397, 5403, 5402, 5156, 5157, 5163, 5162)
model.createElement(3474, 5397, 5398, 5404, 5403, 5157, 5158, 5164, 5163)
model.createElement(3475, 5398, 3043, 3044, 5404, 5158, 3003, 3004, 5164)
model.createElement(3476, 2357, 5399, 5405, 2356, 2317, 5159, 5165, 2316)
model.createElement(3477, 5399, 5400, 5406, 5405, 5159, 5160, 5166, 5165)
model.createElement(3478, 5400, 5401, 5407, 5406, 5160, 5161, 5167, 5166)
model.createElement(3479, 5401, 5402, 5408, 5407, 5161, 5162, 5168, 5167)
model.createElement(3480, 5402, 5403, 5409, 5408, 5162, 5163, 5169, 5168)
model.createElement(3481, 5403, 5404, 5410, 5409, 5163, 5164, 5170, 5169)
model.createElement(3482, 5404, 3044, 3045, 5410, 5164, 3004, 3005, 5170)
model.createElement(3483, 2356, 5405, 5411, 2355, 2316, 5165, 5171, 2315)
model.createElement(3484, 5405, 5406, 5412, 5411, 5165, 5166, 5172, 5171)
model.createElement(3485, 5406, 5407, 5413, 5412, 5166, 5167, 5173, 5172)
model.createElement(3486, 5407, 5408, 5414, 5413, 5167, 5168, 5174, 5173)
model.createElement(3487, 5408, 5409, 5415, 5414, 5168, 5169, 5175, 5174)
model.createElement(3488, 5409, 5410, 5416, 5415, 5169, 5170, 5176, 5175)
model.createElement(3489, 5410, 3045, 3046, 5416, 5170, 3005, 3006, 5176)
model.createElement(3490, 2355, 5411, 5417, 2354, 2315, 5171, 5177, 2314)
model.createElement(3491, 5411, 5412, 5418, 5417, 5171, 5172, 5178, 5177)
model.createElement(3492, 5412, 5413, 5419, 5418, 5172, 5173, 5179, 5178)
model.createElement(3493, 5413, 5414, 5420, 5419, 5173, 5174, 5180, 5179)
model.createElement(3494, 5414, 5415, 5421, 5420, 5174, 5175, 5181, 5180)
model.createElement(3495, 5415, 5416, 5422, 5421, 5175, 5176, 5182, 5181)
model.createElement(3496, 5416, 3046, 3047, 5422, 5176, 3006, 3007, 5182)
model.createElement(3497, 2354, 5417, 5423, 2353, 2314, 5177, 5183, 2313)
model.createElement(3498, 5417, 5418, 5424, 5423, 5177, 5178, 5184, 5183)
model.createElement(3499, 5418, 5419, 5425, 5424, 5178, 5179, 5185, 5184)
model.createElement(3500, 5419, 5420, 5426, 5425, 5179, 5180, 5186, 5185)
model.createElement(3501, 5420, 5421, 5427, 5426, 5180, 5181, 5187, 5186)
model.createElement(3502, 5421, 5422, 5428, 5427, 5181, 5182, 5188, 5187)
model.createElement(3503, 5422, 3047, 3048, 5428, 5182, 3007, 3008, 5188)
model.createElement(3504, 2353, 5423, 5429, 2352, 2313, 5183, 5189, 2312)
model.createElement(3505, 5423, 5424, 5430, 5429, 5183, 5184, 5190, 5189)
model.createElement(3506, 5424, 5425, 5431, 5430, 5184, 5185, 5191, 5190)
model.createElement(3507, 5425, 5426, 5432, 5431, 5185, 5186, 5192, 5191)
model.createElement(3508, 5426, 5427, 5433, 5432, 5186, 5187, 5193, 5192)
model.createElement(3509, 5427, 5428, 5434, 5433, 5187, 5188, 5194, 5193)
model.createElement(3510, 5428, 3048, 3049, 5434, 5188, 3008, 3009, 5194)
model.createElement(3511, 2352, 5429, 5435, 2351, 2312, 5189, 5195, 2311)
model.createElement(3512, 5429, 5430, 5436, 5435, 5189, 5190, 5196, 5195)
model.createElement(3513, 5430, 5431, 5437, 5436, 5190, 5191, 5197, 5196)
model.createElement(3514, 5431, 5432, 5438, 5437, 5191, 5192, 5198, 5197)
model.createElement(3515, 5432, 5433, 5439, 5438, 5192, 5193, 5199, 5198)
model.createElement(3516, 5433, 5434, 5440, 5439, 5193, 5194, 5200, 5199)
model.createElement(3517, 5434, 3049, 3050, 5440, 5194, 3009, 3010, 5200)
model.createElement(3518, 2351, 5435, 5441, 2350, 2311, 5195, 5201, 2310)
model.createElement(3519, 5435, 5436, 5442, 5441, 5195, 5196, 5202, 5201)
model.createElement(3520, 5436, 5437, 5443, 5442, 5196, 5197, 5203, 5202)
model.createElement(3521, 5437, 5438, 5444, 5443, 5197, 5198, 5204, 5203)
model.createElement(3522, 5438, 5439, 5445, 5444, 5198, 5199, 5205, 5204)
model.createElement(3523, 5439, 5440, 5446, 5445, 5199, 5200, 5206, 5205)
model.createElement(3524, 5440, 3050, 3051, 5446, 5200, 3010, 3011, 5206)
model.createElement(3525, 2350, 5441, 5447, 2349, 2310, 5201, 5207, 2309)
model.createElement(3526, 5441, 5442, 5448, 5447, 5201, 5202, 5208, 5207)
model.createElement(3527, 5442, 5443, 5449, 5448, 5202, 5203, 5209, 5208)
model.createElement(3528, 5443, 5444, 5450, 5449, 5203, 5204, 5210, 5209)
model.createElement(3529, 5444, 5445, 5451, 5450, 5204, 5205, 5211, 5210)
model.createElement(3530, 5445, 5446, 5452, 5451, 5205, 5206, 5212, 5211)
model.createElement(3531, 5446, 3051, 3052, 5452, 5206, 3011, 3012, 5212)
model.createElement(3532, 2349, 5447, 5453, 2348, 2309, 5207, 5213, 2308)
model.createElement(3533, 5447, 5448, 5454, 5453, 5207, 5208, 5214, 5213)
model.createElement(3534, 5448, 5449, 5455, 5454, 5208, 5209, 5215, 5214)
model.createElement(3535, 5449, 5450, 5456, 5455, 5209, 5210, 5216, 5215)
model.createElement(3536, 5450, 5451, 5457, 5456, 5210, 5211, 5217, 5216)
model.createElement(3537, 5451, 5452, 5458, 5457, 5211, 5212, 5218, 5217)
model.createElement(3538, 5452, 3052, 3053, 5458, 5212, 3012, 3013, 5218)
model.createElement(3539, 2348, 5453, 5459, 2347, 2308, 5213, 5219, 2307)
model.createElement(3540, 5453, 5454, 5460, 5459, 5213, 5214, 5220, 5219)
model.createElement(3541, 5454, 5455, 5461, 5460, 5214, 5215, 5221, 5220)
model.createElement(3542, 5455, 5456, 5462, 5461, 5215, 5216, 5222, 5221)
model.createElement(3543, 5456, 5457, 5463, 5462, 5216, 5217, 5223, 5222)
model.createElement(3544, 5457, 5458, 5464, 5463, 5217, 5218, 5224, 5223)
model.createElement(3545, 5458, 3053, 3054, 5464, 5218, 3013, 3014, 5224)
model.createElement(3546, 2347, 5459, 5465, 2346, 2307, 5219, 5225, 2306)
model.createElement(3547, 5459, 5460, 5466, 5465, 5219, 5220, 5226, 5225)
model.createElement(3548, 5460, 5461, 5467, 5466, 5220, 5221, 5227, 5226)
model.createElement(3549, 5461, 5462, 5468, 5467, 5221, 5222, 5228, 5227)
model.createElement(3550, 5462, 5463, 5469, 5468, 5222, 5223, 5229, 5228)
model.createElement(3551, 5463, 5464, 5470, 5469, 5223, 5224, 5230, 5229)
model.createElement(3552, 5464, 3054, 3055, 5470, 5224, 3014, 3015, 5230)
model.createElement(3553, 2346, 5465, 5471, 2345, 2306, 5225, 5231, 2305)
model.createElement(3554, 5465, 5466, 5472, 5471, 5225, 5226, 5232, 5231)
model.createElement(3555, 5466, 5467, 5473, 5472, 5226, 5227, 5233, 5232)
model.createElement(3556, 5467, 5468, 5474, 5473, 5227, 5228, 5234, 5233)
model.createElement(3557, 5468, 5469, 5475, 5474, 5228, 5229, 5235, 5234)
model.createElement(3558, 5469, 5470, 5476, 5475, 5229, 5230, 5236, 5235)
model.createElement(3559, 5470, 3055, 3056, 5476, 5230, 3015, 3016, 5236)
model.createElement(3560, 2345, 5471, 5477, 2344, 2305, 5231, 5237, 2304)
model.createElement(3561, 5471, 5472, 5478, 5477, 5231, 5232, 5238, 5237)
model.createElement(3562, 5472, 5473, 5479, 5478, 5232, 5233, 5239, 5238)
model.createElement(3563, 5473, 5474, 5480, 5479, 5233, 5234, 5240, 5239)
model.createElement(3564, 5474, 5475, 5481, 5480, 5234, 5235, 5241, 5240)
model.createElement(3565, 5475, 5476, 5482, 5481, 5235, 5236, 5242, 5241)
model.createElement(3566, 5476, 3056, 3057, 5482, 5236, 3016, 3017, 5242)
model.createElement(3567, 2344, 5477, 5483, 2343, 2304, 5237, 5243, 2303)
model.createElement(3568, 5477, 5478, 5484, 5483, 5237, 5238, 5244, 5243)
model.createElement(3569, 5478, 5479, 5485, 5484, 5238, 5239, 5245, 5244)
model.createElement(3570, 5479, 5480, 5486, 5485, 5239, 5240, 5246, 5245)
model.createElement(3571, 5480, 5481, 5487, 5486, 5240, 5241, 5247, 5246)
model.createElement(3572, 5481, 5482, 5488, 5487, 5241, 5242, 5248, 5247)
model.createElement(3573, 5482, 3057, 3058, 5488, 5242, 3017, 3018, 5248)
model.createElement(3574, 2343, 5483, 5489, 2342, 2303, 5243, 5249, 2302)
model.createElement(3575, 5483, 5484, 5490, 5489, 5243, 5244, 5250, 5249)
model.createElement(3576, 5484, 5485, 5491, 5490, 5244, 5245, 5251, 5250)
model.createElement(3577, 5485, 5486, 5492, 5491, 5245, 5246, 5252, 5251)
model.createElement(3578, 5486, 5487, 5493, 5492, 5246, 5247, 5253, 5252)
model.createElement(3579, 5487, 5488, 5494, 5493, 5247, 5248, 5254, 5253)
model.createElement(3580, 5488, 3058, 3059, 5494, 5248, 3018, 3019, 5254)
model.createElement(3581, 2342, 5489, 5495, 2341, 2302, 5249, 5255, 2301)
model.createElement(3582, 5489, 5490, 5496, 5495, 5249, 5250, 5256, 5255)
model.createElement(3583, 5490, 5491, 5497, 5496, 5250, 5251, 5257, 5256)
model.createElement(3584, 5491, 5492, 5498, 5497, 5251, 5252, 5258, 5257)
model.createElement(3585, 5492, 5493, 5499, 5498, 5252, 5253, 5259, 5258)
model.createElement(3586, 5493, 5494, 5500, 5499, 5253, 5254, 5260, 5259)
model.createElement(3587, 5494, 3059, 3060, 5500, 5254, 3019, 3020, 5260)
model.createElement(3588, 2341, 5495, 1112, 40, 2301, 5255, 1106, 39)
model.createElement(3589, 5495, 5496, 1113, 1112, 5255, 5256, 1107, 1106)
model.createElement(3590, 5496, 5497, 1114, 1113, 5256, 5257, 1108, 1107)
model.createElement(3591, 5497, 5498, 1115, 1114, 5257, 5258, 1109, 1108)
model.createElement(3592, 5498, 5499, 1116, 1115, 5258, 5259, 1110, 1109)
model.createElement(3593, 5499, 5500, 1117, 1116, 5259, 5260, 1111, 1110)
model.createElement(3594, 5500, 3060, 222, 1117, 5260, 3020, 223, 1111)
model.createElement(3595, 521, 3125, 5501, 2420, 522, 3119, 5261, 2380)
model.createElement(3596, 3125, 3126, 5502, 5501, 3119, 3120, 5262, 5261)
model.createElement(3597, 3126, 3127, 5503, 5502, 3120, 3121, 5263, 5262)
model.createElement(3598, 3127, 3128, 5504, 5503, 3121, 3122, 5264, 5263)
model.createElement(3599, 3128, 3129, 5505, 5504, | |
= 'annual mean in-canopy flow'
flow_set.units = 'm s-1'
flow_set[:, :] = 0
if self._map_output['tme']:
temp_set = self._map_data.createVariable('Tc', 'f8', ('time', 'nmesh2d_face'))
temp_set.long_name = 'annual mean coral temperature'
temp_set.units = 'K'
temp_set[:, :] = 0
low_temp_set = self._map_data.createVariable('Tlo', 'f8', ('time', 'nmesh2d_face'))
low_temp_set.long_name = 'annual mean lower thermal limit'
low_temp_set.units = 'K'
low_temp_set[:, :] = 0
high_temp_set = self._map_data.createVariable('Thi', 'f8', ('time', 'nmesh2d_face'))
high_temp_set.long_name = 'annual mean upper thermal limit'
high_temp_set.units = 'K'
high_temp_set[:, :] = 0
if self._map_output['pd']:
pd_set = self._map_data.createVariable('PD', 'f8', ('time', 'nmesh2d_face'))
pd_set.long_name = 'annual sum photosynthetic rate'
pd_set.units = '-'
pd_set[:, :] = 0
if self._map_output['ps']:
pt_set = self._map_data.createVariable('PT', 'f8', ('time', 'nmesh2d_face'))
pt_set.long_name = 'total living coral population at the end of the year'
pt_set.units = '-'
pt_set[:, :] = coral.living_cover
ph_set = self._map_data.createVariable('PH', 'f8', ('time', 'nmesh2d_face'))
ph_set.long_name = 'healthy coral population at the end of the year'
ph_set.units = '-'
ph_set[:, :] = coral.living_cover
pr_set = self._map_data.createVariable('PR', 'f8', ('time', 'nmesh2d_face'))
pr_set.long_name = 'recovering coral population at the end of the year'
pr_set.units = '-'
pr_set[:, :] = 0
pp_set = self._map_data.createVariable('PP', 'f8', ('time', 'nmesh2d_face'))
pp_set.long_name = 'pale coral population at the end of the year'
pp_set.units = '-'
pp_set[:, :] = 0
pb_set = self._map_data.createVariable('PB', 'f8', ('time', 'nmesh2d_face'))
pb_set.long_name = 'bleached coral population at the end of the year'
pb_set.units = '-'
pb_set[:, :] = 0
if self._map_output['calc']:
calc_set = self._map_data.createVariable('calc', 'f8', ('time', 'nmesh2d_face'))
calc_set.long_name = 'annual sum calcification rate'
calc_set.units = 'kg m-2 yr-1'
calc_set[:, :] = 0
if self._map_output['md']:
dc_set = self._map_data.createVariable('dc', 'f8', ('time', 'nmesh2d_face'))
dc_set.long_name = 'coral plate diameter'
dc_set.units = 'm'
dc_set[0, :] = coral.dc
hc_set = self._map_data.createVariable('hc', 'f8', ('time', 'nmesh2d_face'))
hc_set.long_name = 'coral height'
hc_set.units = 'm'
hc_set[0, :] = coral.hc
bc_set = self._map_data.createVariable('bc', 'f8', ('time', 'nmesh2d_face'))
bc_set.long_name = 'coral base diameter'
bc_set.units = 'm'
bc_set[0, :] = coral.bc
tc_set = self._map_data.createVariable('tc', 'f8', ('time', 'nmesh2d_face'))
tc_set.long_name = 'coral plate thickness'
tc_set.units = 'm'
tc_set[0, :] = coral.tc
ac_set = self._map_data.createVariable('ac', 'f8', ('time', 'nmesh2d_face'))
ac_set.long_name = 'coral axial distance'
ac_set.units = 'm'
ac_set[0, :] = coral.ac
vc_set = self._map_data.createVariable('Vc', 'f8', ('time', 'nmesh2d_face'))
vc_set.long_name = 'coral volume'
vc_set.units = 'm3'
vc_set[0, :] = coral.volume
self._map_data.close()
def update_map(self, coral, year):
    """Append the annual (map) output of one simulation year.

    Opens the map NetCDF file in append mode, records the new time entry
    and, for every enabled output group, the per-cell annual statistics;
    the file is closed again afterwards. Does nothing when map output is
    disabled entirely.

    :param coral: coral animal
    :param year: simulation year
    :type coral: Coral
    :type year: int
    """
    if self._map_output is None or not any(self._map_output.values()):
        return

    self._map_data = Dataset(self.file_name_map, mode='a')
    dataset = self._map_data

    time_index = int(year - self.first_year)
    dataset['time'][time_index] = year

    if self._map_output['lme']:
        dataset['Iz'][-1, :] = coral.light[:, -1]
    if self._map_output['fme']:
        dataset['ucm'][-1, :] = coral.ucm
    if self._map_output['tme']:
        dataset['Tc'][-1, :] = coral.temp[:, -1]
        # scalar thermal limits are broadcast over the full spatial domain
        if len(DataReshape.variable2array(coral.Tlo)) > 1:
            dataset['Tlo'][-1, :] = coral.Tlo
        else:
            dataset['Tlo'][-1, :] = coral.Tlo * np.ones(self.space)
        if len(DataReshape.variable2array(coral.Thi)) > 1:
            dataset['Thi'][-1, :] = coral.Thi
        else:
            dataset['Thi'][-1, :] = coral.Thi * np.ones(self.space)
    if self._map_output['pd']:
        dataset['PD'][-1, :] = coral.photo_rate.mean(axis=1)
    if self._map_output['ps']:
        # total population first, then the four individual states
        dataset['PT'][-1, :] = coral.pop_states[:, -1, :].sum(axis=1)
        dataset['PH'][-1, :] = coral.pop_states[:, -1, 0]
        dataset['PR'][-1, :] = coral.pop_states[:, -1, 1]
        dataset['PP'][-1, :] = coral.pop_states[:, -1, 2]
        dataset['PB'][-1, :] = coral.pop_states[:, -1, 3]
    if self._map_output['calc']:
        dataset['calc'][-1, :] = coral.calc.sum(axis=1)
    if self._map_output['md']:
        dataset['dc'][-1, :] = coral.dc
        dataset['hc'][-1, :] = coral.hc
        dataset['bc'][-1, :] = coral.bc
        dataset['tc'][-1, :] = coral.tc
        dataset['ac'][-1, :] = coral.ac
        dataset['Vc'][-1, :] = coral.volume
    dataset.close()
@property
def xy_stations(self):
    """(x, y)-coordinates of the output stations.

    On first access, snaps each requested output point to the nearest grid
    cell (minimum squared Euclidean distance) and caches both the chosen
    space indices (``self._idx_stations``) and their coordinates
    (``self._xy_stations``); later accesses return the cached array.

    The original implementation looped over the stations with a
    float-typed index buffer; this version computes the full broadcast
    distance matrix and takes one vectorized argmin per station.

    :rtype: numpy.ndarray
    """
    if self._xy_stations is None:
        coords = self.xy_coordinates
        # requested station locations; only the first `nout_his` entries are
        # used, mirroring the original loop bound (assumes nout_his matches
        # len(outpoint) in practice — TODO confirm with caller)
        targets = coords[self.outpoint, :][:self.nout_his]
        # squared distance from every grid cell to every station,
        # shape (n_cells, n_stations)
        d2 = ((coords[:, 0][:, None] - targets[:, 0][None, :]) ** 2
              + (coords[:, 1][:, None] - targets[:, 1][None, :]) ** 2)
        self._idx_stations = np.argmin(d2, axis=0).astype(int)
        self._xy_stations = coords[self._idx_stations, :]
    return self._xy_stations
@property
def idx_stations(self):
    """Space (grid-cell) indices of the output stations.

    Populated as a side effect of accessing the ``xy_stations`` property;
    until then this returns whatever ``self._idx_stations`` holds.

    :rtype: numpy.ndarray
    """
    return self._idx_stations
@xy_stations.setter
def set_xy_stations(self):
    """Determine space indices based on the (x,y)-coordinates of the stations.

    NOTE(review): this setter looks broken. A property setter must accept a
    value argument (``def xy_stations(self, value)``) and should be bound to
    the property's own name; as written it creates a separate
    ``set_xy_stations`` property whose assignment would raise a TypeError
    (the function takes no value). It merely touches ``self.xy_stations`` to
    trigger the lazy computation — confirm intent before relying on it.
    """
    return self.xy_stations
def initiate_his(self):
    """Initiate the history output file.

    Creates the history NetCDF file in which daily output at predefined
    station locations is stored: the unlimited ``time`` dimension, the
    ``stations`` dimension, the station coordinate variables, and one
    float64 ``(time, stations)`` variable per enabled output group.
    Does nothing when no history output is requested.
    """
    if self._his_output is None or not any(self._his_output.values()):
        return

    self._his_data = Dataset(self.file_name_his, 'w', format='NETCDF4')
    self._his_data.description = 'Historic simulation data of the CoralModel'

    # dimensions
    self._his_data.createDimension('time', None)
    self._his_data.createDimension('stations', len(self.xy_stations))

    def his_var(name, long_name, units):
        # Helper: create a float64 (time, stations) variable with metadata;
        # factors out the 18 identical create/long_name/units triples.
        var = self._his_data.createVariable(name, 'f8', ('time', 'stations'))
        var.long_name = long_name
        var.units = units
        return var

    # variables
    t = self._his_data.createVariable('time', 'f8', ('time',))
    t.long_name = f'days since {self.first_date}'
    t.units = 'days'
    x = self._his_data.createVariable('station_x_coordinate', 'f8', ('stations',))
    y = self._his_data.createVariable('station_y_coordinate', 'f8', ('stations',))

    # setup data set
    x[:] = self.xy_stations[:, 0]
    y[:] = self.xy_stations[:, 1]

    if self._his_output['lme']:
        his_var('Iz', 'representative light-intensity', 'micro-mol photons m-2 s-1')
    if self._his_output['fme']:
        his_var('ucm', 'in-canopy flow', 'm s-1')
    if self._his_output['tme']:
        his_var('Tc', 'coral temperature', 'K')
        his_var('Tlo', 'lower thermal limit', 'K')
        his_var('Thi', 'upper thermal limit', 'K')
    if self._his_output['pd']:
        his_var('PD', 'photosynthetic rate', '-')
    if self._his_output['ps']:
        his_var('PT', 'total coral population', '-')
        his_var('PH', 'healthy coral population', '-')
        his_var('PR', 'recovering coral population', '-')
        his_var('PP', 'pale coral population', '-')
        his_var('PB', 'bleached coral population', '-')
    if self._his_output['calc']:
        his_var('G', 'calcification', 'kg m-2 d-1')
    if self._his_output['md']:
        his_var('dc', 'coral plate diameter', 'm')
        his_var('hc', 'coral height', 'm')
        his_var('bc', 'coral base diameter', 'm')
        his_var('tc', 'coral plate thickness', 'm')
        his_var('ac', 'coral axial distance', 'm')
        his_var('Vc', 'coral volume', 'm3')
    self._his_data.close()
def update_his(self, coral, dates):
    """Write data as daily output at predefined locations within the model domain.

    Appends one simulation year of daily records to the history NetCDF file
    for every enabled output group; a no-op when no history output is enabled.

    :param coral: coral animal
    :param dates: dates of simulation year
    :type coral: Coral
    :type dates: pandas
    """
    if self._his_output is not None and any(self._his_output.values()):
        self._his_data = Dataset(self.file_name_his, mode='a')
        y_dates = dates.reset_index(drop=True)
        # time indices for this year's rows: days elapsed since the first date
        ti = (y_dates - self.first_date).dt.days.values
        self._his_data['time'][ti] = y_dates.values
        if self._his_output['lme']:
            # select station cells, then transpose to (time, stations)
            self._his_data['Iz'][ti, :] = coral.light[self.idx_stations, :].transpose()
        if self._his_output['fme']:
            # flow is written once per cell; tiled over all days of the year
            self._his_data['ucm'][ti, :] = np.tile(coral.ucm, (len(y_dates), 1))[:, self.idx_stations]
        if self._his_output['tme']:
            self._his_data['Tc'][ti, :] = coral.temp[self.idx_stations, :].transpose()
            if len(DataReshape.variable2array(coral.Tlo)) > 1 and len(DataReshape.variable2array(coral.Thi)) > 1:
                # spatially varying thermal limits: tile over days, pick stations
                self._his_data['Tlo'][ti, :] = np.tile(coral.Tlo, (len(y_dates), 1))[:, self.idx_stations]
                self._his_data['Thi'][ti, :] = np.tile(coral.Thi, (len(y_dates), 1))[:, self.idx_stations]
            else:
                # scalar limits: broadcast the same value over all days/stations
                self._his_data['Tlo'][ti, :] = coral.Tlo * np.ones((len(y_dates), len(self.idx_stations)))
                self._his_data['Thi'][ti, :] = coral.Thi * np.ones((len(y_dates), len(self.idx_stations)))
        if self._his_output['pd']:
            self._his_data['PD'][ti, :] = coral.photo_rate[self.idx_stations, :].transpose()
        if self._his_output['ps']:
            # total population (summed over the state axis), then each state
            self._his_data['PT'][ti, :] = coral.pop_states[self.idx_stations, :, :].sum(axis=2).transpose()
            self._his_data['PH'][ti, :] = coral.pop_states[self.idx_stations, :, 0].transpose()
            self._his_data['PR'][ti, :] = coral.pop_states[self.idx_stations, :, 1].transpose()
            self._his_data['PP'][ti, :] = coral.pop_states[self.idx_stations, :, 2].transpose()
            self._his_data['PB'][ti, :] = coral.pop_states[self.idx_stations, :, 3].transpose()
        if self._his_output['calc']:
            self._his_data['G'][ti, :] = coral.calc[self.idx_stations, :].transpose()
        if self._his_output['md']:
            # morphology is constant within a year: tile the per-cell values
            self._his_data['dc'][ti, :] = np.tile(coral.dc, (len(y_dates), 1))[:, self.idx_stations]
            self._his_data['hc'][ti, :] = np.tile(coral.hc, (len(y_dates), 1))[:, self.idx_stations]
            self._his_data['bc'][ti, :] = np.tile(coral.bc, (len(y_dates), 1))[:, self.idx_stations]
            self._his_data['tc'][ti, :] = np.tile(coral.tc, (len(y_dates), 1))[:, self.idx_stations]
            self._his_data['ac'][ti, :] = np.tile(coral.ac, (len(y_dates), 1))[:, self.idx_stations]
            self._his_data['Vc'][ti, :] = np.tile(coral.volume, (len(y_dates), 1))[:, self.idx_stations]
        self._his_data.close()
class CoralOnly:
"""Execute functions only in the presence of corals."""
spacetime = None
@property
def space(self):
    """Space dimension: first entry of ``spacetime``, or None when unset."""
    if self.spacetime is None:
        return None
    return self.spacetime[0]
@property
def time(self):
    """Time dimension: second entry of ``spacetime``, or None when unset."""
    if self.spacetime is None:
        return None
    return self.spacetime[1]
def in_space(self, coral, function, args, no_cover_value=0):
"""Only execute the function when there is coral cover.
:param coral: coral object
:param function: function to be executed
:param args: input arguments of the function
:param no_cover_value: default value in absence of coral cover
:type coral: Coral
:type args: tuple
:type no_cover_value: float, optional
"""
args = list(args)
for i, arg in enumerate(args):
if isinstance(arg, (float, int)) or (isinstance(arg, np.ndarray) and not arg.shape):
args[i] = np.repeat(arg, self.space)
elif not len(arg) == self.space:
msg = f'Sizes do not match | |
def test_any_ninf(self):
# atan2(+-y, -infinity) returns +-pi for finite y > 0.
assert_almost_equal(ncu.arctan2(1, np.NINF), np.pi)
assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi)
def test_any_pinf(self):
    # atan2(+-y, +infinity) returns +-0 for finite y > 0.
    for y, check_signed_zero in ((1, assert_arctan2_ispzero),
                                 (-1, assert_arctan2_isnzero)):
        check_signed_zero(y, np.inf)
def test_inf_any(self):
# atan2(+-infinity, x) returns +-pi/2 for finite x.
assert_almost_equal(ncu.arctan2( np.inf, 1), 0.5 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi)
def test_inf_ninf(self):
# atan2(+-infinity, -infinity) returns +-3*pi/4.
assert_almost_equal(ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi)
def test_inf_pinf(self):
# atan2(+-infinity, +infinity) returns +-pi/4.
assert_almost_equal(ncu.arctan2( np.inf, np.inf), 0.25 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi)
def test_nan_any(self):
    # atan2(nan, x) returns nan for any x, including inf
    for y, x in ((np.nan, np.inf), (np.inf, np.nan), (np.nan, np.nan)):
        assert_arctan2_isnan(y, x)
class TestLdexp(TestCase):
    """Checks for ldexp across exponent integer types and float widths."""

    def _check_ldexp(self, tp):
        # ldexp(2, 3) == 16 must hold for every mantissa float width when the
        # exponent is supplied with dtype `tp`.
        exponent = np.array(3, tp)
        for float_type in (np.float32, np.float64, np.longdouble):
            assert_almost_equal(ncu.ldexp(np.array(2., float_type), exponent), 16.)

    def test_ldexp(self):
        # The default Python int type should work
        assert_almost_equal(ncu.ldexp(2., 3), 16.)
        # The following int types should all be accepted
        for int_type in (np.int8, np.int16, np.int32, 'i', 'l'):
            self._check_ldexp(int_type)

    @dec.knownfailureif(sys.platform == 'win32' and sys.version_info < (2, 6),
                        "python.org < 2.6 binaries have broken ldexp in the "
                        "C runtime")
    def test_ldexp_overflow(self):
        # silence warning emitted on overflow
        saved_err = np.seterr(over="ignore")
        try:
            long_info = np.iinfo(np.dtype('l'))
            assert_equal(ncu.ldexp(2., long_info.max), np.inf)
            assert_equal(ncu.ldexp(2., long_info.min), 0)
        finally:
            np.seterr(**saved_err)
class TestMaximum(_FilterInvalids):
    """Tests for np.maximum: reductions, NaN propagation, object arrays."""

    def test_reduce(self):
        # maximum.reduce of 0..10 is 10 for every integer and float dtype;
        # once NaNs are inserted, the reduction must propagate NaN.
        dflt = np.typecodes['AllFloat']
        dint = np.typecodes['AllInteger']
        seq1 = np.arange(11)
        seq2 = seq1[::-1]
        func = np.maximum.reduce
        for dt in dint:
            tmp1 = seq1.astype(dt)
            tmp2 = seq2.astype(dt)
            assert_equal(func(tmp1), 10)
            assert_equal(func(tmp2), 10)
        for dt in dflt:
            tmp1 = seq1.astype(dt)
            tmp2 = seq2.astype(dt)
            assert_equal(func(tmp1), 10)
            assert_equal(func(tmp2), 10)
            tmp1[::2] = np.nan
            tmp2[::2] = np.nan
            assert_equal(func(tmp1), np.nan)
            assert_equal(func(tmp2), np.nan)

    def test_reduce_complex(self):
        # complex comparison orders by the real component first
        assert_equal(np.maximum.reduce([1, 2j]), 1)
        assert_equal(np.maximum.reduce([1 + 3j, 2j]), 1 + 3j)

    def test_float_nans(self):
        # maximum propagates a NaN from either argument
        nan = np.nan
        arg1 = np.array([0, nan, nan])
        arg2 = np.array([nan, 0, nan])
        out = np.array([nan, nan, nan])
        assert_equal(np.maximum(arg1, arg2), out)

    def test_complex_nans(self):
        # a NaN in either component taints the complex comparison.
        # np.complex was a deprecated alias of the builtin complex
        # (removed in NumPy 1.24); behavior is identical.
        nan = np.nan
        for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
            arg1 = np.array([0, cnan, cnan], dtype=complex)
            arg2 = np.array([cnan, 0, cnan], dtype=complex)
            out = np.array([nan, nan, nan], dtype=complex)
            assert_equal(np.maximum(arg1, arg2), out)

    def test_object_array(self):
        # np.object was a deprecated alias of the builtin object
        # (removed in NumPy 1.24); behavior is identical.
        arg1 = np.arange(5, dtype=object)
        arg2 = arg1 + 1
        assert_equal(np.maximum(arg1, arg2), arg2)
class TestMinimum(_FilterInvalids):
    """Tests for np.minimum: reductions, NaN propagation, object arrays."""

    def test_reduce(self):
        # minimum.reduce of 0..10 is 0 for every integer and float dtype;
        # once NaNs are inserted, the reduction must propagate NaN.
        dflt = np.typecodes['AllFloat']
        dint = np.typecodes['AllInteger']
        seq1 = np.arange(11)
        seq2 = seq1[::-1]
        func = np.minimum.reduce
        for dt in dint:
            tmp1 = seq1.astype(dt)
            tmp2 = seq2.astype(dt)
            assert_equal(func(tmp1), 0)
            assert_equal(func(tmp2), 0)
        for dt in dflt:
            tmp1 = seq1.astype(dt)
            tmp2 = seq2.astype(dt)
            assert_equal(func(tmp1), 0)
            assert_equal(func(tmp2), 0)
            tmp1[::2] = np.nan
            tmp2[::2] = np.nan
            assert_equal(func(tmp1), np.nan)
            assert_equal(func(tmp2), np.nan)

    def test_reduce_complex(self):
        # complex comparison orders by the real component first
        assert_equal(np.minimum.reduce([1, 2j]), 2j)
        assert_equal(np.minimum.reduce([1 + 3j, 2j]), 2j)

    def test_float_nans(self):
        # minimum propagates a NaN from either argument
        nan = np.nan
        arg1 = np.array([0, nan, nan])
        arg2 = np.array([nan, 0, nan])
        out = np.array([nan, nan, nan])
        assert_equal(np.minimum(arg1, arg2), out)

    def test_complex_nans(self):
        # np.complex was a deprecated alias of the builtin complex
        # (removed in NumPy 1.24); behavior is identical.
        nan = np.nan
        for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
            arg1 = np.array([0, cnan, cnan], dtype=complex)
            arg2 = np.array([cnan, 0, cnan], dtype=complex)
            out = np.array([nan, nan, nan], dtype=complex)
            assert_equal(np.minimum(arg1, arg2), out)

    def test_object_array(self):
        # np.object was a deprecated alias of the builtin object
        # (removed in NumPy 1.24); behavior is identical.
        arg1 = np.arange(5, dtype=object)
        arg2 = arg1 + 1
        assert_equal(np.minimum(arg1, arg2), arg1)
class TestFmax(_FilterInvalids):
    """Tests for np.fmax, which ignores NaNs when one operand is valid."""

    def test_reduce(self):
        dflt = np.typecodes['AllFloat']
        dint = np.typecodes['AllInteger']
        seq1 = np.arange(11)
        seq2 = seq1[::-1]
        func = np.fmax.reduce
        for dt in dint:
            tmp1 = seq1.astype(dt)
            tmp2 = seq2.astype(dt)
            assert_equal(func(tmp1), 10)
            assert_equal(func(tmp2), 10)
        for dt in dflt:
            tmp1 = seq1.astype(dt)
            tmp2 = seq2.astype(dt)
            assert_equal(func(tmp1), 10)
            assert_equal(func(tmp2), 10)
            # fmax skips NaNs: the largest remaining (odd-index) entry is 9
            tmp1[::2] = np.nan
            tmp2[::2] = np.nan
            assert_equal(func(tmp1), 9)
            assert_equal(func(tmp2), 9)

    def test_reduce_complex(self):
        # complex comparison orders by the real component first
        assert_equal(np.fmax.reduce([1, 2j]), 1)
        assert_equal(np.fmax.reduce([1 + 3j, 2j]), 1 + 3j)

    def test_float_nans(self):
        # fmax returns the non-NaN operand; NaN only when both are NaN
        nan = np.nan
        arg1 = np.array([0, nan, nan])
        arg2 = np.array([nan, 0, nan])
        out = np.array([0, 0, nan])
        assert_equal(np.fmax(arg1, arg2), out)

    def test_complex_nans(self):
        # np.complex was a deprecated alias of the builtin complex
        # (removed in NumPy 1.24); behavior is identical.
        nan = np.nan
        for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
            arg1 = np.array([0, cnan, cnan], dtype=complex)
            arg2 = np.array([cnan, 0, cnan], dtype=complex)
            out = np.array([0, 0, nan], dtype=complex)
            assert_equal(np.fmax(arg1, arg2), out)
class TestFmin(_FilterInvalids):
    """Tests for np.fmin, which ignores NaNs when one operand is valid."""

    def test_reduce(self):
        dflt = np.typecodes['AllFloat']
        dint = np.typecodes['AllInteger']
        seq1 = np.arange(11)
        seq2 = seq1[::-1]
        func = np.fmin.reduce
        for dt in dint:
            tmp1 = seq1.astype(dt)
            tmp2 = seq2.astype(dt)
            assert_equal(func(tmp1), 0)
            assert_equal(func(tmp2), 0)
        for dt in dflt:
            tmp1 = seq1.astype(dt)
            tmp2 = seq2.astype(dt)
            assert_equal(func(tmp1), 0)
            assert_equal(func(tmp2), 0)
            # fmin skips NaNs: the smallest remaining (odd-index) entry is 1
            tmp1[::2] = np.nan
            tmp2[::2] = np.nan
            assert_equal(func(tmp1), 1)
            assert_equal(func(tmp2), 1)

    def test_reduce_complex(self):
        # complex comparison orders by the real component first
        assert_equal(np.fmin.reduce([1, 2j]), 2j)
        assert_equal(np.fmin.reduce([1 + 3j, 2j]), 2j)

    def test_float_nans(self):
        # fmin returns the non-NaN operand; NaN only when both are NaN
        nan = np.nan
        arg1 = np.array([0, nan, nan])
        arg2 = np.array([nan, 0, nan])
        out = np.array([0, 0, nan])
        assert_equal(np.fmin(arg1, arg2), out)

    def test_complex_nans(self):
        # np.complex was a deprecated alias of the builtin complex
        # (removed in NumPy 1.24); behavior is identical.
        nan = np.nan
        for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
            arg1 = np.array([0, cnan, cnan], dtype=complex)
            arg2 = np.array([cnan, 0, cnan], dtype=complex)
            out = np.array([0, 0, nan], dtype=complex)
            assert_equal(np.fmin(arg1, arg2), out)
class TestFloatingPoint(TestCase):
    """Sanity check: this NumPy build must report floating-point support."""

    def test_floating_point(self):
        support_flag = ncu.FLOATING_POINT_SUPPORT
        assert_equal(support_flag, 1)
class TestDegrees(TestCase):
    """Radians-to-degrees conversion."""

    def test_degrees(self):
        # pi -> 180 and -pi/2 -> -90 pin both sign and scale
        for radians_in, degrees_out in ((np.pi, 180.0), (-0.5 * np.pi, -90.0)):
            assert_almost_equal(ncu.degrees(radians_in), degrees_out)
class TestRadians(TestCase):
    """Degrees-to-radians conversion."""

    def test_radians(self):
        # 180 -> pi and -90 -> -pi/2 pin both sign and scale
        for degrees_in, radians_out in ((180.0, np.pi), (-90.0, -0.5 * np.pi)):
            assert_almost_equal(ncu.radians(degrees_in), radians_out)
class TestSign(TestCase):
    """np.sign semantics: +/-1 for nonzero finite/inf, 0 for zero, NaN for NaN."""

    def test_sign(self):
        a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
        out = np.zeros(a.shape)
        tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0])
        # sign(nan) raises an "invalid value" FP warning; silence it locally.
        # np.errstate restores the previous error state automatically, which
        # replaces the original's manual seterr/try/finally bookkeeping.
        with np.errstate(invalid='ignore'):
            res = ncu.sign(a)
            assert_equal(res, tgt)
            # also exercise the explicit-output form and verify the buffer
            res = ncu.sign(a, out)
            assert_equal(res, tgt)
            assert_equal(out, tgt)
class TestSpecialMethods(TestCase):
def test_wrap(self):
class with_wrap(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
r = with_wrap()
r.arr = arr
r.context = context
return r
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x.arr, np.zeros(1))
func, args, i = x.context
self.assertTrue(func is ncu.minimum)
self.assertEqual(len(args), 2)
assert_equal(args[0], a)
assert_equal(args[1], a)
self.assertEqual(i, 0)
def test_wrap_with_iterable(self):
# test fix for bug #1026:
class with_wrap(np.ndarray):
__array_priority__ = 10
def __new__(cls):
return np.asarray(1).view(cls).copy()
def __array_wrap__(self, arr, context):
return arr.view(type(self))
a = with_wrap()
x = ncu.multiply(a, (1, 2, 3))
self.assertTrue(isinstance(x, with_wrap))
assert_array_equal(x, np.array((1, 2, 3)))
def test_priority_with_scalar(self):
# test fix for bug #826:
class A(np.ndarray):
__array_priority__ = 10
def __new__(cls):
return np.asarray(1.0, 'float64').view(cls).copy()
a = A()
x = np.float64(1)*a
self.assertTrue(isinstance(x, A))
assert_array_equal(x, np.array(1))
def test_old_wrap(self):
class with_wrap(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr):
r = with_wrap()
r.arr = arr
return r
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x.arr, np.zeros(1))
def test_priority(self):
class A(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
r = type(self)()
r.arr = arr
r.context = context
return r
class B(A):
__array_priority__ = 20.
class C(A):
__array_priority__ = 40.
x = np.zeros(1)
a = A()
b = B()
c = C()
f = ncu.minimum
self.assertTrue(type(f(x,x)) is np.ndarray)
self.assertTrue(type(f(x,a)) is A)
self.assertTrue(type(f(x,b)) is B)
self.assertTrue(type(f(x,c)) is C)
self.assertTrue(type(f(a,x)) is A)
self.assertTrue(type(f(b,x)) is B)
self.assertTrue(type(f(c,x)) is C)
self.assertTrue(type(f(a,a)) is A)
self.assertTrue(type(f(a,b)) is B)
self.assertTrue(type(f(b,a)) is B)
self.assertTrue(type(f(b,b)) is B)
self.assertTrue(type(f(b,c)) is C)
self.assertTrue(type(f(c,b)) is C)
self.assertTrue(type(f(c,c)) is C)
self.assertTrue(type(ncu.exp(a) is A))
self.assertTrue(type(ncu.exp(b) is B))
self.assertTrue(type(ncu.exp(c) is C))
def test_failing_wrap(self):
class A(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
raise RuntimeError
a = A()
self.assertRaises(RuntimeError, ncu.maximum, a, a)
def test_default_prepare(self):
class with_wrap(object):
__array_priority__ = 10
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
return arr
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x, np.zeros(1))
assert_equal(type(x), np.ndarray)
def test_prepare(self):
class with_prepare(np.ndarray):
__array_priority__ = 10
def __array_prepare__(self, arr, context):
# make sure we can return a new
return np.array(arr).view(type=with_prepare)
a = np.array(1).view(type=with_prepare)
x = np.add(a, a)
assert_equal(x, np.array(2))
assert_equal(type(x), with_prepare)
def test_failing_prepare(self):
class A(object):
def __array__(self):
return np.zeros(1)
def __array_prepare__(self, arr, context=None):
raise RuntimeError
a = A()
self.assertRaises(RuntimeError, ncu.maximum, a, a)
def test_array_with_context(self):
class A(object):
def __array__(self, dtype=None, context=None):
func, args, i = context
self.func = func
self.args = args
self.i = i
return np.zeros(1)
class B(object):
def __array__(self, dtype=None):
return np.zeros(1, dtype)
class C(object):
def __array__(self):
return np.zeros(1)
a = A()
ncu.maximum(np.zeros(1), a)
self.assertTrue(a.func is ncu.maximum)
assert_equal(a.args[0], 0)
self.assertTrue(a.args[1] is a)
self.assertTrue(a.i == 1)
assert_equal(ncu.maximum(a, B()), 0)
assert_equal(ncu.maximum(a, C()), 0)
class TestChoose(TestCase):
    """np.choose with a boolean selector mixed with a scalar choice."""

    def test_mixed(self):
        selector = np.array([True, True])
        first_choice = np.array([True, True])
        # True selects index 1, i.e. the scalar 1, at every position
        assert_equal(np.choose(selector, (first_choice, 1)), np.array([1, 1]))
def is_longdouble_finfo_bogus():
    """Return True when finfo for the long-double complex type is unusable.

    On some platforms the reported tiny/eps values produce a non-finite
    log10 ratio, which makes precision-dependent tests meaningless there.
    """
    # ``np.longcomplex`` was removed in NumPy 2.0; ``np.clongdouble`` is the
    # canonical name for the same type.
    info = np.finfo(np.clongdouble)
    return not np.isfinite(np.log10(info.tiny/info.eps))
class TestComplexFunctions(object):
funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh,
np.arctanh, np.sin, np.cos, np.tan, np.exp,
np.exp2, np.log, np.sqrt, np.log10, np.log2,
np.log1p]
def test_it(self):
for f in self.funcs:
if f is np.arccosh :
x = 1.5
else :
x = .5
fr = f(x)
fz = f(np.complex(x))
assert_almost_equal(fz.real, fr, err_msg='real part %s'%f)
assert_almost_equal(fz.imag, 0., err_msg='imag part %s'%f)
def test_precisions_consistent(self) :
z = 1 + 1j
for f in self.funcs :
fcf = f(np.csingle(z))
fcd = f(np.cdouble(z))
fcl = f(np.clongdouble(z))
assert_almost_equal(fcf, fcd, decimal=6, err_msg='fch-fcd %s'%f)
assert_almost_equal(fcl, fcd, decimal=15, err_msg='fch-fcl %s'%f)
def test_branch_cuts(self):
# check branch | |
of the document
callback : str or callable
JavaScript or Python callback to be executed when clicking on an image. A
            dictionary containing the data for the full cell is directly available as
`data` in JS. For Python, the callback function must have `data` as the first
argument to the function. All the values in the `data` dict are parsed as
strings, except "mols2grid-id" which is always an integer.
sort_by : str or None
Sort the grid according to the following field (which must be present in
`subset` or `tooltip`).
"""
if self.mol_col:
df = self.dataframe.drop(columns=self.mol_col).copy()
else:
df = self.dataframe.copy()
cell_width = self.img_size[0]
smiles = self.smiles_col
content = []
column_map = {}
width = n_cols * (cell_width + 2 * (gap + 2))
if subset is None:
subset = df.columns.tolist()
subset = [subset.pop(subset.index("img"))] + subset
# define fields that are searchable and sortable
search_cols = [f"data-{col}" for col in subset if col != "img"]
if tooltip:
search_cols.append("mols2grid-tooltip")
sort_cols = search_cols[:-1]
sort_cols.extend([f"data-{col}" for col in tooltip])
for col in tooltip:
if col not in subset:
s = f'<div class="data data-{col}" style="display: none;"></div>'
content.append(s)
column_map[col] = f"data-{col}"
else:
sort_cols = search_cols[:]
sort_cols = ["mols2grid-id"] + sort_cols
# get unique list but keep order
sort_cols = list(dict.fromkeys(sort_cols))
if style is None:
style = {}
if transform is None:
transform = {}
if tooltip is None:
tooltip = []
value_names = list(set(subset + [smiles] + tooltip))
value_names = [f"data-{col}" for col in value_names]
# force id, SMILES, and tooltip values to be present in the data
final_columns = subset[:]
final_columns.extend(["mols2grid-id", smiles])
if tooltip:
final_columns.extend(tooltip)
final_columns = list(set(final_columns))
# make a copy if id shown explicitely
if "mols2grid-id" in subset:
id_name = "mols2grid-id-copy"
df[id_name] = df["mols2grid-id"]
value_names.append(f"data-{id_name}")
final_columns.append(id_name)
subset = [id_name if x == "mols2grid-id" else x for x in subset]
# organize data
for col in subset:
if col == "img" and tooltip:
s = (f'<a tabindex="0" class="data data-{col} mols2grid-tooltip" '
'data-toggle="popover" data-content="foo"></a>')
else:
if style.get(col):
s = f'<div class="data data-{col} style-{col}" style=""></div>'
else:
s = f'<div class="data data-{col}"></div>'
content.append(s)
column_map[col] = f"data-{col}"
# add but hide SMILES div if not present
if smiles not in (subset + tooltip):
s = f'<div class="data data-{smiles}" style="display: none;"></div>'
content.append(s)
column_map[smiles] = f"data-{smiles}"
# set mapping for list.js
if "__all__" in style.keys():
whole_cell_style = True
x = "[{data: ['mols2grid-id', 'cellstyle']}, "
else:
whole_cell_style = False
x = "[{data: ['mols2grid-id']}, "
value_names = x + str(value_names)[1:]
# apply CSS styles
for col, func in style.items():
if col == "__all__":
name = "cellstyle"
df[name] = df.apply(func, axis=1)
else:
name = f"style-{col}"
df[name] = df[col].apply(func)
final_columns.append(name)
value_names = value_names[:-1] + f", {{ attr: 'style', name: {name!r} }}]"
if tooltip:
df["mols2grid-tooltip"] = df.apply(tooltip_formatter, axis=1,
args=(tooltip, tooltip_fmt, style,
transform))
final_columns = final_columns + ["mols2grid-tooltip"]
value_names = (value_names[:-1] +
", {attr: 'data-content', name: 'mols2grid-tooltip'}]")
# apply custom user function
for col, func in transform.items():
df[col] = df[col].apply(func)
if selection:
checkbox = '<input type="checkbox" class="position-relative float-left">'
else:
checkbox = ""
if whole_cell_style:
item = ('<div class="cell" data-mols2grid-id="0" '
'data-cellstyle="0">{checkbox}{content}</div>')
else:
item = ('<div class="cell" data-mols2grid-id="0">'
'{checkbox}{content}</div>')
item = item.format(checkbox=checkbox, content="".join(content))
# callback
if callable(callback):
if callback.__name__ == "<lambda>":
raise TypeError(
"Lambda functions are not supported as callbacks. Please "
"use a regular function instead.")
callback_type = "python"
callback = callback.__name__
else:
callback_type = "js"
if sort_by and sort_by != "mols2grid-id":
if sort_by in (subset + tooltip):
sort_by = f"data-{sort_by}"
else:
raise ValueError(f"{sort_by} is not an available field in "
"`subset` or `tooltip`")
else:
sort_by = "mols2grid-id"
df = df[final_columns].rename(columns=column_map).sort_values(sort_by)
template = env.get_template('pages.html')
template_kwargs = dict(
width = width,
border = border,
textalign = textalign,
cell_width = cell_width,
fontfamily = fontfamily,
fontsize = fontsize,
gap = gap,
hover_color = hover_color,
item = item,
item_repr = repr(item),
value_names = value_names,
tooltip = tooltip,
tooltip_trigger = repr(tooltip_trigger),
tooltip_placement = repr(tooltip_placement),
n_items_per_page = n_rows * n_cols,
search_cols = search_cols,
data = json.dumps(df.to_dict("records")),
selection = selection,
smiles_col = smiles,
sort_cols = sort_cols,
grid_id = self._grid_id,
whole_cell_style = whole_cell_style,
custom_css = custom_css or "",
custom_header = custom_header or "",
callback = callback,
callback_type = callback_type,
sort_by = sort_by,
)
return template.render(**template_kwargs)
def get_selection(self):
"""Retrieve the dataframe subset corresponding to your selection
Returns
-------
pandas.DataFrame
"""
sel = list(register.get_selection().keys())
return (self.dataframe.loc[self.dataframe["mols2grid-id"].isin(sel)]
.drop(columns=self._extra_columns))
def filter(self, mask):
"""Filters the grid using a mask (boolean array)
Parameters
----------
mask : list, pd.Series, np.ndarray
Boolean array: `True` when the item should be displayed, `False` if it should
be filtered out.
"""
# convert mask to mols2grid-id
ids = self.dataframe.loc[mask]["mols2grid-id"]
return self._filter_by_id(ids)
def filter_by_index(self, indices):
"""Filters the grid using the dataframe's index"""
# convert index to mols2grid-id
ids = self.dataframe.loc[self.dataframe.index.isin(indices)]["mols2grid-id"]
return self._filter_by_id(ids)
def _filter_by_id(self, ids):
"""Filters the grid using the values in the `mols2grid-id` column"""
if isinstance(ids, (pd.Series, np.ndarray)):
ids = ids.to_list()
code = env.get_template('js/filter.js').render(
grid_id = self._grid_id,
ids = ids)
return Javascript(code)
def to_table(self, subset=None, tooltip=None, n_cols=6,
cell_width=160, border="1px solid #cccccc", gap=0,
fontsize="12pt", fontfamily="'DejaVu', sans-serif",
textalign="center", tooltip_fmt="<strong>{key}</strong>: {value}",
tooltip_trigger="click hover", tooltip_placement="bottom",
hover_color="#e7e7e7", style=None, transform=None):
"""Returns the HTML document for the "table" template
Parameters
----------
subset : list or None
Columns to be displayed in each cell of the grid. Each
column's value will be displayed from top to bottom in the same
order given here. Use `"img"` for the image of the molecule.
Default: all columns (with "img" in first position)
tooltip : list or None
Columns to be displayed as a tooltip when hovering/clicking on the
image of a cell. Use `None` for no tooltip.
tooltip_fmt : str
Format string of each key/value pair in the tooltip
tooltip_trigger : str
Sequence of triggers for the tooltip: (click, hover, focus)
tooltip_placement : str
Position of the tooltip: auto, top, bottom, left, right
n_cols : int
Number of columns in the table
border : str
Styling of the border around each cell (CSS)
gap : int or str
Size of the margin around each cell (CSS)
fontsize : str
Font size of the text displayed in each cell (CSS)
fontfamily : str
Font used for the text in each cell (CSS)
textalign : str
Alignment of the text in each cell (CSS)
hover_color : str
Background color when hovering a cell (CSS)
style : dict or None
CSS styling applied to specific items in all cells. The dict must follow a
`key: function` structure where the key must correspond to one of
the columns in `subset` or `tooltip`. The function takes the item's value as
input, and outputs a valid CSS styling, for example
`style={"Solubility": lambda x: "color: red" if x < -5 else "color: black"}`
if you want to color the text corresponding to the "Solubility"
column in your dataframe
transform : dict or None
Functions applied to specific items in all cells. The dict must follow a
`key: function` structure where the key must correspond to one of the columns
in `subset`. The function takes the item's value as input and transforms it,
for example:
`transform={"Solubility": lambda x: f"{x:.2f}",
"Melting point": lambda x: f"MP: {5/9*(x-32):.1f}°C"}`
will round the solubility to 2 decimals, and display the melting point in
Celsius instead of Fahrenheit with a single digit precision and some text
before (MP) and after (°C) the value. These transformations only affect
            columns in `subset` and `tooltip`, and are applied independently from `style`
"""
tr = []
data = []
df = self.dataframe
cell_width = self.img_size[0]
if subset is None:
subset = df.columns.tolist()
subset = [subset.pop(subset.index("img"))] + subset
if style is None:
style = {}
if transform is None:
transform = {}
for i, row in df.iterrows():
ncell = i + 1
nrow, ncol = divmod(i, n_cols)
td = [f'<td class="col-{ncol}>"']
if "__all__" in style.keys():
s = style["__all__"](row)
div = [f'<div class="cell-{i}" style="{s}">']
else:
div = [f'<div class="cell-{i}">']
for col in subset:
v = row[col]
if col == "img" | |
import logging
from json.decoder import JSONDecodeError
from typing import Dict, Optional, Union, List
from requests import Session, Response
from requests.exceptions import RequestException
from tmdbapis.exceptions import TMDbException, NotFound, Unauthorized, WritePermission, PrivateResource, \
Authentication, Invalid
logger = logging.getLogger(__name__)
base_url = "https://api.themoviedb.org/4"
class API4:
""" Raw V4 API Class containing all `TMDb API4 calls <https://developers.themoviedb.org/4/getting-started/authorization>`__.
Parameters:
access_token (str): TMDb V4 Access Token.
session (Optional[Session]): :class:`requests.Session` object.
validate (bool): Validate the TMDb V4 Access Token on creation.
Attributes:
access_token (str): TMDb V4 Access Token.
has_write_token (bool): Does the provided TMDb V4 Access Token have write access.
account_id (str): TMDb V4 Account ID.
response (Response): TMDb V4 most recent full :class:`requests.Response` object.
"""
    def __init__(self, access_token: str, session: Optional[Session] = None, validate: bool = True):
        # Raw V4 API wrapper; see the class docstring for attribute details.
        self.access_token = access_token
        # Set by auth_create_access_token(); None means no write access yet.
        self._account_id = None
        # Reuse the caller's Session when given so connection pooling is shared.
        self._session = Session() if session is None else session
        # Most recent requests.Response, updated by _request().
        self.response = None
        if validate:
            try:
                # NOTE(review): the *access* token is passed where a *request*
                # token is expected -- presumably to validate the token against
                # the API on startup; confirm this is intentional.
                self.auth_create_access_token(self.access_token)
            except TMDbException:
                # Validation failed: fall back to starting a fresh auth flow.
                self.auth_create_request_token()
def _get(self, path, **kwargs):
""" process get request. """
return self._request("get", path, **kwargs)
def _delete(self, path, json=None, **kwargs):
""" process delete request. """
return self._request("delete", path, json=json, **kwargs)
def _post(self, path, json=None, **kwargs):
""" process post request. """
return self._request("post", path, json=json, **kwargs)
def _put(self, path, json=None, **kwargs):
""" process put request. """
return self._request("put", path, json=json, **kwargs)
def _request(self, request_type, path, json=None, **kwargs):
""" process request. """
url_params = {k: v for k, v in kwargs.items() if v is not None}
body_json = {k: v for k, v in json.items() if v is not None} if json else None
request_url = f"{base_url}{path}"
logger.debug(f"Request URL: {request_url}")
headers = {"Authorization": f"Bearer {self.access_token}"}
if body_json is not None:
logger.debug(f"Request JSON: {body_json}")
logger.debug(f"Headers: {headers}")
try:
if request_type == "delete":
self.response = self._session.delete(request_url, json=body_json, headers=headers, params=url_params)
elif request_type == "post":
self.response = self._session.post(request_url, json=body_json, headers=headers, params=url_params)
elif request_type == "put":
self.response = self._session.put(request_url, json=body_json, headers=headers, params=url_params)
else:
self.response = self._session.get(request_url, headers=headers, params=url_params)
response_json = self.response.json()
except (RequestException, JSONDecodeError):
raise TMDbException(f"Failed to Connect to {base_url}")
logger.debug(f"Response ({self.response.status_code} [{self.response.reason}]) {response_json}")
if self.response.status_code == 401:
if "status_code" in response_json:
if response_json["status_code"] == 36:
raise WritePermission("Requires V4 Authentication, use tmdbapis.v4_authenticate(), then approve the returned URL, and finally run tmdbapis.v4_approved()")
elif response_json["status_code"] == 39:
raise PrivateResource(response_json["status_message"])
elif response_json["status_code"] == 7:
raise Unauthorized(response_json["status_message"])
else:
raise TMDbException(f"({response_json['status_code']}) {response_json['status_message']}")
else:
raise TMDbException(f"({self.response.status_code} [{self.response.reason}]) {response_json}")
elif self.response.status_code == 404:
raise NotFound(f"({self.response.status_code} [{self.response.reason}]) Requested Item Not Found")
elif self.response.status_code >= 400:
raise TMDbException(f"({self.response.status_code} [{self.response.reason}]) {response_json}")
elif "errors" in response_json:
raise TMDbException(response_json["errors"])
elif "success" in response_json and response_json["success"] is False:
raise TMDbException(response_json["status_message"])
return response_json
@property
def has_write_token(self):
return self._account_id is not None
@property
def account_id(self):
if not self._account_id:
raise Authentication(f"Requires V4 API Write Access Token, use tmdbapis.v4_access_token(access_token)")
return self._account_id
def account_get_lists(self, account_id: Optional[str] = None, page: Optional[int] = None) -> Dict:
""" `Account Get Lists <https://developers.themoviedb.org/4/account/get-account-lists>`__
Get all of the lists you've created.
Parameters:
account_id (Optional[str]): Account ID.
page (Optional[int]): Specify which page to query.
"""
return self._get(f"/account/{account_id if account_id else self.account_id}/lists", page=page)
def account_get_favorite_movies(
self, account_id: Optional[str] = None,
sort_by: Optional[str] = None,
page: Optional[str] = None
) -> Dict:
""" `Account Get Favorite Movies <https://developers.themoviedb.org/3/account/get-favorite-movies>`__
Get the list of movies you have marked as a favorite.
Parameters:
account_id (Optional[str]): Account ID.
sort_by (Optional[str]): Choose a sort option for the list of results. Allowed Values: ``created_at.asc``, ``created_at.desc``, ``release_date.asc``, ``release_date.desc``, ``title.asc``, ``title.desc``, ``vote_average.asc``, ``vote_average.desc``
page (Optional[int]): Specify which page to query.
"""
return self._get(f"/account/{account_id if account_id else self.account_id}/movie/favorites", sort_by=sort_by, page=page)
def account_get_favorite_tv_shows(
self, account_id: Optional[str] = None,
sort_by: Optional[str] = None,
page: Optional[str] = None
) -> Dict:
""" `Account Get Favorite TV Shows <https://developers.themoviedb.org/4/account/get-account-favorite-tv-shows>`__
Get the list of TV shows you have marked as a favorite.
Parameters:
account_id (Optional[str]): Account ID.
sort_by (Optional[str]): Choose a sort option for the list of results. Allowed Values: ``first_air_date.asc``, ``first_air_date.desc``, ``name.asc``, ``name.desc``, ``vote_average.asc``, ``vote_average.desc``
page (Optional[int]): Specify which page to query.
"""
return self._get(f"/account/{account_id if account_id else self.account_id}/tv/favorites", sort_by=sort_by, page=page)
def account_get_movie_recommendations(
self, account_id: Optional[str] = None,
sort_by: Optional[str] = None,
page: Optional[str] = None
) -> Dict:
""" `Account Get Movie Recommendations <https://developers.themoviedb.org/4/account/get-account-movie-recommendations>`__
Get a list of your personal movie recommendations.
Parameters:
account_id (Optional[str]): Account ID.
sort_by (Optional[str]): Choose a sort option for the list of results. Allowed Values: ``created_at.asc``, ``created_at.desc``, ``release_date.asc``, ``release_date.desc``, ``title.asc``, ``title.desc``, ``vote_average.asc``, ``vote_average.desc``
page (Optional[int]): Specify which page to query.
"""
return self._get(f"/account/{account_id if account_id else self.account_id}/movie/recommendations", sort_by=sort_by, page=page)
def account_get_tv_show_recommendations(
self, account_id: Optional[str] = None,
sort_by: Optional[str] = None,
page: Optional[str] = None
) -> Dict:
""" `Account Get TV Show Recommendations <https://developers.themoviedb.org/4/account/get-account-tv-show-recommendations>`__
Get a list of your personal TV show recommendations.
Parameters:
account_id (Optional[str]): Account ID.
sort_by (Optional[str]): Choose a sort option for the list of results. Allowed Values: ``first_air_date.asc``, ``first_air_date.desc``, ``name.asc``, ``name.desc``, ``vote_average.asc``, ``vote_average.desc``
page (Optional[int]): Specify which page to query.
"""
return self._get(f"/account/{account_id if account_id else self.account_id}/tv/rated", sort_by=sort_by, page=page)
def account_get_movie_watchlist(
self, account_id: Optional[str] = None,
sort_by: Optional[str] = None,
page: Optional[str] = None
) -> Dict:
""" `Account Get Movie Watchlist <https://developers.themoviedb.org/4/account/get-account-movie-watchlist>`__
Get the list of movies you have added to your watchlist.
Parameters:
account_id (Optional[str]): Account ID.
sort_by (Optional[str]): Choose a sort option for the list of results. Allowed Values: ``created_at.asc``, ``created_at.desc``, ``release_date.asc``, ``release_date.desc``, ``title.asc``, ``title.desc``, ``vote_average.asc``, ``vote_average.desc``
page (Optional[int]): Specify which page to query.
"""
return self._get(f"/account/{account_id if account_id else self.account_id}/movie/watchlist", sort_by=sort_by, page=page)
def account_get_tv_show_watchlist(
self, account_id: Optional[str] = None,
sort_by: Optional[str] = None,
page: Optional[str] = None
) -> Dict:
""" `Account Get TV Show Watchlist <https://developers.themoviedb.org/4/account/get-account-tv-show-watchlist>`__
Get the list of TV shows you have added to your watchlist.
Parameters:
account_id (Optional[str]): Account ID.
sort_by (Optional[str]): Choose a sort option for the list of results. Allowed Values: ``first_air_date.asc``, ``first_air_date.desc``, ``name.asc``, ``name.desc``, ``vote_average.asc``, ``vote_average.desc``
page (Optional[int]): Specify which page to query.
"""
return self._get(f"/account/{account_id if account_id else self.account_id}/tv/watchlist", sort_by=sort_by, page=page)
def account_get_rated_movies(
self, account_id: Optional[str] = None,
sort_by: Optional[str] = None,
page: Optional[str] = None
) -> Dict:
""" `Account Get Rated Movies <https://developers.themoviedb.org/4/account/get-account-rated-movies>`__
Get the list of movies you have rated.
Parameters:
account_id (Optional[str]): Account ID.
sort_by (Optional[str]): Choose a sort option for the list of results. Allowed Values: ``created_at.asc``, ``created_at.desc``, ``release_date.asc``, ``release_date.desc``, ``title.asc``, ``title.desc``, ``vote_average.asc``, ``vote_average.desc``
page (Optional[int]): Specify which page to query.
"""
return self._get(f"/account/{account_id if account_id else self.account_id}/movie/rated", sort_by=sort_by, page=page)
def account_get_rated_tv_shows(
self, account_id: Optional[str] = None,
sort_by: Optional[str] = None,
page: Optional[str] = None
) -> Dict:
""" `Account Get Rated TV Shows <https://developers.themoviedb.org/4/account/get-account-rated-tv-shows>`__
Get the list of TV shows you have rated.
Parameters:
account_id (Optional[str]): Account ID.
sort_by (Optional[str]): Choose a sort option for the list of results. Allowed Values: ``first_air_date.asc``, ``first_air_date.desc``, ``name.asc``, ``name.desc``, ``vote_average.asc``, ``vote_average.desc``
page (Optional[int]): Specify which page to query.
"""
return self._get(f"/account/{account_id if account_id else self.account_id}/tv/rated", sort_by=sort_by, page=page)
def auth_create_request_token(self, redirect_to: Optional[str] = None) -> Dict:
""" `Auth Create Request Token <https://developers.themoviedb.org/4/auth/create-request-token>`__
This method generates a new request token that you can ask a user to approve. This is the first step in
getting permission from a user to read and write data on their behalf. You can read more about this system
`here <https://developers.themoviedb.org/4/auth/user-authorization-1>`__.
Parameters:
redirect_to (Optional[str]): Redirect URL or callback that will be executed once a request token has been approved on TMDb.
"""
return self._post("/auth/request_token", json={"redirect_to": redirect_to} if redirect_to else None)
def auth_create_access_token(self, request_token: str) -> Dict:
""" `Auth Create Access Token <https://developers.themoviedb.org/4/auth/create-access-token>`__
This method will finish the user authentication flow and issue an official user access token. The request
token in this request is sent along as part of the POST body. You should still use your standard API read
access token for authenticating this request.
Parameters:
request_token (str): Request Token
"""
response = self._post("/auth/access_token", json={"request_token": request_token})
self.access_token = response["access_token"]
self._account_id = response["account_id"]
return response
def auth_delete_access_token(self, access_token: str) -> Dict:
""" `Auth Delete Access Token <https://developers.themoviedb.org/4/auth/delete-access-token>`__
This method gives your users the ability to log out of a session.
Parameters:
access_token (str): Access Token
"""
return self._delete("/auth/access_token", json={"access_token": access_token})
def list_get_list(
self, list_id: int,
language: Optional[str] = None,
sort_by: Optional[str] = None,
page: Optional[str] | |
in folders:
if isinstance(inc, bytes):
inc = inc.decode()
inc = sds_endswith(inc, add=True)
# don't put .sds in result item name
name = inc[:-4]
# don't check for existence of file, CPP loader will just skip it
# pull ref to list, or create a new one
inc_list = include_dict.setdefault(name, [])
inc_list.append(path+inc)
# build single dataset for each include item
for inc, files in include_dict.items():
include_dict[inc] = _stack_sds_files(files, share=share, info=info, sections=sections)
# if just one item pop it
#if len(include_dict) == 1:
# return include_dict.popitem()[1]
# return all items in struct container
return TypeRegister.Struct(include_dict)
#-----------------------------------------------------------------------------------------
def _load_sds_internal(
    filepath: Union[AnyPath, Sequence[AnyPath]],
    share: Optional[str] = None,
    info: bool = False,
    include_all_sds: bool = False,
    include: Optional[List[str]] = None,
    stack: Optional[bool] = None,
    name: Optional[str] = None,
    threads: Optional[int] = None,
    folders: Optional[List[str]] = None,
    filter: Optional[np.ndarray] = None,
    mustexist: bool = False,
    sections: Optional[List[str]] = None,
    reserve: float = 0.0
    ):
    '''
    All explicit `load_sds` calls will be funneled into this routine.
    See docstrings for load_sds(), load_sds_mem(), sds_tree(), sds_info()

    Dispatch overview (from the code below):

    - ``stack`` truthy: the input is normalized to a list and routed to
      ``_stack_sds_dirs`` (all directories) or ``_stack_sds_files`` (all .sds
      files); mixing directories and files raises TypeError.
    - otherwise: a list input goes through ``_sds_load_from_list``; a single
      path goes through ``_load_sds``, retried once with the ``.sds`` extension
      appended when the first attempt fails and no share/extension was given.
    '''
    # normalize a bare string include into a one-element list
    if isinstance(include, (str, bytes)):
        include=[include]
    # All folder names have to end in /
    if folders is not None:
        if isinstance(folders, (str, bytes)):
            folders = [folders]
        if not isinstance(folders, list):
            raise ValueError(f'The folders kwarg must be a list of strings of dataset or struct names to include. {folders}')
    if stack:
        # stacking accepts one path or many; always work with a list
        if isinstance(filepath, (str, bytes, os.PathLike)):
            filepath=[filepath]
        # Convert path-like objects to str/bytes (for compatibility with code below).
        filepath = [os.fspath(x) for x in filepath]
        # classify the inputs: directories vs individual .sds files
        files, sds_filelist, badlist, hasdir, hasfile = _sds_dir_from_file_list(filepath, share=share, mustexist=mustexist)
        if hasdir:
            if hasfile:
                raise TypeError(f'List of files must contain only directories or only .sds files. {filepath}')
            else:
                # only directories
                if include is None and folders is None:
                    raise ValueError(f'SDS stacking only implemented for Datasets. Must provide folders list if loading from multiple Struct directories.')
                return _stack_sds_dirs(files, share=share, info=info, include=include, folders=folders, sections=sections, threads=threads)
        else:
            # only files
            # TODO: Check if stacking with onefile (have to read file type of first file??)
            # TODO folders= must be preserved
            if folders is not None:
                # make sure all folders end with slash
                newfolders=[]
                for f in folders:
                    if not f.endswith('/'):
                        f = f + '/'
                    newfolders.append(f)
                folders=newfolders
                # assume onefile mode
                # NOTE(review): each folder is stacked independently and the
                # results are collected into one Struct keyed by folder name.
                include_dict={}
                for f in folders:
                    # drop the trailing slash for the result item name
                    fname = f[:-1]
                    include_dict[fname] = _stack_sds_files(sds_filelist, share=share, info=info, include=include, folders=[f], sections=sections, threads=threads, filter=filter, reserve=reserve)
                return TypeRegister.Struct(include_dict)
            return _stack_sds_files(sds_filelist, share=share, info=info, include=include, folders=folders, sections=sections, threads=threads,
        filter=filter, mustexist=mustexist, reserve=reserve)
    # not stacked
    # string-only operations until final load
    if isinstance(filepath, os.PathLike):
        filepath = os.fspath(filepath)
    if isinstance(filepath, bytes):
        filepath = filepath.decode()
    # list of full filepaths provided
    elif isinstance(filepath, list):
        files, single_sds, _, _, _ = _sds_dir_from_file_list(filepath, mustexist=mustexist)
        return _sds_load_from_list(files, single_sds, share=share, info=info, include=include, threads=threads,
            filter=filter, mustexist=mustexist, folders=folders, sections=sections)
    if sds_endswith(filepath) or share is not None:
        # explicit extension or shared-memory name: a single attempt, no retry
        # do not have a try
        result = _load_sds(filepath, sharename=share, info=info, include_all_sds=include_all_sds, include=include, name=name, stack=stack, threads=threads,
            filter=filter, mustexist=mustexist, folders=folders, sections=sections)
    else:
        # change so only one routine (_load_sds) attempts to fix file
        # do this when shared memory load gets forked
        # try to load with extension and without (due to people naming directories with .sds extensions)
        try:
            result = _load_sds(filepath, sharename=share, info=info, include_all_sds=include_all_sds, include=include, name=name, stack=stack, threads=threads,
                filter=filter, folders=folders, sections=sections)
            origerror = None
        except Exception:
            # remember the first failure; retried below with .sds appended
            origerror = sys.exc_info()[1]
        if origerror is not None:
            # try again with extension
            filepath = filepath+SDS_EXTENSION
            try:
                result = _load_sds(filepath, sharename=share, info=info, include_all_sds=include_all_sds, include=include, name=name, stack=stack, threads=threads,
                    filter=filter, folders=folders, sections=sections)
            except Exception:
                # both attempts failed: report the two distinct errors together
                raise ValueError(f'Could not load item with filepath {filepath!r} and shared name {share!r}. First error: {origerror!r}. Second error {sys.exc_info()[1]}')
    if info:
        # tree from struct, otherwise single string from array
        if isinstance(result, TypeRegister.Struct):
            result = TypeRegister.Struct._info_tree(filepath, result)
    return result
#-----------------------------------------------------------------------------------------
def load_sds(
filepath: Union[AnyPath, Sequence[AnyPath]],
share: Optional[str] = None,
info: bool = False,
include_all_sds: bool = False,
include: Optional[List[str]] = None,
name: Optional[str] = None,
threads: Optional[int] = None,
stack: Optional[bool] = None,
folders: Optional[List[str]] = None,
sections: Optional[List[str]] = None,
filter: Optional[np.ndarray] = None,
mustexist: bool = False,
verbose: bool = False,
reserve: float = 0.0
) -> 'Struct':
r"""
Load a dataset from single ``.sds`` file or struct from directory of ``.sds`` files.
When ``stack=True``, generic loader for a single ``.sds`` file or directory of multiple ``.sds`` files.
Parameters
----------
filepath : str or bytes or os.PathLike or sequence of str
Full path to file or directory.
When `stack` is ``True`` can be list of ``.sds`` files to stack
When `stack` is ``True`` list of directories containing ``.sds`` files to stack (must also use kwarg `include`)
share : str, optional
The shared memory name. loader will check for dataset in shared memory first and if it's not there, the
data (if the filepath is found on disk) will be loaded into the user's workspace AND shared memory.
A sharename must be accompanied by a file name. The rest of a full path will be trimmed off internally.
Defaults to None. For Windows make sure SE_CREATE_GLOBAL_NAME flag is set.
info : bool
No item data will be loaded, the hierarchy will be displayed in a tree (defaults to False).
include_all_sds : bool
If ``True``, any extra files in saved struct's directory will be loaded into final struct (skips user prompt) (defaults to False).
include : list of str, optional
A list of strings of which columns to load, e.g. ``['Ask','Bid']``.
When `stack` is ``True`` and directories passed, list of filenames to stack across each directory (defaults to None).
name : str, optional
Optionally specify the name of the struct being loaded. This might be different than directory (defaults to None).
threads : int, optional
How many threads to read, stack, and decompress with (defaults to None).
stack : bool, optional
Set to ``True`` to stack array data before loading into python (see docstring for `stack_sds`).
Set to ``False`` when appending many files into one and want columns flattening.
This parameter is not compatible with the `share` or `info` parameters (defaults to None).
folders : list of str, optional
A list of strings on which folders to include e.g., ``['zz/','xtra/']`` (must be saved with ``onefile=True``) (defaults to None).
sections : list of str, optional
A list of strings on which sections to include (must be saved with ``append="name"``) (defaults to None).
filter : ndarray, optional
Optional fancy index or boolean array. Does not work with ``stack=True``.
Designed to read in contiguous sections; for example, ``filter=arange(10)`` to read first 10 elements (defaults to None).
mustexist : bool
Set to True to ensure that all files exist or raise an exception (defaults to False).
verbose : bool
Prints time related data to stdout (defaults to False).
reserve : float
When set greater than 0.0 and less than 1.0, this is how much extra room is reserved when stacking.
If set to 0.10, it will allocate 10% more memory for future partitions.
Defaults to 0.0.
Returns
-------
Struct
Notes
-----
When `stack` is ``True``:
- columns with the same name must have matching types or upcastable types
- bytestring widths will be fixed internally
- numeric types will be upcast appropriately
- missing columns will be filled with the invalid value for the column type
Examples
--------
Stacking multiple files together while loading:
>>> files = [ r'D:\dir1\ds1.sds' r'D:\dir2\ds1.sds' ]
>>> load_sds(files, stack=True)
# col_0 col_1 col_2 col_3 col_4
- ----- ----- ----- ----- -----
0 0.71 0.86 0.44 0.97 0.47
1 0.89 0.40 0.10 0.94 0.66
2 0.03 0.56 0.80 0.85 0.30
Stacking multiple files together while loading, explicitly specifying the
list of columns to be loaded.
>>> files = [ r'D:\dir1\ds1.sds' r'D:\dir2\ds1.sds' ]
>>> include = ['col_0', 'col_1', 'col_4']
>>> load_sds(files, include=include, stack=True)
# col_0 col_1 col_4
- ----- ----- -----
0 0.71 0.86 0.47
1 0.89 0.40 0.66
2 0.03 0.56 0.30
Stacking multiple directories together while loading, explicitly specifying
the list of `Dataset`s to load (from each directory, then | |
param
] + list(value))
else:
log.error(f'Unexpected parameter value {value} for parameter {param}.')
return False
if process_result.returncode != 0:
log.error(f'Unexpected return code from NSSM when modifying at parameter. '
f'Return code {process_result.returncode}')
return False
return True
def get_service_details(nssm_binary, service):
    """Query NSSM for details about a Windows service.

    Runs ``nssm dump`` to recover the installed binary and configured
    parameters, then ``nssm status`` for the current service state.

    Returns a dict with keys 'install' (binary path or None), 'status'
    (string or None) and 'parameters' (NSSM parameter name -> value), or
    None when the dump command fails (service likely not installed).
    """
    process_result = subprocess.run([
        str(nssm_binary), 'dump', service
    ], capture_output=True, text=True, encoding='utf8')
    if process_result.returncode != 0:
        # Non-zero return from `nssm dump` means no such service: signal None.
        return None
    service_details = {
        'install': None,
        'status': None,
        'parameters': {}
    }
    process_output = process_result.stdout
    # The install line looks like: nssm.exe install <service> <binary path>
    result = re.search(r'nssm\.exe install \S+( (?P<install>.+))?', process_output)
    if result:
        service_details['install'] = result.group('install')
    # Each parameter line looks like: nssm.exe set <service> <param> ["]<value>["]
    # The (?P=quote) backreference only consumes a closing quote when an
    # opening quote was matched, so unquoted values pass through unchanged.
    for result in re.finditer(r'nssm.exe set \S+( (?P<param>\S+))?( (?P<quote>")?(?P<value>.+?)(?P=quote)?)?(\n|$)', process_output):
        param = result.group('param')
        value = result.group('value')
        if param is not None:
            service_details['parameters'][param] = value
    process_result = subprocess.run([
        str(nssm_binary), 'status', service
    ], capture_output=True, text=True, encoding='utf8')
    if process_result.returncode == 0:
        process_output = process_result.stdout
        service_details['status'] = process_output.strip()
    return service_details
def is_stable_windows_amd64_archive(name):
    """Return True when *name* looks like a stable Windows amd64 zip release.

    Accepts archive names that mention both 'windows' and 'amd64', end in
    '.zip', and are neither 'unstable' builds nor 'alltools' bundles.
    """
    # `substring in name` is the idiomatic (and clearer) form of
    # `name.find(substring) != -1`.
    return (
        'windows' in name and
        name.endswith('.zip') and
        'amd64' in name and
        'unstable' not in name and
        'alltools' not in name
    )
def install_gpg(base_directory):
    """Install the GPG binary under *base_directory*.

    Skips installation when a working ``bin/gpg.exe`` is already present.
    Otherwise scrapes the GNUPG download page for the Windows installer
    link, downloads it into ``downloads/``, runs it silently, and verifies
    the resulting binary. Returns True on success, False on any failure.
    """
    # Check if gnupg is already installed
    gpg_installed = False
    gpg_binary_path = base_directory.joinpath('bin', 'gpg.exe')
    if gpg_binary_path.is_file():
        process_result = subprocess.run([
            str(gpg_binary_path), '--version'
        ])
        if process_result.returncode == 0:
            gpg_installed = True
    if gpg_installed:
        log.info('GNUPG is already installed, no need to install it')
        return True
    # Get the gnupg install URL by scraping the download page for the first
    # link that matches the Windows installer naming pattern.
    gpg_installer_url = None
    try:
        response = httpx.get(GNUPG_DOWNLOAD_URL, follow_redirects=True)
        if response.status_code != 200:
            log.error(f'Cannot connect to GNUPG download URL {GNUPG_DOWNLOAD_URL}.\n'
                f'Unexpected status code {response.status_code}')
            return False
        response_text = response.text
        match = re.search(r'href="(?P<url>[^"]+gnupg-w32-[^"]+.exe)"', response_text)
        if not match:
            log.error(f'Cannot find GNUPG installer on GNUPG download URL {GNUPG_DOWNLOAD_URL}.')
            return False
        # The href may be relative; resolve it against the page URL.
        gpg_installer_url = urljoin(GNUPG_DOWNLOAD_URL, match.group('url'))
    except httpx.RequestError as exception:
        log.error(f'Cannot connect to GNUPG download URL {GNUPG_DOWNLOAD_URL}.\n'
            f'Exception {exception}')
        return False
    if gpg_installer_url is None:
        return False
    download_path = base_directory.joinpath('downloads')
    download_path.mkdir(parents=True, exist_ok=True)
    # Download the gnupg installer, replacing any stale previous download.
    file_name = urlparse(gpg_installer_url).path.split('/')[-1]
    download_installer_path = download_path.joinpath(file_name)
    if download_installer_path.is_file():
        download_installer_path.unlink()
    try:
        with open(download_installer_path, 'wb') as binary_file:
            log.info('Downloading GNUPG installer...')
            with httpx.stream('GET', gpg_installer_url, follow_redirects=True) as http_stream:
                if http_stream.status_code != 200:
                    log.error(f'Cannot download GNUPG installer {gpg_installer_url}.\n'
                        f'Unexpected status code {http_stream.status_code}')
                    return False
                for data in http_stream.iter_bytes():
                    binary_file.write(data)
    except httpx.RequestError as exception:
        log.error(f'Exception while downloading GNUPG installer. Exception {exception}')
        return False
    # Run installer silently (/S = silent install, /D = target directory).
    log.info('Installing GNUPG...')
    process_result = subprocess.run([
        str(download_installer_path), '/S', '/D=' + str(base_directory)
    ])
    if process_result.returncode != 0:
        log.error(f'Failed to install GNUPG. Return code {process_result.returncode}')
        return False
    # Remove download leftovers
    download_installer_path.unlink()
    # Make sure the binary landed where we expect and actually runs.
    if not gpg_binary_path.is_file():
        log.error(f'Could not find GPG binary after installation. '
            f'Expected to be in {gpg_binary_path}')
        return False
    process_result = subprocess.run([
        str(gpg_binary_path), '--version'
    ])
    if process_result.returncode != 0:
        log.error(f'Unexpected return from gpg binary. Return code {process_result.returncode}')
        return False
    return True
def install_jre(base_directory):
    """Install the Adoptium JRE under *base_directory*.

    If ``bin/jre/bin/java.exe`` already runs, the user is prompted whether
    to skip reinstalling. Otherwise the latest Windows x64 JRE package is
    located via the Adoptium API, downloaded, unzipped, and moved into
    ``bin/jre``. Returns True on success, False on failure, or the falsy
    dialog result when the user quits.
    """
    # Check if jre is already installed
    jre_path = base_directory.joinpath('bin', 'jre')
    java_path = jre_path.joinpath('bin', 'java.exe')
    jre_found = False
    jre_version = 'unknown'
    if java_path.is_file():
        try:
            process_result = subprocess.run([
                str(java_path), '--version'
            ], capture_output=True, text=True, encoding='utf8')
            jre_found = True
            process_output = process_result.stdout
            # Extract the version from the "OpenJDK Runtime Environment" line.
            result = re.search(r'OpenJDK Runtime Environment (.*?)\n', process_output)
            if result:
                jre_version = result.group(1).strip()
        except FileNotFoundError:
            pass
    install_jre = True
    if jre_found:
        result = button_dialog(
            title='JRE found',
            text=(
f'''
The JRE seems to have already been installed. Here are some details found:
Version: {jre_version}
Location: {jre_path}
Do you want to skip installing the JRE?
''' ),
            buttons=[
                ('Skip', 1),
                ('Install', 2),
                ('Quit', False)
            ]
        ).run()
        if not result:
            # Quit selected: propagate the falsy dialog result to the caller.
            return result
        install_jre = (result == 2)
    if install_jre:
        windows_builds = []
        try:
            log.info('Getting JRE builds...')
            response = httpx.get(ADOPTIUM_17_API_URL, params=ADOPTIUM_17_API_PARAMS,
                follow_redirects=True)
            if response.status_code != 200:
                log.error(f'Cannot connect to JRE builds URL {ADOPTIUM_17_API_URL}.\n'
                    f'Unexpected status code {response.status_code}')
                return False
            response_json = response.json()
            # The assets API is expected to return a non-empty list of dicts.
            if (
                type(response_json) is not list or
                len(response_json) == 0 or
                type(response_json[0]) is not dict):
                log.error(f'Unexpected response from JRE builds URL {ADOPTIUM_17_API_URL}')
                return False
            binaries = response_json
            for binary in binaries:
                if 'binary' not in binary:
                    continue
                binary = binary['binary']
                # Skip entries lacking any of the fields we need below.
                if (
                    'architecture' not in binary or
                    'os' not in binary or
                    'package' not in binary or
                    'image_type' not in binary or
                    'updated_at' not in binary):
                    continue
                image_type = binary['image_type']
                architecture = binary['architecture']
                binary_os = binary['os']
                # Only keep Windows x64 JRE images.
                if not (
                    binary_os == 'windows' and
                    architecture == 'x64' and
                    image_type == 'jre'):
                    continue
                package = binary['package']
                updated_at = dateparse(binary['updated_at'])
                if (
                    'name' not in package or
                    'checksum' not in package or
                    'link' not in package):
                    log.error(f'Unexpected response from JRE builds URL '
                        f'{ADOPTIUM_17_API_URL} in package')
                    return False
                package_name = package['name']
                package_link = package['link']
                package_checksum = package['checksum']
                windows_builds.append({
                    'name': package_name,
                    'updated_at': updated_at,
                    'link': package_link,
                    'checksum': package_checksum
                })
        except httpx.RequestError as exception:
            log.error(f'Cannot connect to JRE builds URL {ADOPTIUM_17_API_URL}.'
                f'\nException {exception}')
            return False
        if len(windows_builds) <= 0:
            log.error('No JRE builds found on adoptium.net. We cannot continue.')
            return False
        # Download latest JRE build and its signature
        # (sort newest-first by update time, then name, and take the head).
        windows_builds.sort(key=lambda x: (x['updated_at'], x['name']), reverse=True)
        latest_build = windows_builds[0]
        download_path = base_directory.joinpath('downloads')
        download_path.mkdir(parents=True, exist_ok=True)
        jre_archive_path = download_path.joinpath(latest_build['name'])
        if jre_archive_path.is_file():
            jre_archive_path.unlink()
        try:
            with open(jre_archive_path, 'wb') as binary_file:
                log.info(f'Downloading JRE archive {latest_build["name"]}...')
                with httpx.stream('GET', latest_build['link'],
                    follow_redirects=True) as http_stream:
                    if http_stream.status_code != 200:
                        log.error(f'Cannot download JRE archive {latest_build["link"]}.\n'
                            f'Unexpected status code {http_stream.status_code}')
                        return False
                    for data in http_stream.iter_bytes():
                        binary_file.write(data)
        except httpx.RequestError as exception:
            log.error(f'Exception while downloading JRE archive. Exception {exception}')
            return False
        # Unzip JRE archive
        archive_members = None
        log.info(f'Extracting JRE archive {latest_build["name"]}...')
        with ZipFile(jre_archive_path, 'r') as zip_file:
            archive_members = zip_file.namelist()
            zip_file.extractall(download_path)
        # Remove download leftovers
        jre_archive_path.unlink()
        if archive_members is None or len(archive_members) == 0:
            log.error('No files found in JRE archive. We cannot continue.')
            return False
        # Move all those extracted files into their final destination.
        # The archive wraps everything in a single top-level directory, so
        # move that directory's contents into jre_path.
        if jre_path.is_dir():
            shutil.rmtree(jre_path)
        jre_path.mkdir(parents=True, exist_ok=True)
        archive_extracted_dir = download_path.joinpath(Path(archive_members[0]).parts[0])
        with os.scandir(archive_extracted_dir) as it:
            for diritem in it:
                shutil.move(diritem.path, jre_path)
        # Make sure jre was installed properly
        jre_found = False
        try:
            process_result = subprocess.run([
                str(java_path), '--version'
            ], capture_output=True, text=True, encoding='utf8')
            jre_found = True
            process_output = process_result.stdout
            result = re.search(r'OpenJDK Runtime Environment (.*?)\n', process_output)
            if result:
                jre_version = result.group(1).strip()
        except FileNotFoundError:
            pass
        if not jre_found:
            log.error(f'We could not find the java binary from the installed JRE in {java_path}. '
                f'We cannot continue.')
            return False
    return True
def install_teku(base_directory, network, keys, eth1_fallbacks, consensus_checkpoint_url, ports):
# Install Teku for the selected network
base_directory = Path(base_directory)
nssm_binary = get_nssm_binary()
if not nssm_binary:
return False
# Check for existing service
teku_service_exists = False
teku_service_name = 'teku'
service_details = get_service_details(nssm_binary, teku_service_name)
if service_details is not None:
teku_service_exists = True
if teku_service_exists:
result = button_dialog(
title='Teku service found',
text=(
f'''
The teku service seems to have already been created. Here are some details
found:
Display name: {service_details['parameters'].get('DisplayName')}
Status: {service_details['status']}
Binary: {service_details['install']}
App parameters: {service_details['parameters'].get('AppParameters')}
App directory: {service_details['parameters'].get('AppDirectory')}
Do you want to skip installing teku and its service?
''' ),
buttons=[
('Skip', 1),
('Install', 2),
('Quit', False)
]
).run()
if not result:
return result
if result == 1:
return True
# User wants to proceed, make sure the teku service is stopped first
subprocess.run([
str(nssm_binary), 'stop', teku_service_name])
result = button_dialog(
title='Teku installation',
text=(
'''
This next step will install Teku, an Ethereum consensus client that
includes a beacon node and a validator client in the same binary
distribution.
It will install AdoptOpenJDK, a Java Runtime Environment, it will download
the official Teku binary distribution from GitHub, it will verify its
checksum and it will extract it for easy use. You will be invited to
provide an initial state to fast-track syncing.
Once installed locally, it will create a service that will automatically
start Teku on reboot or if it crashes. The Teku client will be started and
you will start syncing with the Ethereum network. The Teku client will
automatically start validating once syncing is completed and your
validator(s) are activated.
''' ),
buttons=[
('Install', True),
('Quit', False)
]
).run()
if not result:
return result
if not install_jre(base_directory):
return False
# Check if teku is already installed
teku_path = base_directory.joinpath('bin', 'teku')
teku_batch_file = teku_path.joinpath('bin', 'teku.bat')
teku_found = False
teku_version = 'unknown'
java_home = base_directory.joinpath('bin', 'jre')
if teku_batch_file.is_file():
try:
env = os.environ.copy()
env['JAVA_HOME'] = str(java_home)
process_result = subprocess.run([
str(teku_batch_file), '--version'
], capture_output=True, text=True, env=env)
teku_found = True
process_output = process_result.stdout
result = re.search(r'teku/(?P<version>[^/]+)', process_output)
if result:
| |
destinationStream, autoAdd = True):
'''
Assume that element1 and element2 are two elements in sourceStream
and destinationStream with other elements (say eA, eB, eC) between
them. For instance, element1 could be the downbeat at offset 10
in sourceStream (a Stream representing a score) and offset 20.5
in destinationStream (which might be a Stream representing the
timing of notes in particular recording at approximately but not
exactly qtr = 30). Element2 could be the following downbeat in 4/4,
at offset 14 in source but offset 25.0 in the recording:
>>> sourceStream = stream.Stream()
>>> destinationStream = stream.Stream()
>>> element1 = note.Note('C4', type='quarter')
>>> element2 = note.Note('G4', type='quarter')
>>> sourceStream.insert(10, element1)
>>> destinationStream.insert(20.5, element1)
>>> sourceStream.insert(14, element2)
>>> destinationStream.insert(25.0, element2)
Suppose eA, eB, and eC are three quarter notes that lie
between element1 and element2 in sourceStream
and destinationStream, as in:
>>> eA = note.Note('D4', type='quarter')
>>> eB = note.Note('E4', type='quarter')
>>> eC = note.Note('F4', type='quarter')
>>> sourceStream.insert(11, eA)
>>> sourceStream.insert(12, eB)
>>> sourceStream.insert(13, eC)
>>> destinationStream.append([eA, eB, eC]) # not needed if autoAdd were true
then running this function will cause eA, eB, and eC
to have offsets 21.625, 22.75, and 23.875 respectively
in destinationStream:
>>> tempo.interpolateElements(element1, element2, sourceStream, destinationStream, autoAdd = False)
>>> for el in [eA, eB, eC]:
... print(el.getOffsetBySite(destinationStream))
21.625
22.75
23.875
if the elements between element1 and element2 do not yet
appear in destinationStream, they are automatically added
unless autoAdd is False.
(with the default autoAdd, elements are automatically added to new streams):
>>> destStream2 = stream.Stream()
>>> destStream2.insert(10.1, element1)
>>> destStream2.insert(50.5, element2)
>>> tempo.interpolateElements(element1, element2, sourceStream, destStream2)
>>> for el in [eA, eB, eC]:
... print("%.1f" % (el.getOffsetBySite(destStream2),))
20.2
30.3
40.4
(unless autoAdd is set to false, in which case a Tempo Exception arises...)
>>> destStream3 = stream.Stream()
>>> destStream3.insert(100, element1)
>>> destStream3.insert(500, element2)
>>> eA.id = "blah"
>>> tempo.interpolateElements(element1, element2, sourceStream, destStream3, autoAdd = False)
Traceback (most recent call last):
...
TempoException: Could not find element <music21.note.Note D> with id ...
'''
try:
startOffsetSrc = element1.getOffsetBySite(sourceStream)
except exceptions21.Music21Exception:
raise TempoException("could not find element1 in sourceStream")
try:
startOffsetDest = element1.getOffsetBySite(destinationStream)
except exceptions21.Music21Exception:
raise TempoException("could not find element1 in destinationStream")
try:
endOffsetSrc = element2.getOffsetBySite(sourceStream)
except exceptions21.Music21Exception:
raise TempoException("could not find element2 in sourceStream")
try:
endOffsetDest = element2.getOffsetBySite(destinationStream)
except exceptions21.Music21Exception:
raise TempoException("could not find element2 in destinationStream")
scaleAmount = ((endOffsetDest - startOffsetDest + 0.0)/(endOffsetSrc - startOffsetSrc + 0.0))
interpolatedElements = sourceStream.getElementsByOffset(offsetStart = startOffsetSrc, offsetEnd = endOffsetSrc)
for el in interpolatedElements:
elOffsetSrc = el.getOffsetBySite(sourceStream)
try:
dummy = el.getOffsetBySite(destinationStream)
#print dummy, el
except base.SitesException:
if autoAdd is True:
destinationOffset = (scaleAmount * (elOffsetSrc - startOffsetSrc)) + startOffsetDest
destinationStream.insert(destinationOffset, el)
else:
raise TempoException("Could not find element %s with id %r in destinationStream and autoAdd is false" % (repr(el), el.id))
else:
destinationOffset = (scaleAmount * (elOffsetSrc - startOffsetSrc)) + startOffsetDest
el.setOffsetBySite(destinationStream, destinationOffset)
#-------------------------------------------------------------------------------
class Test(unittest.TestCase):
def runTest(self):
    # Required by music21's test runner protocol; no shared setup needed.
    pass
def testCopyAndDeepcopy(self):
    '''Every public class defined in this module must survive both
    shallow and deep copying without raising.
    '''
    import sys, types
    module = sys.modules[self.__module__]
    skipMarkers = ('_', '__', 'Test', 'Exception')
    for name in module.__dict__:
        # skip private names, test classes, and exception classes
        if any(name.startswith(marker) or name.endswith(marker)
               for marker in skipMarkers):
            continue
        candidate = getattr(module, name)
        # plain functions are skipped; classes and other callables are copied
        if callable(candidate) and not isinstance(candidate, types.FunctionType):
            unused_shallow = copy.copy(candidate)
            unused_deep = copy.deepcopy(candidate)
def testSetup(self):
    '''Sanity-check basic construction of MetronomeMark and TempoText.'''
    mark = MetronomeMark(number=60, referent=note.Note(type='quarter'))
    self.assertEqual(mark.number, 60)
    tempoIndication = TempoText("Lebhaft")
    self.assertEqual(tempoIndication.text, "Lebhaft")
def testUnicode(self):
    '''TempoText and MetronomeMark should handle empty, ASCII and unicode input.'''
    # construction with no arguments must not raise
    unused_tm = TempoText()
    unused_tm = TempoText("adagio")
    # a known tempo word yields an implicit metronome number
    mark = MetronomeMark("adagio")
    self.assertEqual(mark.number, 56)
    self.assertEqual(mark.numberImplicit, True)
    self.assertEqual(mark.number, 56)
    # unicode tempo text round-trips and converts to a metronome mark
    fastText = TempoText(u"très vite")
    self.assertEqual(fastText.text, u'très vite')
    mark = fastText.getMetronomeMark()
    self.assertEqual(mark.number, 144)
def testMetronomeMarkA(self):
    # Setting number first should derive an implicit text.
    mm = MetronomeMark()
    mm.number = 56 # should implicitly set text
    self.assertEqual(mm.text, 'adagio')
    self.assertEqual(mm.textImplicit, True)
    # An explicit text assignment clears the implicit flag.
    mm.text = 'slowish'
    self.assertEqual(mm.text, 'slowish')
    self.assertEqual(mm.textImplicit, False)
    # default referent is a quarter note
    self.assertEqual(mm.referent.quarterLength, 1.0)
    # setting the text first should derive an implicit number
    mm = MetronomeMark()
    mm.text = 'presto'
    mm.referent = duration.Duration(3.0)
    self.assertEqual(mm.text, 'presto')
    self.assertEqual(mm.number, 184)
    self.assertEqual(mm.numberImplicit, True)
    # An explicit number assignment clears the implicit flag.
    mm.number = 200
    self.assertEqual(mm.number, 200)
    self.assertEqual(mm.numberImplicit, False)
    # still have the referent assigned above
    self.assertEqual(mm.referent.quarterLength, 3.0)
    self.assertEqual(repr(mm), '<music21.tempo.MetronomeMark presto Dotted Half=200>')
def testMetronomeMarkB(self):
    '''Implicit flags stay None until a number/text value is supplied.'''
    mark = MetronomeMark()
    # with no args both implicit flags remain unset (None)
    self.assertEqual(mark.numberImplicit, None)
    self.assertEqual(mark.textImplicit, None)
    mark = MetronomeMark(number=100)
    self.assertEqual(mark.number, 100)
    self.assertEqual(mark.numberImplicit, False)
    self.assertEqual(mark.text, None)
    # text was never supplied, so its implicit flag is still unset
    self.assertEqual(mark.textImplicit, None)
    mark = MetronomeMark(number=101, text='rapido')
    self.assertEqual(mark.number, 101)
    self.assertEqual(mark.numberImplicit, False)
    self.assertEqual(mark.text, 'rapido')
    self.assertEqual(mark.textImplicit, False)
def testMetronomeModulationA(self):
    # need to create a mm without a speed
    # want to say that an eighth is becoming the speed of a sixteenth
    mm1 = MetronomeMark(referent=.5, number=120)
    mm2 = MetronomeMark(referent='16th')
    mmod1 = MetricModulation()
    mmod1.oldMetronome = mm1
    mmod1.newMetronome = mm2
    # this works, but the new value is set to None
    self.assertEqual(str(mmod1), '<music21.tempo.MetricModulation <music21.tempo.MetronomeMark animato Eighth=120>=<music21.tempo.MetronomeMark 16th=None>>')
    # we can get the same result by using setOtherByReferent()
    mm1 = MetronomeMark(referent=.5, number=120)
    mmod1 = MetricModulation()
    mmod1.oldMetronome = mm1
    # will automatically set the right-hand mm, as it is presently None
    mmod1.setOtherByReferent(referent='16th')
    # should get the same result as above, but with a defined value
    self.assertEqual(str(mmod1), '<music21.tempo.MetricModulation <music21.tempo.MetronomeMark animato Eighth=120>=<music21.tempo.MetronomeMark animato 16th=120>>')
    # the effective speed has been halved by this modulation
    self.assertEqual(mmod1.oldMetronome.getQuarterBPM(), 60.0)
    self.assertEqual(mmod1.newMetronome.getQuarterBPM(), 30.0)
def testGetPreviousMetronomeMarkA(self):
    '''A mark should find the basic metronome mark preceding it in a part.'''
    from music21 import stream
    part = stream.Part()
    firstMeasure = stream.Measure()
    firstMeasure.repeatAppend(note.Note(quarterLength=1), 4)
    secondMeasure = copy.deepcopy(firstMeasure)
    firstMark = MetronomeMark(number=56, referent=.25)
    firstMeasure.insert(0, firstMark)
    secondMark = MetronomeMark(number=150, referent=.5)
    secondMeasure.insert(0, secondMark)
    part.append([firstMeasure, secondMeasure])
    # the search from the second mark should land on the first one
    self.assertEqual(str(secondMark.getPreviousMetronomeMark()), '<music21.tempo.MetronomeMark adagio 16th=56>')
    #part.show()
def testGetPreviousMetronomeMarkB(self):
    '''A preceding TempoText should resolve to a default metronome mark.'''
    from music21 import stream
    part = stream.Part()
    firstMeasure = stream.Measure()
    firstMeasure.repeatAppend(note.Note(quarterLength=1), 4)
    secondMeasure = copy.deepcopy(firstMeasure)
    slowText = TempoText("slow")
    firstMeasure.insert(0, slowText)
    laterMark = MetronomeMark(number=150, referent=.5)
    secondMeasure.insert(0, laterMark)
    part.append([firstMeasure, secondMeasure])
    # the TempoText is converted to a default mark for the word "slow"
    self.assertEqual(str(laterMark.getPreviousMetronomeMark()), '<music21.tempo.MetronomeMark slow Quarter=56>')
    #part.show()
def testGetPreviousMetronomeMarkC(self):
    '''getPreviousMetronomeMark should see marks produced by a MetricModulation.'''
    from music21 import stream
    # test using a metric modulation
    p = stream.Part()
    m1 = stream.Measure()
    m1.repeatAppend(note.Note(quarterLength=1), 4)
    m2 = copy.deepcopy(m1)
    m3 = copy.deepcopy(m2)
    mm1 = MetronomeMark("slow")
    m1.insert(0, mm1)
    mm2 = MetricModulation()
    mm2.oldMetronome = MetronomeMark(referent=1, number=52)
    # derive the new (16th) mark from the old one
    mm2.setOtherByReferent(referent='16th')
    m2.insert(0, mm2)
    mm3 = MetronomeMark(number=150, referent=.5)
    m3.insert(0, mm3)
    p.append([m1, m2, m3])
    #p.show()
    # the previous mark seen from m3 is the modulation's new metronome mark
    self.assertEqual(str(mm3.getPreviousMetronomeMark()), '<music21.tempo.MetronomeMark lento 16th=52>')
def testSetReferrentA(self):
    '''Test setting referrents directly via context searches.
    '''
    from music21 import stream
    p = stream.Part()
    m1 = stream.Measure()
    m1.repeatAppend(note.Note(quarterLength=1), 4)
    m2 = copy.deepcopy(m1)
    m3 = copy.deepcopy(m2)
    mm1 = MetronomeMark(number=92)
    m1.insert(0, mm1)
    mm2 = MetricModulation()
    m2.insert(0, mm2)
    p.append([m1, m2, m3])
    # assigning oldReferent triggers a context search for the preceding
    # mark (92 to the quarter) and rescales it to a 16th referent
    mm2.oldReferent = .25
    self.assertEqual(str(mm2.oldMetronome),
                     '<music21.tempo.MetronomeMark moderate 16th=368.0>')
    # derive the new mark (half-note referent) from the old one
    mm2.setOtherByReferent(referent=2)
    self.assertEqual(str(mm2.newMetronome),
                     '<music21.tempo.MetronomeMark moderate Half=368.0>')
    #p.show()
def testSetReferrentB(self):
    '''Referents assigned before appending, then resolved via updateByContext.'''
    from music21 import stream
    s = stream.Stream()
    mm1 = MetronomeMark(number=60)
    s.append(mm1)
    s.repeatAppend(note.Note(quarterLength=1), 2)
    s.repeatAppend(note.Note(quarterLength=.5), 4)
    mmod1 = MetricModulation()
    mmod1.oldReferent = .5 # can use Duration objects
    mmod1.newReferent = 'quarter' # can use Duration objects
    s.append(mmod1)
    # resolve the old metronome (60 to the quarter -> 120 to the eighth)
    mmod1.updateByContext()
    self.assertEqual(str(mmod1.oldMetronome.referent), '<music21.duration.Duration 0.5>')
    self.assertEqual(mmod1.oldMetronome.number, 120)
    self.assertEqual(str(mmod1.newMetronome), '<music21.tempo.MetronomeMark animato Quarter=120.0>')
    s.append(note.Note())
    s.repeatAppend(note.Note(quarterLength=1.5), 2)
    # a second modulation later in the stream resolves against the first
    mmod2 = MetricModulation()
    mmod2.oldReferent = 1.5
    mmod2.newReferent = 'quarter' # can use Duration objects
    s.append(mmod2)
    mmod2.updateByContext()
    self.assertEqual(str(mmod2.oldMetronome), '<music21.tempo.MetronomeMark animato Dotted Quarter=80.0>')
    self.assertEqual(str(mmod2.newMetronome), '<music21.tempo.MetronomeMark andantino Quarter=80.0>')
    #s.repeatAppend(note.Note(), 4)
    #s.show()
def testSetReferrentC(self):
    '''Referents assigned after the modulation is already in the stream.'''
    from music21 import stream
    s = stream.Stream()
    mm1 = MetronomeMark(number=60)
    s.append(mm1)
    s.repeatAppend(note.Note(quarterLength=1), 2)
    s.repeatAppend(note.Note(quarterLength=.5), 4)
    mmod1 = MetricModulation()
    s.append(mmod1)
    # assigning referents after insertion resolves via context immediately
    mmod1.oldReferent = .5 # can use Duration objects
    mmod1.newReferent = 'quarter' # can use Duration objects
    self.assertEqual(str(mmod1.oldMetronome.referent), '<music21.duration.Duration 0.5>')
    self.assertEqual(mmod1.oldMetronome.number, 120)
    self.assertEqual(str(mmod1.newMetronome), '<music21.tempo.MetronomeMark larghetto Quarter=120.0>')
    s.append(note.Note())
    s.repeatAppend(note.Note(quarterLength=1.5), 2)
    mmod2 = MetricModulation()
    s.append(mmod2)
    mmod2.oldReferent = 1.5
    mmod2.newReferent = 'quarter' # can use Duration objects
    self.assertEqual(str(mmod2.oldMetronome), '<music21.tempo.MetronomeMark larghetto Dotted Quarter=80.0>')
    self.assertEqual(str(mmod2.newMetronome), '<music21.tempo.MetronomeMark larghetto Quarter=80.0>')
    s.repeatAppend(note.Note(), 4)
    #s.show()
def testSetReferrentD(self):
    '''updateByContext should build the old metronome mark when none was assigned.'''
    from music21 import stream
    s = stream.Stream()
    mm1 = MetronomeMark(number=60)
    s.append(mm1)
    s.repeatAppend(note.Note(quarterLength=1), 2)
    s.repeatAppend(note.Note(quarterLength=.5), 4)
    mmod1 = MetricModulation()
    s.append(mmod1)
    # even when we have no assigned metronome, updateByContext will create one
    mmod1.updateByContext()
    self.assertEqual(str(mmod1.oldMetronome.referent), '<music21.duration.Duration 1.0>')
    self.assertEqual(mmod1.oldMetronome.number, 60) # value from last mm
    # still have not set the new metronome
    self.assertEqual(mmod1.newMetronome, None)
    mmod1.newReferent = .25
    self.assertEqual(str(mmod1.newMetronome), '<music21.tempo.MetronomeMark larghetto 16th=60>')
    s.append(note.Note())
    s.repeatAppend(note.Note(quarterLength=1.5), 2)
def testSetReferrentE(self):
from music21 import stream
s = stream.Stream()
mm1 = | |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from copy import deepcopy
from flexmock import flexmock
from textwrap import dedent
import six
import time
import json
import logging
import inspect
import os
from osbs.http import HttpResponse
from osbs.constants import (BUILD_FINISHED_STATES, BUILD_CANCELLED_STATE,
OS_CONFLICT_MAX_RETRIES,
ANNOTATION_SOURCE_REPO, ANNOTATION_INSECURE_REPO)
from osbs.exceptions import (OsbsResponseException, OsbsException,
OsbsNetworkException, OsbsWatchBuildNotFound)
from osbs.core import check_response, Openshift
from tests.constants import (TEST_BUILD, TEST_CANCELLED_BUILD, TEST_LABEL,
TEST_LABEL_VALUE, TEST_IMAGESTREAM, TEST_IMAGESTREAM_NO_TAGS,
TEST_IMAGESTREAM_WITH_ANNOTATION)
from tests.fake_api import openshift, OAPI_PREFIX, API_VER # noqa
from tests.test_utils import JsonMatcher
from requests.exceptions import ConnectionError
import pytest
from six.moves import http_client
class Response(object):
    """Minimal stand-in for an HTTP response used by these tests.

    Carries a status code, an optional raw body, and an optional iterable
    of chunks used to emulate a streamed response.
    """

    def __init__(self, status_code, content=None, iterable=None):
        self.status_code = status_code
        self.iterable = iterable
        # Only expose .content when a body was supplied, so attribute
        # access fails loudly for stream-only responses.
        if content is not None:
            self.content = content

    def iter_lines(self):
        """Yield the fake streamed chunks one at a time."""
        for chunk in self.iterable:
            yield chunk
def make_json_response(obj):
    """Wrap *obj* as a 200 HttpResponse with a UTF-8 encoded JSON body."""
    body = json.dumps(obj).encode('utf-8')
    return HttpResponse(200,
                        headers={"Content-Type": "application/json"},
                        content=body)
class TestCheckResponse(object):
    """Tests for check_response: 2xx pass-through, error raising and logging."""

    @pytest.mark.parametrize('content', [None, b'OK'])
    @pytest.mark.parametrize('status_code', [http_client.OK, http_client.CREATED])
    def test_check_response_ok(self, status_code, content):
        # Successful status codes must pass through without raising.
        response = Response(status_code, content=content)
        check_response(response)

    @pytest.mark.parametrize('log_errors', (True, False))
    def test_check_response_bad_stream(self, caplog, log_errors):
        # A failing streamed response (no .content attribute) should raise
        # and log the concatenated stream chunks at the requested level.
        iterable = [b'iter', b'lines']
        status_code = http_client.CONFLICT
        response = Response(status_code, iterable=iterable)
        if log_errors:
            log_type = logging.ERROR
        else:
            log_type = logging.DEBUG
        with pytest.raises(OsbsResponseException):
            if log_errors:
                # default log level is expected to be ERROR
                check_response(response)
            else:
                check_response(response, log_level=log_type)
        logged = [(l.getMessage(), l.levelno) for l in caplog.records()]
        # exactly one log record, with the expected message and level
        assert len(logged) == 1
        assert logged[0][0] == '[{code}] {message}'.format(code=status_code,
                                                           message=b'iterlines')
        assert logged[0][1] == log_type

    @pytest.mark.parametrize('log_errors', (True, False))
    def test_check_response_bad_nostream(self, caplog, log_errors):
        # A failing non-streamed response should raise and log its .content.
        status_code = http_client.CONFLICT
        content = b'content'
        response = Response(status_code, content=content)
        if log_errors:
            log_type = logging.ERROR
        else:
            log_type = logging.DEBUG
        with pytest.raises(OsbsResponseException):
            if log_errors:
                # default log level is expected to be ERROR
                check_response(response)
            else:
                check_response(response, log_level=log_type)
        logged = [(l.getMessage(), l.levelno) for l in caplog.records()]
        # exactly one log record, with the expected message and level
        assert len(logged) == 1
        assert logged[0][0] == '[{code}] {message}'.format(code=status_code,
                                                           message=content)
        assert logged[0][1] == log_type
class TestOpenshift(object):
def test_set_labels_on_build(self, openshift): # noqa
labels = openshift.set_labels_on_build(TEST_BUILD, {TEST_LABEL: TEST_LABEL_VALUE})
assert labels.json() is not None
    @pytest.mark.parametrize('exc', [  # noqa
        ConnectionError('Connection aborted.', http_client.BadStatusLine("''",)),
    ])
    def test_stream_logs_bad_initial_connection(self, openshift, exc):
        """stream_logs() retries after the first connection attempt fails."""
        response = flexmock(status_code=http_client.OK)
        (response
            .should_receive('iter_lines')
            .and_return([b"{'stream': 'foo\n'}"])
            .and_raise(StopIteration))
        wrapped_exc = OsbsNetworkException('http://spam.com', str(exc), status_code=None,
                                           cause=exc)
        # flexmock consumes these in order: the first _get raises, the
        # second returns a usable response.
        (flexmock(openshift)
            .should_receive('_get')
            # First: simulate initial connection problem
            .and_raise(wrapped_exc)
            # Next: return a real response
            .and_return(response))
        # time.time() is consumed by stream_logs' deadline/retry logic:
        # 0 at the start, then 100 on the re-check.
        (flexmock(time)
            .should_receive('time')
            .and_return(0)
            .and_return(100))
        logs = openshift.stream_logs(TEST_BUILD)
        # Despite the failed first attempt, exactly one log line comes out.
        assert len([log for log in logs]) == 1
def test_stream_logs_utf8(self, openshift): # noqa
response = flexmock(status_code=http_client.OK)
(response
.should_receive('iter_lines')
.and_return([u"{'stream': 'Uňícode íš hářd\n'}".encode('utf-8')])
.and_raise(StopIteration))
(flexmock(openshift)
.should_receive('_get')
.and_return(response))
logs = openshift.stream_logs(TEST_BUILD)
assert len([log for log in logs]) == 1
def test_list_builds(self, openshift): # noqa
list_builds = openshift.list_builds()
assert list_builds is not None
assert bool(list_builds.json()) # is there at least something
def test_list_pods(self, openshift): # noqa
response = openshift.list_pods(label="openshift.io/build.name=%s" %
TEST_BUILD)
assert isinstance(response, HttpResponse)
def test_get_oauth_token(self, openshift): # noqa
token = openshift.get_oauth_token()
assert token is not None
def test_get_user(self, openshift): # noqa
l = openshift.get_user()
assert l.json() is not None
def test_watch_resource_and_wait_to_build_timeouts(self, caplog, openshift): # noqa:F811
class MockResponse(object):
def __init__(self):
self.status_code = http_client.OK
def iter_lines(self):
return []
mock_reponse = MockResponse()
flexmock(openshift).should_receive('_get').and_return(mock_reponse)
flexmock(time).should_receive('sleep').and_return(None)
for changetype, obj in openshift.watch_resource("builds", 12):
# watch_resource failed and never yielded, so we shouldn't hit the assert
assert False
with pytest.raises(OsbsWatchBuildNotFound):
openshift.wait(12, None)
with pytest.raises(OsbsException):
openshift.wait_for_build_to_finish(12)
for log in caplog.records():
if 'Retry #143' in log.getMessage():
break
assert 'Retry #143' in log.getMessage()
def test_watch_build(self, openshift): # noqa
response = openshift.wait_for_build_to_finish(TEST_BUILD)
status_lower = response["status"]["phase"].lower()
assert response["metadata"]["name"] == TEST_BUILD
assert status_lower in BUILD_FINISHED_STATES
assert isinstance(TEST_BUILD, six.text_type)
assert isinstance(status_lower, six.text_type)
def test_create_build(self, openshift): # noqa
response = openshift.create_build({})
assert response is not None
assert response.json()["metadata"]["name"] == TEST_BUILD
assert response.json()["status"]["phase"].lower() in BUILD_FINISHED_STATES
def test_cancel_build(self, openshift): # noqa
response = openshift.cancel_build(TEST_CANCELLED_BUILD)
assert response is not None
assert response.json()["metadata"]["name"] == TEST_CANCELLED_BUILD
assert response.json()["status"]["phase"].lower() in BUILD_CANCELLED_STATE
def test_get_build_config(self, openshift): # noqa
mock_response = {"spam": "maps"}
build_config_name = 'some-build-config-name'
expected_url = openshift._build_url("buildconfigs/%s/" % build_config_name)
(flexmock(openshift)
.should_receive("_get")
.with_args(expected_url)
.once()
.and_return(make_json_response(mock_response)))
response = openshift.get_build_config(build_config_name)
assert response['spam'] == 'maps'
def test_get_missing_build_config(self, openshift): # noqa
build_config_name = 'some-build-config-name'
expected_url = openshift._build_url("buildconfigs/%s/" % build_config_name)
(flexmock(openshift)
.should_receive("_get")
.with_args(expected_url)
.once()
.and_return(HttpResponse(404, {}, b'')))
with pytest.raises(OsbsResponseException):
openshift.get_build_config(build_config_name)
def test_get_build_config_by_labels(self, openshift): # noqa
mock_response = {"items": [{"spam": "maps"}]}
label_selectors = (
('label-1', 'value-1'),
('label-2', 'value-2'),
)
expected_url = openshift._build_url(
"buildconfigs/?labelSelector=label-1%3Dvalue-1%2Clabel-2%3Dvalue-2")
(flexmock(openshift)
.should_receive("_get")
.with_args(expected_url)
.once()
.and_return(make_json_response(mock_response)))
response = openshift.get_build_config_by_labels(label_selectors)
assert response['spam'] == 'maps'
def test_get_missing_build_config_by_labels(self, openshift): # noqa
mock_response = {"items": []}
label_selectors = (
('label-1', 'value-1'),
('label-2', 'value-2'),
)
expected_url = openshift._build_url(
"buildconfigs/?labelSelector=label-1%3Dvalue-1%2Clabel-2%3Dvalue-2")
(flexmock(openshift)
.should_receive("_get")
.with_args(expected_url)
.once()
.and_return(make_json_response(mock_response)))
with pytest.raises(OsbsException) as exc:
openshift.get_build_config_by_labels(label_selectors)
assert str(exc.value).startswith('Build config not found')
def test_get_multiple_build_config_by_labels(self, openshift): # noqa
mock_response = {"items": [{"spam": "maps"}, {"eggs": "sgge"}]}
label_selectors = (
('label-1', 'value-1'),
('label-2', 'value-2'),
)
expected_url = openshift._build_url(
"buildconfigs/?labelSelector=label-1%3Dvalue-1%2Clabel-2%3Dvalue-2")
(flexmock(openshift)
.should_receive("_get")
.with_args(expected_url)
.once()
.and_return(make_json_response(mock_response)))
with pytest.raises(OsbsException) as exc:
openshift.get_build_config_by_labels(label_selectors)
assert str(exc.value).startswith('More than one build config found')
    @pytest.mark.parametrize(('status_codes', 'should_raise'), [  # noqa
        ([http_client.OK], False),
        ([http_client.CONFLICT, http_client.CONFLICT, http_client.OK], False),
        ([http_client.CONFLICT, http_client.OK], False),
        ([http_client.CONFLICT, http_client.CONFLICT, http_client.UNAUTHORIZED], True),
        ([http_client.UNAUTHORIZED], True),
        ([http_client.CONFLICT for _ in range(OS_CONFLICT_MAX_RETRIES + 1)], True),
    ])
    @pytest.mark.parametrize('update_or_set', ['update', 'set'])
    @pytest.mark.parametrize('attr_type', ['labels', 'annotations'])
    @pytest.mark.parametrize('object_type', ['build', 'build_config'])
    def test_retry_update_attributes(self, openshift,
                                     status_codes, should_raise,
                                     update_or_set,
                                     attr_type,
                                     object_type):
        """409 CONFLICT PUT responses are retried; other errors propagate.

        Covers every {update,set}_{labels,annotations}_on_{build,build_config}
        method that actually exists on the client.
        """
        try:
            # Resolve e.g. 'update_labels_on_build' dynamically.
            fn = getattr(openshift,
                         "{update}_{attr}_on_{object}"
                         .format(update=update_or_set,
                                 attr=attr_type,
                                 object=object_type))
        except AttributeError:
            return  # not every combination is implemented
        # One GET (fetch current object) and one PUT (write back) is
        # expected per attempt; queue one response pair per status code.
        get_expectation = (flexmock(openshift)
                           .should_receive('_get')
                           .times(len(status_codes)))
        put_expectation = (flexmock(openshift)
                           .should_receive('_put')
                           .times(len(status_codes)))
        for status_code in status_codes:
            get_response = make_json_response({"metadata": {}})
            put_response = HttpResponse(status_code,
                                        headers={},
                                        content=b'')
            get_expectation = get_expectation.and_return(get_response)
            put_expectation = put_expectation.and_return(put_response)
        # Don't actually sleep between retries.
        (flexmock(time)
            .should_receive('sleep')
            .and_return(None))
        args = ('any-object-id', {'key': 'value'})
        if should_raise:
            with pytest.raises(OsbsResponseException):
                fn(*args)
        else:
            fn(*args)
def test_put_image_stream_tag(self, openshift): # noqa
tag_name = 'spam'
tag_id = 'maps:' + tag_name
mock_data = {
'kind': 'ImageStreamTag',
'apiVersion': 'v1',
'tag': {
'name': tag_name
}
}
expected_url = openshift._build_url('imagestreamtags/' + tag_id)
(flexmock(openshift)
.should_receive("_put")
.with_args(expected_url, data=json.dumps(mock_data),
headers={"Content-Type": "application/json"})
.once()
.and_return(make_json_response(mock_data)))
openshift.put_image_stream_tag(tag_id, mock_data)
def _make_tag_template(self):
# TODO: Just read from inputs folder
return json.loads(dedent('''\
{
"kind": "ImageStreamTag",
"apiVersion": "v1",
"metadata": {
"name": "{{IMAGE_STREAM_ID}}:{{TAG_ID}}"
},
"tag": {
"name": "{{TAG_ID}}",
"from": {
"kind": "DockerImage",
"name": "{{REPOSITORY}}:{{TAG_ID}}"
},
"importPolicy": {}
}
}
'''))
    @pytest.mark.parametrize('existing_scheduled', (True, False, None))  # noqa
    @pytest.mark.parametrize('existing_insecure', (True, False, None))
    @pytest.mark.parametrize('expected_scheduled', (True, False))
    @pytest.mark.parametrize(('s_annotations', 'expected_insecure'), (
        ({ANNOTATION_INSECURE_REPO: 'true'}, True),
        ({ANNOTATION_INSECURE_REPO: 'false'}, False),
        ({}, False),
        (None, False),
    ))
    @pytest.mark.parametrize('status_code', (200, 404, 500))
    def test_ensure_image_stream_tag(self,
                                     existing_scheduled,
                                     existing_insecure,
                                     expected_scheduled,
                                     s_annotations,
                                     expected_insecure,
                                     status_code,
                                     openshift):
        """ensure_image_stream_tag() writes the tag only when needed.

        status_code drives the GET of the existing tag: 200 (tag already
        exists), 404 (tag must be created) or 500 (error must propagate).
        """
        stream_name = 'spam'
        stream_repo = 'some.registry.com/spam'
        stream = {
            'metadata': {'name': stream_name},
            'spec': {'dockerImageRepository': stream_repo}
        }
        if s_annotations is not None:
            stream['metadata']['annotations'] = s_annotations
        tag_name = 'maps'
        tag_id = '{0}:{1}'.format(stream_name, tag_name)
        expected_url = openshift._build_url('imagestreamtags/' +
                                            tag_id)

        def verify_image_stream_tag(*args, **kwargs):
            # Stands in for _put: inspect the payload the client sends.
            data = json.loads(kwargs['data'])
            assert (bool(data['tag']['importPolicy'].get('insecure')) ==
                    expected_insecure)
            assert (bool(data['tag']['importPolicy'].get('scheduled')) ==
                    expected_scheduled)
            # Also verify new image stream tags are created properly.
            if status_code == 404:
                assert data['metadata']['name'] == tag_id
                assert data['tag']['name'] == tag_name
                assert (data['tag']['from']['name'] ==
                        '{0}:{1}'.format(stream_repo, tag_name))
            return make_json_response({})

        expected_change = False
        expected_error = status_code == 500
        mock_response = {}
        expectation = (flexmock(openshift)
                       .should_receive("_get")
                       .with_args(expected_url)
                       .once())
        if status_code == 200:
            # Build the pre-existing tag from the parametrized flags.
            existing_image_stream_tag = {'tag': {'importPolicy': {}}}
            if existing_insecure is not None:
                existing_image_stream_tag['tag']['importPolicy']['insecure'] = \
                    existing_insecure
            if existing_scheduled is not None:
                existing_image_stream_tag['tag']['importPolicy']['scheduled'] = \
                    existing_scheduled
            mock_response = existing_image_stream_tag
            # A PUT only happens when the desired flags differ from current.
            if expected_insecure != bool(existing_insecure) or \
                    expected_scheduled != bool(existing_scheduled):
                expected_change = True
            expectation.and_return(make_json_response(mock_response))
        else:
            expectation.and_return(HttpResponse(status_code,
                                                headers={},
                                                content=b''))
            # A missing tag must always be created.
            if status_code == 404:
                expected_change = True
        if expected_change:
            (flexmock(openshift)
                .should_receive("_put")
                .with_args(expected_url, data=str,
                           headers={"Content-Type": "application/json"})
                .replace_with(verify_image_stream_tag)
                .once())
        if expected_error:
            with pytest.raises(OsbsResponseException):
                openshift.ensure_image_stream_tag(
                    stream, tag_name, self._make_tag_template(), expected_scheduled)
        else:
            # Return value reports whether anything was actually changed.
            assert (openshift.ensure_image_stream_tag(
                stream,
                tag_name,
                self._make_tag_template(),
                expected_scheduled) == expected_change)
    @pytest.mark.parametrize(('status_codes', 'should_raise'), [  # noqa
        ([http_client.OK], False),
        ([http_client.CONFLICT, http_client.CONFLICT, http_client.OK], False),
        ([http_client.CONFLICT, http_client.OK], False),
        ([http_client.CONFLICT, http_client.CONFLICT, http_client.UNAUTHORIZED], True),
        ([http_client.UNAUTHORIZED], True),
        ([http_client.CONFLICT for _ in range(OS_CONFLICT_MAX_RETRIES + 1)], True),
    ])
    def test_retry_ensure_image_stream_tag(self, openshift,
                                           status_codes, should_raise):
        """ensure_image_stream_tag() retries its PUT on 409 CONFLICT.

        Every GET reports the tag missing (404) so a PUT is attempted each
        time; the parametrized PUT status codes are consumed in order.
        """
        get_expectation = (flexmock(openshift)
                           .should_receive('_get')
                           .times(len(status_codes)))
        put_expectation = (flexmock(openshift)
                           .should_receive('_put')
                           .times(len(status_codes)))
        for status_code in status_codes:
            get_response = HttpResponse(http_client.NOT_FOUND,
                                        headers={},
                                        content=b'')
            put_response = HttpResponse(status_code,
                                        headers={},
                                        content=b'')
            get_expectation = get_expectation.and_return(get_response)
            put_expectation = put_expectation.and_return(put_response)
        # Don't actually sleep between retries.
        (flexmock(time)
            .should_receive('sleep')
            .and_return(None))
        fn = openshift.ensure_image_stream_tag
        args = (
            {
                'kind': 'ImageStream',
                'metadata': {
                    'name': 'imagestream',
                },
                'spec': {
                    'dockerImageRepository': 'registry.example.com/repo',
                },
            },
            'tag',
            {
                'kind': 'ImageStreamTag',
                'metadata': {
                    'name': 'imagestream:tag',
                },
                'tag': {
                    'name': 'tag',
                    'from': {
                        'kind': 'DockerImage',
                        'name': 'registry.example.com/repo:tag',
                    },
                    'importPolicy': {},
                },
            })
        if should_raise:
            with pytest.raises(OsbsResponseException):
                fn(*args)
        else:
            fn(*args)
@pytest.mark.parametrize(('kwargs', 'called'), (
({'use_auth': True, 'use_kerberos': True}, False),
({'use_auth': True, 'username': 'foo', 'password': '<PASSWORD>'}, False),
({'use_auth': True, 'token': 'foo'}, False),
({'use_auth': False, 'use_kerberos': True}, False),
({'use_auth': False, 'username': 'foo', 'password': '<PASSWORD>'}, False),
({'use_auth': False, 'token': 'foo'}, False),
({'use_kerberos': True}, False),
({'username': 'foo', 'password': '<PASSWORD>'}, False),
({'token': 'foo'}, False),
({'use_auth': | |
from os import path, sys
import os
import unittest

# Make the project root importable *before* pulling in project modules;
# previously this append ran after the Dojo import below, which only
# worked when the tests happened to be launched from the project root.
os.sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))

from models.dojo import Dojo
class TestDojo(unittest.TestCase):
"""Test cases for the Dojo class"""
def setUpClass():
if os.path.isfile('data/db/fellow.db'):
os.remove(os.path.realpath("data/db/fellow.db"))
if os.path.isfile('data/db/office.db'):
os.remove(os.path.realpath("data/db/office.db"))
if os.path.isfile('data/db/livingspace.db'):
os.remove(os.path.realpath("data/db/livingspace.db"))
if os.path.isfile('data/db/staff.db'):
os.remove(os.path.realpath("data/db/staff.db"))
def test_random_room(self):
"""Return None because there is no room currently"""
rand_room = Dojo().get_random_room(Dojo().office_data)
self.assertFalse(rand_room)
def test_available_room(self):
arg = {
"<room_type>": 'OFFICE',
"<room_name>": ['Orion', 'Meraki']
}
Dojo().create_room(arg)
available_rooms = Dojo().get_available_room(Dojo().office_data)
isOrion = 'ORION' in available_rooms
isMeraki = 'MERAKI' in available_rooms
self.assertTrue(isMeraki)
self.assertTrue(isOrion)
def test_purge_office(self):
arg = {
"<room_type>": 'OFFICE',
"<room_name>": ['Orion', 'Meraki']
}
Dojo().create_room(arg)
Dojo().purge()
self.assertFalse('MERAKI' in Dojo().office_data)
self.assertFalse('ORION' in Dojo().office_data)
def test_purge_livingspace(self):
arg = {
"<room_type>": 'LIVINGSPACE',
"<room_name>": ['Piper', 'Idanre']
}
Dojo().create_room(arg)
Dojo().purge()
self.assertFalse('IDANRE' in Dojo().livingspace_data)
self.assertFalse('PIPER' in Dojo().livingspace_data)
def test_purge_fellow(self):
arg = {
"<person_fname>": "Bolaji",
"<person_lname>": "Olajide",
"<FELLOW/STAFF>": "FELLOW"
}
Dojo().add_person(arg)
Dojo().purge()
self.assertFalse('<NAME>' in Dojo().fellow_data)
def test_purge_staff(self):
arg = {
"<person_fname>": "Percila",
"<person_lname>": "Njira",
"<FELLOW/STAFF>": "STAFF"
}
Dojo().add_person(arg)
Dojo().purge()
self.assertFalse('<NAME>' in Dojo().staff_data)
def test_existing_room(self):
arg = {
"<room_type>": 'OFFICE',
"<room_name>": ['Pygo']
}
Dojo().create_room(arg)
arg = {
"<room_type>": 'OFFICE',
"<room_name>": ['Pygo']
}
Dojo().create_room(arg)
output = sys.stdout.getvalue().strip().split("\n")[-1]
self.assertEqual(output, "The OFFICE, PYGO already exists!")
def test_invalid_room(self):
Dojo().purge()
arg = {
"<room_type>": 'CAR WASH',
"<room_name>": ['Pygo']
}
Dojo().create_room(arg)
output = sys.stdout.getvalue().strip().split("\n")[-1]
self.assertEqual(
output, "Invalid Room Type. Room type can either be \'office\' or \'livingspace\'")
def test_existing_staff(self):
arg = {
"<person_fname>": "Percila",
"<person_lname>": "Njira",
"<FELLOW/STAFF>": "STAFF"
}
Dojo().add_person(arg)
arg = {
"<person_fname>": "Percila",
"<person_lname>": "Njira",
"<FELLOW/STAFF>": "STAFF"
}
Dojo().add_person(arg)
output = sys.stdout.getvalue().strip().split("\n")[-1]
self.assertEqual(output, "The STAFF, PERCILA NJIRA already exists.")
def test_existing_fellow(self):
arg = {
"<person_fname>": "Bolaji",
"<person_lname>": "Olajide",
"<FELLOW/STAFF>": "FELLOW"
}
Dojo().add_person(arg)
arg = {
"<person_fname>": "Bolaji",
"<person_lname>": "Olajide",
"<FELLOW/STAFF>": "FELLOW"
}
Dojo().add_person(arg)
output = sys.stdout.getvalue().strip().split("\n")[-1]
self.assertEqual(output, "The FELLOW, BOLAJI OLAJIDE already exists.")
def test_invalid_position(self):
arg = {
"<person_fname>": "Bolaji",
"<person_lname>": "Olajide",
"<FELLOW/STAFF>": "CATERER"
}
Dojo().add_person(arg)
output = sys.stdout.getvalue().strip().split("\n")[-1]
self.assertEqual(
output, "Error! Individual must be either a fellow or a staff.")
def test_allocate_staff_no_office(self):
Dojo().purge()
arg = {
"<person_fname>": "Bolaji",
"<person_lname>": "Olajide",
"<FELLOW/STAFF>": "STAFF",
"<wants_accommodation>": "Y"
}
Dojo().add_person(arg)
Dojo().allocate_room(arg)
output = sys.stdout.getvalue().strip().split("\n")[-1]
self.assertEqual(
output, "There is currently no vacant office in the Dojo")
def test_allocate_fellow_no_office(self):
Dojo().purge()
arg = {
"<person_fname>": "Bolaji",
"<person_lname>": "Olajide",
"<FELLOW/STAFF>": "FELLOW",
"<wants_accommodation>": "Y"
}
Dojo().add_person(arg)
Dojo().allocate_room(arg)
output = sys.stdout.getvalue().strip().split("\n")[-2]
self.assertEqual(
output, "There is currently no vacant office in the Dojo")
def test_allocate_fellow_no_livingspace(self):
arg = {
"<person_fname>": "Bolaji",
"<person_lname>": "Olajide",
"<FELLOW/STAFF>": "FELLOW",
"<wants_accommodation>": "Y"
}
Dojo().add_person(arg)
Dojo().allocate_room(arg)
output = sys.stdout.getvalue().strip().split("\n")[-1]
self.assertEqual(
output, "There is currently no vacant Living Space in the Dojo")
def test_allocate_staff_with_accomodation(self):
Dojo().purge()
arg = {
"<room_type>": 'OFFICE',
"<room_name>": ['Orion']
}
Dojo().create_room(arg)
arg = {
"<room_type>": 'LIVINGSPACE',
"<room_name>": ['Piper']
}
Dojo().create_room(arg)
arg = {
"<person_fname>": "Bolaji",
"<person_lname>": "Olajide",
"<FELLOW/STAFF>": "STAFF",
"<wants_accommodation>": "Y"
}
Dojo().add_person(arg)
Dojo().allocate_room(arg)
output = sys.stdout.getvalue().strip().split("\n")[-1]
output2 = sys.stdout.getvalue().strip().split("\n")[-2]
self.assertEqual(
output, "STAFF Members cannot be allocated Living Space.")
self.assertEqual(
output2, "BOLAJI OLAJIDE has been allocated the Office, ORION")
def test_allocate_fellow_with_accomodation(self):
Dojo().purge()
arg = {
"<room_type>": 'OFFICE',
"<room_name>": ['Orion']
}
Dojo().create_room(arg)
arg = {
"<room_type>": 'LIVINGSPACE',
"<room_name>": ['Piper']
}
Dojo().create_room(arg)
arg = {
"<person_fname>": "Bolaji",
"<person_lname>": "Olajide",
"<FELLOW/STAFF>": "FELLOW",
"<wants_accommodation>": "Y"
}
Dojo().add_person(arg)
Dojo().allocate_room(arg)
output = sys.stdout.getvalue().strip().split("\n")[-1]
output2 = sys.stdout.getvalue().strip().split("\n")[-2]
self.assertEqual(
output, "BOLAJI OLAJIDE has been allocated the Living Space, PIPER")
self.assertEqual(
output2, "BOLAJI OLAJIDE has been allocated the Office, ORION")
def test_allocate_fellow_no_accomodation(self):
Dojo().purge()
arg = {
"<room_type>": 'OFFICE',
"<room_name>": ['Orion']
}
Dojo().create_room(arg)
arg = {
"<person_fname>": "Bolaji",
"<person_lname>": "Olajide",
"<FELLOW/STAFF>": "FELLOW",
"<wants_accommodation>": "N"
}
Dojo().add_person(arg)
Dojo().allocate_room(arg)
output = sys.stdout.getvalue().strip().split("\n")[-1]
self.assertEqual(
output, "BOLAJI OLAJIDE has been allocated the Office, ORION")
def test_allocate_fellow_no_accomodation_no_office(self):
Dojo().purge()
arg = {
"<person_fname>": "Bolaji",
"<person_lname>": "Olajide",
"<FELLOW/STAFF>": "FELLOW",
"<wants_accommodation>": "N"
}
Dojo().add_person(arg)
Dojo().allocate_room(arg)
output = sys.stdout.getvalue().strip().split("\n")[-1]
self.assertEqual(
output, "There is currently no vacant office in the Dojo")
def test_allocate_staff_no_accomodation(self):
Dojo().purge()
arg = {
"<room_type>": 'OFFICE',
"<room_name>": ['Orion']
}
Dojo().create_room(arg)
arg = {
"<person_fname>": "Bolaji",
"<person_lname>": "Olajide",
"<FELLOW/STAFF>": "STAFF",
"<wants_accommodation>": "N"
}
Dojo().add_person(arg)
Dojo().allocate_room(arg)
output = sys.stdout.getvalue().strip().split("\n")[-1]
self.assertEqual(
output, "BOLAJI OLAJIDE has been allocated the Office, ORION")
def test_print_non_existing_room(self):
Dojo().purge()
arg = {
"<room_type>": 'OFFICE',
"<room_name>": ['Orion', 'Meraki', 'Piper']
}
Dojo().create_room(arg)
arg = {
"<room_name>": 'Nairobi'
}
Dojo().print_room(arg)
output = sys.stdout.getvalue().strip().split("\n")[-1]
self.assertEqual(output, "The Room, NAIROBI doesn't exist.")
def test_print_empty_office(self):
Dojo().purge()
arg = {
"<room_type>": 'OFFICE',
"<room_name>": ['Orion', 'Meraki', 'Piper']
}
Dojo().create_room(arg)
arg = {
"<room_name>": 'Orion'
}
Dojo().print_room(arg)
output = sys.stdout.getvalue().strip().split("\n")[-1]
self.assertEqual(output, "The Office ORION is empty")
def test_print_room_office(self):
Dojo().purge()
arg = {
"<room_type>": 'OFFICE',
"<room_name>": ['Orion']
}
Dojo().create_room(arg)
arg = {
"<person_fname>": "Bolaji",
"<person_lname>": "Olajide",
"<FELLOW/STAFF>": "FELLOW",
"<wants_accommodation>": "N"
}
Dojo().add_person(arg)
Dojo().allocate_room(arg)
arg = {
"<room_name>": 'Orion'
}
Dojo().print_room(arg)
output = sys.stdout.getvalue().strip().split("\n")[-1]
self.assertEqual(output, "BOLAJI OLAJIDE")
def test_print_room_livingspace(self):
Dojo().purge()
arg = {
"<room_type>": 'LIVINGSPACE',
"<room_name>": ['Orion']
}
Dojo().create_room(arg)
arg = {
"<person_fname>": "Bolaji",
"<person_lname>": "Olajide",
"<FELLOW/STAFF>": 'FELLOW',
"<wants_accommodation>": "Y"
}
Dojo().add_person(arg)
Dojo().allocate_room(arg)
arg = {
"<room_name>": 'Orion'
}
Dojo().print_room(arg)
output = sys.stdout.getvalue().strip().split("\n")[-1]
self.assertEqual(output, "BOLAJI OLAJIDE")
def test_print_room_livingspace_office(self):
Dojo().purge()
arg = {
"<room_type>": 'LIVINGSPACE',
"<room_name>": ['Orion']
}
Dojo().create_room(arg)
arg = {
"<room_type>": 'OFFICE',
"<room_name>": ['Orion']
}
Dojo().create_room(arg)
arg = {
"<person_fname>": "Bolaji",
"<person_lname>": "Olajide",
"<FELLOW/STAFF>": 'FELLOW',
"<wants_accommodation>": "Y"
}
Dojo().add_person(arg)
Dojo().allocate_room(arg)
arg = {
"<room_name>": 'Orion'
}
Dojo().print_room(arg)
output = sys.stdout.getvalue().strip().split("\n")[-5]
output2 = sys.stdout.getvalue().strip().split("\n")[-1]
self.assertEqual(output, "BOLAJI OLAJIDE")
self.assertEqual(output2, "BOLAJI OLAJIDE")
def test_print_empty_livingspace(self):
Dojo().purge()
arg = {
"<room_type>": 'LIVINGSPACE',
"<room_name>": ['Meraki']
}
Dojo().create_room(arg)
arg = {
"<room_name>": 'Meraki'
}
Dojo().print_room(arg)
output = sys.stdout.getvalue().strip().split("\n")[-1]
self.assertEqual(output, "The Living Space MERAKI is empty")
def test_print_room_empty(self):
Dojo().purge()
arg = {
"<room_type>": 'LIVINGSPACE',
"<room_name>": ['Orion']
}
Dojo().create_room(arg)
arg = {
"<room_type>": 'OFFICE',
"<room_name>": ['Orion']
}
Dojo().create_room(arg)
arg = {
"<room_name>": 'Orion'
}
Dojo().print_room(arg)
output = sys.stdout.getvalue().strip().split("\n")[-2]
output2 = sys.stdout.getvalue().strip().split("\n")[-1]
self.assertEqual(output, "The Office ORION is empty")
self.assertEqual(output2, "The Living Space ORION is empty")
def test_existing_livingspace(self):
Dojo().purge()
arg = {
"<room_type>": 'LIVINGSPACE',
"<room_name>": ['Orion']
}
Dojo().create_room(arg)
arg = {
"<room_type>": 'LIVINGSPACE',
"<room_name>": ['Orion']
}
Dojo().create_room(arg)
output = sys.stdout.getvalue().strip().split("\n")[-1]
self.assertEqual(output, "The LIVINGSPACE, ORION already exists!")
def test_load_people_(self):
Dojo().purge()
arg = {
"<file_name>": 'test_input'
}
Dojo().load_people(arg)
self.assertTrue('<NAME>' in Dojo().fellow_data)
self.assertTrue('<NAME>' in Dojo().staff_data)
arg = {
"<file_name>": 'test_input'
}
Dojo().load_people(arg)
output = sys.stdout.getvalue().strip().split("\n")[-1]
self.assertTrue(output, "Invalid Argument Format!")
    def test_reallocate_fellow(self):
        """A fellow is moved to whichever of the two offices he is not in."""
        Dojo().purge()
        arg = {
            "<room_type>": 'LIVINGSPACE',
            "<room_name>": ['Orion']
        }
        Dojo().create_room(arg)
        arg = {
            "<room_type>": 'OFFICE',
            "<room_name>": ['Meraki', 'Piper']
        }
        Dojo().create_room(arg)
        arg = {
            "<file_name>": 'test_input'
        }
        Dojo().load_people(arg)
        arg = {
            '<person_fname>': 'Brian',
            '<person_lname>': 'Mosigisi'
        }
        Dojo().get_person_id(arg)
        output = sys.stdout.getvalue().strip().split("\n")[-1]
        # The person id is sliced out of the printed line.
        # NOTE(review): [-8:-1] hard-codes the id's position and length in
        # the printed format — fragile; confirm against get_person_id.
        fellow_id = output[-8:-1]
        # Office assignment is random, so reallocate to the *other* office.
        if Dojo().fellow_data['<NAME>'].office == 'MERAKI':
            arg = {
                '<new_room_name>': 'Piper',
                '<person_identifier>': fellow_id
            }
            Dojo().reallocate_person(arg)
            output = sys.stdout.getvalue().strip().split("\n")[-1]
            self.assertEqual(
                output, "<NAME> has been reallocated to the Office PIPER")
        else:
            arg = {
                '<new_room_name>': 'Meraki',
                '<person_identifier>': fellow_id
            }
            Dojo().reallocate_person(arg)
            output = sys.stdout.getvalue().strip().split("\n")[-1]
            self.assertEqual(
                output, "<NAME> has been reallocated to the Office MERAKI")
def test_reallocate_invalid_id(self):
arg = {
'<new_room_name>': 'Meraki',
'<person_identifier>': 'F00'
}
Dojo().reallocate_person(arg)
output = sys.stdout.getvalue().strip().split("\n")[-1]
self.assertEqual(
output, "No fellow in the Dojo with the id: F00")
    def test_reallocate_staff(self):
        """A staff member is moved to whichever office she is not in."""
        Dojo().purge()
        arg = {
            "<room_type>": 'OFFICE',
            "<room_name>": ['Meraki', 'Piper']
        }
        Dojo().create_room(arg)
        arg = {
            "<person_fname>": "Percila",
            "<person_lname>": "Njira",
            "<FELLOW/STAFF>": "STAFF",
            "<wants_accommodation>": "Y"
        }
        Dojo().add_person(arg)
        Dojo().allocate_room(arg)
        arg = {
            '<person_fname>': 'Percila',
            '<person_lname>': 'Njira'
        }
        Dojo().get_person_id(arg)
        id_output = sys.stdout.getvalue().strip().split("\n")[-1]
        # NOTE(review): the staff id slice is [-8:] while the fellow test
        # uses [-8:-1] — presumably the printed formats differ by one
        # trailing character; confirm against get_person_id's output.
        staff_id = id_output[-8:]
        # Office assignment is random, so reallocate to the *other* office.
        if Dojo().staff_data['<NAME>'].office == 'MERAKI':
            arg = {
                '<new_room_name>': 'Piper',
                '<person_identifier>': staff_id
            }
            Dojo().reallocate_person(arg)
            output = sys.stdout.getvalue().strip().split("\n")[-1]
            self.assertEqual(
                output,
                "<NAME> has been reallocated to the Office PIPER")
        else:
            arg = {
                '<new_room_name>': 'Meraki',
                '<person_identifier>': staff_id
            }
            Dojo().reallocate_person(arg)
            output = sys.stdout.getvalue().strip().split("\n")[-1]
            self.assertEqual(
                output,
                "<NAME> has been reallocated to the Office MERAKI")
def test_reallocate_staff_invalid_id(self):
Dojo().purge()
arg = {
"<room_type>": 'OFFICE',
"<room_name>": ['Meraki']
}
Dojo().create_room(arg)
arg = {
'<new_room_name>': 'Meraki',
'<person_identifier>': 'L000'
}
Dojo().reallocate_person(arg)
output = sys.stdout.getvalue().strip().split("\n")[-1]
self.assertEqual(
output, "Invalid Identifier")
def tearDown(self):
if os.path.isfile('data/db/fellow.db'):
os.remove(os.path.realpath("data/db/fellow.db"))
if os.path.isfile('data/db/office.db'):
os.remove(os.path.realpath("data/db/office.db"))
if os.path.isfile('data/db/livingspace.db'):
os.remove(os.path.realpath("data/db/livingspace.db"))
if os.path.isfile('data/db/staff.db'):
os.remove(os.path.realpath("data/db/staff.db"))
    def test_reallocate_same_room(self):
        """Reallocating somebody to their current room is refused."""
        Dojo().purge()
        arg = {
            "<room_type>": 'LIVINGSPACE',
            "<room_name>": ['Orion']
        }
        Dojo().create_room(arg)
        arg = {
            "<room_type>": 'OFFICE',
            "<room_name>": ['Meraki']
        }
        Dojo().create_room(arg)
        arg = {
            "<file_name>": 'test_input'
        }
        Dojo().load_people(arg)
        arg = {
            '<person_fname>': 'Brian',
            '<person_lname>': 'Mosigisi'
        }
        Dojo().get_person_id(arg)
        output = sys.stdout.getvalue().strip().split("\n")[-1]
        # NOTE(review): id slice [-8:-1] mirrors test_reallocate_fellow —
        # fragile against changes in the printed format.
        fellow_id = output[-8:-1]
        # Meraki is the only office, so the fellow must already be in it.
        arg = {
            '<new_room_name>': 'Meraki',
            '<person_identifier>': fellow_id
        }
        Dojo().reallocate_person(arg)
        output = sys.stdout.getvalue().strip().split("\n")[-1]
        self.assertEqual(output, "You cannot relocate to the same room")
def test_get_invalid_id(self):
Dojo().purge()
arg = {
"<person_fname>": 'Bolaji',
"<person_lname>": 'Olajide'
}
Dojo().get_person_id(arg)
output = sys.stdout.getvalue().strip().split("\n")[-1]
self.assertEqual(output, | |
nodes to upi cluster
Args:
node_list (list): of AWSUPINode objects with RHEL os
"""
rhel_pod_name = "rhel-ansible"
rhel_pod_obj = create_rhelpod(constants.DEFAULT_NAMESPACE, rhel_pod_name, 600)
timeout = 4000 # For ansible-playbook
# copy openshift-dev.pem to RHEL ansible pod
pem_src_path = "~/.ssh/openshift-dev.pem"
pem_dst_path = "/openshift-dev.pem"
pod.upload(rhel_pod_obj.name, pem_src_path, pem_dst_path)
repo_dst_path = constants.YUM_REPOS_PATH
repo = os.path.join(constants.REPO_DIR, f"ocp_{get_ocp_version('_')}.repo")
assert os.path.exists(repo), f"Required repo file {repo} doesn't exist!"
repo_file = os.path.basename(repo)
pod.upload(rhel_pod_obj.name, repo, repo_dst_path)
# copy the .pem file for our internal repo on all nodes
# including ansible pod
# get it from URL
mirror_pem_file_path = os.path.join(
constants.DATA_DIR, constants.INTERNAL_MIRROR_PEM_FILE
)
dst = constants.PEM_PATH
pod.upload(rhel_pod_obj.name, mirror_pem_file_path, dst)
# Install scp on pod
rhel_pod_obj.install_packages("openssh-clients")
# distribute repo file to all RHEL workers
hosts = [node.aws_instance_obj.private_dns_name for node in node_list]
# Check whether every host is acceptin ssh connections
for host in hosts:
self.check_connection(rhel_pod_obj, host, pem_dst_path)
for host in hosts:
disable = "sudo yum-config-manager --disable *"
rhel_pod_obj.exec_cmd_on_node(
host, pem_dst_path, disable, user=constants.EC2_USER
)
rhel_pod_obj.copy_to_server(
host,
pem_dst_path,
os.path.join(repo_dst_path, repo_file),
os.path.join("/tmp", repo_file),
user=constants.EC2_USER,
)
rhel_pod_obj.exec_cmd_on_node(
host,
pem_dst_path,
f'sudo mv {os.path.join("/tmp", repo_file)} {repo_dst_path}',
user=constants.EC2_USER,
)
rhel_pod_obj.copy_to_server(
host,
pem_dst_path,
os.path.join(dst, constants.INTERNAL_MIRROR_PEM_FILE),
os.path.join("/tmp", constants.INTERNAL_MIRROR_PEM_FILE),
user=constants.EC2_USER,
)
cmd = (
f"sudo mv "
f'{os.path.join("/tmp/", constants.INTERNAL_MIRROR_PEM_FILE)} '
f"{dst}"
)
rhel_pod_obj.exec_cmd_on_node(
host, pem_dst_path, cmd, user=constants.EC2_USER
)
# copy kubeconfig to pod
kubeconfig = os.path.join(
self.cluster_path, config.RUN.get("kubeconfig_location")
)
pod.upload(rhel_pod_obj.name, kubeconfig, "/")
pull_secret_path = os.path.join(constants.TOP_DIR, "data", "pull-secret")
pod.upload(rhel_pod_obj.name, pull_secret_path, "/tmp/")
host_file = self.build_ansible_inventory(hosts)
pod.upload(rhel_pod_obj.name, host_file, "/")
# install pod packages
rhel_pod_obj.install_packages(constants.RHEL_POD_PACKAGES)
# run ansible
try:
cmd = (
f"ansible-playbook -i /hosts --private-key={pem_dst_path} "
f"{constants.SCALEUP_ANSIBLE_PLAYBOOK}"
)
rhel_pod_obj.exec_cmd_on_pod(cmd, out_yaml_format=False, timeout=timeout)
self.verify_nodes_added(hosts)
finally:
rhel_pod_obj.delete(force=True)
def verify_nodes_added(self, hosts):
"""
Verify RHEL workers are added
Args:
hosts (list): list of aws private hostnames
Raises:
FailedToAddNodeException: if node addition failed
"""
timeout = 600
ocp_obj = ocp.OCP(kind="node")
node_info = ocp_obj.get()
for i in range(len(hosts)):
for entry in node_info["items"]:
for each in entry["status"]["addresses"]:
if each["type"] == "Hostname":
if each["address"] in hosts:
logging.info(f"Checking status for {each['address']}")
sample = TimeoutSampler(
timeout, 3, self.get_ready_status, entry
)
try:
assert sample.wait_for_func_status(result=True)
except AssertionError:
raise exceptions.FailedToAddNodeException(
"Failed to add RHEL node"
)
def get_ready_status(self, node_info):
"""
Get the node 'Ready' status
Args:
node_info (dict): Node info which includes details
Returns:
bool: True if node is Ready else False
"""
for cond in node_info["status"]["conditions"]:
if cond["type"] == "Ready":
if not cond["status"] == "True":
return False
else:
return True
def build_ansible_inventory(self, hosts):
"""
Build the ansible hosts file from jinja template
Args:
hosts (list): list of private host names
Returns:
str: path of the ansible file created
"""
_templating = templating.Templating()
ansible_host_file = dict()
ansible_host_file["ansible_user"] = constants.EC2_USER
ansible_host_file["ansible_become"] = "True"
ansible_host_file["pod_kubeconfig"] = "/kubeconfig"
ansible_host_file["pod_pull_secret"] = "/tmp/pull-secret"
ansible_host_file["rhel_worker_nodes"] = hosts
logging.info(ansible_host_file)
data = _templating.render_template(
constants.ANSIBLE_INVENTORY_YAML,
ansible_host_file,
)
logging.debug("Ansible hosts file:%s", data)
host_file_path = "/tmp/hosts"
with open(host_file_path, "w") as f:
f.write(data)
return host_file_path
@retry(exceptions.CommandFailed, tries=15, delay=30, backoff=1)
def check_connection(self, rhel_pod_obj, host, pem_dst_path):
"""
Check whether newly brought up RHEL instances are accepting
ssh connections
Args:
rhel_pod_obj (Pod): object for handling ansible pod
host (str): Node to which we want to try ssh
pem_dst_path (str): path to private key for ssh
"""
cmd = "ls"
rhel_pod_obj.exec_cmd_on_node(host, pem_dst_path, cmd, user=constants.EC2_USER)
def get_stack_name_of_node(self, node_name):
"""
Get the stack name of a given node
Args:
node_name (str): the name of the node
Returns:
str: The stack name of the given node
"""
instance_id = self.aws.get_instance_id_from_private_dns_name(node_name)
stack_name = self.aws.get_stack_name_by_instance_id(instance_id)
return stack_name
class AWSUPINode(AWSNodes):
    """
    Node object representing AWS upi nodes
    """
    def __init__(self, node_conf, node_type):
        """
        Args:
            node_conf (dict): user-provided overrides, merged onto the
                default node configuration later in _prepare_*_node_conf()
            node_type (str): "RHEL" or "RHCOS"
        """
        super(AWSUPINode, self).__init__()
        self.node_conf = node_conf
        # RHEL/RHCOS
        self.node_type = node_type
        # This variable will hold the AWS instance object
        self.aws_instance_obj = None
        self.region = config.ENV_DATA["region"]
        self.cluster_name = get_cluster_name(self.cluster_path)
        # low-level EC2 client, used for run_instances / IAM association
        self.client = boto3.client("ec2", region_name=self.region)
        # cloudformation client (taken from the shared AWS helper)
        self.cf = self.aws.cf_client
        self.infra_id = get_infra_id(self.cluster_path)
def _prepare_node(self, node_id):
"""
Create AWS instance of the node
Args:
node_id (int): Unique integer id for node
"""
if self.node_type == "RHEL":
conf = self._prepare_rhel_node_conf()
conf["node_id"] = node_id
try:
self.aws_instance_obj = self._prepare_upi_rhel_node(conf)
except Exception:
logger.error("Failed to create RHEL node")
raise
elif self.node_type == "RHCOS":
conf = self._prepare_rhcos_node_conf()
conf["node_id"] = node_id
try:
self.aws_instance_obj = self._prepare_upi_rhcos_node(conf)
except Exception:
logger.error("Failed to create RHCOS node")
raise
approve_pending_csr()
def _prepare_rhcos_node_conf(self):
"""
Merge default RHCOS node configuration for rhcos node
along with the user provided config
Returns:
dict: A dictionary of merged user and default values
"""
conf = self.read_default_config(constants.RHCOS_WORKER_CONF)
default_conf = conf.get("ENV_DATA")
merge_dict(default_conf, self.node_conf)
logger.info(f"Config after merge is {default_conf}")
return default_conf
    def _prepare_upi_rhcos_node(self, conf):
        """
        Handle RHCOS worker instance creation using cloudformation template,
        Create RHCOS instance with ami same as master

        Args:
            conf (dict): configuration for node

        Returns:
            boto3.Instance: instance of ec2 instance resource

        """
        logger.info(f"new rhcos node conf = {conf}")
        stack_name = conf.get("stack_name")
        # Derive the worker-stack suffix: reuse the trailing token of an
        # explicit stack name, otherwise fall back to the "no<zone>" convention
        if conf.get("stack_name"):
            suffix = stack_name.split("-")[-1]
        else:
            suffix = f"no{conf.get('zone')}"
        # populate self.worker_* attributes from an existing worker stack
        self.gather_worker_data(suffix)
        worker_template_path = self.get_rhcos_worker_template()
        self.bucket_name = constants.AWS_S3_UPI_BUCKET
        self.template_obj_key = f"{self.cluster_name}-workertemplate"
        # splice the cluster CA cert into the downloaded template
        self.add_cert_to_template(worker_template_path)
        # cloudformation consumes the template via URL, so stage it in S3
        self.aws.upload_file_to_s3_bucket(
            self.bucket_name, self.template_obj_key, worker_template_path
        )
        s3_url = self.aws.get_s3_bucket_object_url(
            self.bucket_name, self.template_obj_key
        )
        params_list = self.build_stack_params(conf["node_id"], conf)
        capabilities = ["CAPABILITY_NAMED_IAM"]
        self.stack_name, self.stack_id = self.aws.create_stack(
            s3_url, conf["node_id"], params_list, capabilities
        )
        instance_id = self.aws.get_stack_instance_id(
            self.stack_name, constants.AWS_WORKER_LOGICAL_RESOURCE_ID
        )
        # clean up the staged template: local copy and the S3 object
        delete_file(worker_template_path)
        self.aws.delete_s3_object(self.bucket_name, self.template_obj_key)
        return self.aws.get_ec2_instance(instance_id)
def build_stack_params(self, index, conf):
"""
Build all the params required for a stack creation
Args:
index (int): An integer index for this stack
conf (dict): Node config
Returns:
list: of param dicts
"""
param_list = []
pk = "ParameterKey"
pv = "ParameterValue"
param_list.append({pk: "Index", pv: str(index)})
param_list.append({pk: "InfrastructureName", pv: self.infra_id})
param_list.append({pk: "RhcosAmi", pv: self.worker_image_id})
param_list.append({pk: "IgnitionLocation", pv: self.worker_ignition_location})
param_list.append({pk: "Subnet", pv: self.worker_subnet})
param_list.append(
{
pk: "WorkerSecurityGroupId",
pv: self.worker_security_group[0].get("GroupId"),
}
)
param_list.append(
{pk: "WorkerInstanceProfileName", pv: self.worker_instance_profile}
)
param_list.append({pk: "WorkerInstanceType", pv: conf["worker_instance_type"]})
return param_list
def add_cert_to_template(self, worker_template_path):
"""
Add cert to worker template
Args:
worker_template_path (str): Path where template file is located
"""
worker_ignition_path = os.path.join(self.cluster_path, constants.WORKER_IGN)
cert = self.get_cert_content(worker_ignition_path)
self.update_template_with_cert(worker_template_path, cert)
def update_template_with_cert(self, worker_template_path, cert):
"""
Update the template file with cert provided
Args:
worker_template_path (str): template file path
cert (str): Certificate body
"""
search_str = "ABC...xYz=="
temp = "/tmp/worker_temp.yaml"
with open(worker_template_path, "r") as fp:
orig_content = fp.read()
logger.info("=====ORIGINAL=====")
logger.info(orig_content)
final_content = re.sub(
r"{}".format(search_str), r"{}".format(cert), orig_content
)
with open(temp, "w") as wfp:
logger.info(final_content)
wfp.write(final_content)
os.rename(temp, worker_template_path)
def get_cert_content(self, worker_ignition_path):
"""
Get the certificate content from worker ignition file
Args:
worker_ignition_path (str): Path of the worker ignition file
Returns:
formatted_cert (str): certificate content
"""
assert os.path.exists(worker_ignition_path)
with open(worker_ignition_path, "r") as fp:
content = json.loads(fp.read())
tls_data = content.get("ignition").get("security").get("tls")
cert_content = tls_data.get("certificateAuthorities")[0].get("source")
formatted_cert = cert_content.split(",")[1]
return formatted_cert
def get_rhcos_worker_template(self):
"""
Download template and keep it locally
Returns:
path (str): local path to template file
"""
common_base = "functionality-testing"
ocp_version = get_ocp_version("_")
relative_template_path = os.path.join(
f"aos-{ocp_version}", "hosts/upi_on_aws-cloudformation-templates"
)
path_to_file = os.path.join(
f"{common_base}",
f"{relative_template_path}",
f"{constants.AWS_WORKER_NODE_TEMPLATE}",
)
logger.info(
f"Getting file '{path_to_file}' from "
f"git repository {constants.OCP_QE_MISC_REPO}"
)
tmp_file = os.path.join("/tmp", constants.AWS_WORKER_NODE_TEMPLATE)
download_file_from_git_repo(constants.OCP_QE_MISC_REPO, path_to_file, tmp_file)
return tmp_file
def _prepare_rhel_node_conf(self):
"""
Merge default RHEL node config with the user provided
config
"""
conf = self.read_default_config(constants.RHEL_WORKERS_CONF)
default_conf = conf.get("ENV_DATA")
merge_dict(default_conf, self.node_conf)
logger.info(f"Merged dict is {default_conf}")
return default_conf
    def _prepare_upi_rhel_node(self, node_conf):
        """
        Handle RHEL worker instance creation
        1. Create RHEL worker instance , copy required AWS tags from existing
        worker instances to new RHEL instance
        2. Copy IAM role from existing worker to new RHEL worker

        Args:
            node_conf (dict): merged node configuration; reads node_id, zone,
                root_disk, root_disk_size, rhel_worker_ami and
                rhel_worker_instance_type

        Returns:
            boto3 EC2 Instance resource for the newly created worker

        """
        cluster_id = get_infra_id(self.cluster_path)
        node_id = node_conf["node_id"]
        zone = node_conf.get("zone")
        logger.info("Creating RHEL worker node")
        # populate self.worker_* attributes from an existing worker stack
        self.gather_worker_data(f"no{zone}")
        response = self.client.run_instances(
            BlockDeviceMappings=[
                {
                    "DeviceName": node_conf["root_disk"],
                    "Ebs": {
                        "DeleteOnTermination": True,
                        "VolumeSize": node_conf["root_disk_size"],
                        "VolumeType": "gp2",
                    },
                },
            ],
            ImageId=node_conf["rhel_worker_ami"],
            # reuse the subnet/security group gathered from the existing worker
            SubnetId=self.worker_subnet,
            InstanceType=node_conf["rhel_worker_instance_type"],
            MaxCount=1,
            MinCount=1,
            Monitoring={"Enabled": False},
            SecurityGroupIds=[
                self.worker_security_group[0]["GroupId"],
            ],
            KeyName="openshift-dev",
        )
        inst_id = response["Instances"][0]["InstanceId"]
        worker_ec2 = boto3.resource("ec2", region_name=self.region)
        worker_instance = worker_ec2.Instance(inst_id)
        # block until EC2 reports the instance as running
        worker_instance.wait_until_running()
        worker_name = f"{cluster_id}-rhel-worker-{node_id}"
        # copy the cluster-membership tag from the existing workers so the
        # new instance is recognized as part of this cluster
        worker_ec2.create_tags(
            Resources=[inst_id],
            Tags=[
                {"Key": "Name", "Value": f"{worker_name}"},
                {"Key": self.worker_tag[0], "Value": self.worker_tag[1]},
            ],
        )
        logging.info(self.worker_iam_role)
        # attach the same IAM instance profile the existing workers use
        self.client.associate_iam_instance_profile(
            IamInstanceProfile=self.worker_iam_role,
            InstanceId=inst_id,
        )
        return worker_instance
def gather_worker_data(self, suffix="no0"):
"""
Gather various info like vpc, iam role, subnet,security group,
cluster tag from existing RHCOS workers
Args:
suffix (str): suffix to get resource of worker node, 'no0' by default
"""
stack_name = f"{self.cluster_name}-{suffix}"
resource = self.cf.list_stack_resources(StackName=stack_name)
worker_id = self.get_worker_resource_id(resource)
ec2 = boto3.resource("ec2", region_name=self.region)
worker_instance = ec2.Instance(worker_id)
self.worker_vpc = worker_instance.vpc.id
self.worker_subnet = worker_instance.subnet.id
self.worker_security_group = worker_instance.security_groups
self.worker_iam_role = worker_instance.iam_instance_profile
self.worker_tag = self.get_kube_tag(worker_instance.tags)
self.worker_image_id = worker_instance.image.id # | |
that the branch from that method has been fully traversed
if not methodInDB(method_name, dict_link, crsr)[0]:
getInterfacesFromReport(method_name, dict_link, folder_path, crsr, inv_list_text[0]) # calls getInt. on next method
def createInterfaceGraphFromDB(method_name, dict_link, interface_db_cursor):
    """
    When the method passed in to the script is in the database we can recreate the interface using SQL select statements
    """
    global interface_graph
    # start from a clean adjacency map; the recursive DB walk fills it in
    interface_graph = {}
    getInterfacesFromDB(method_name, dict_link, interface_db_cursor)
    return interface_graph
def getInterfacesFromDB( method_name, dict_link, crsr ):
    """
    Recursive method to create the interface graph using inline SQL

    Populates the module-level ``interface_graph`` adjacency dict, mapping
    "<method>#<dict link>" keys to lists of callee "<method>#<dict link>"
    strings. Assumes the graph is acyclic: cycles were already removed when
    the database was built from the reports.
    """
    if "::" in method_name: # break the method into pieces for the query
        method = method_name.split('::')
        cn = method[0].strip()  # class name
        mn = '::'.join(method[1:]).strip()  # method name (may itself contain '::')
    else:
        cn = "Unknown"  # free functions / built-ins are stored under class "Unknown"
        mn = method_name
    # SQL statement to get all of the invoked methods from the caller currently passed in, basically recreating an invocation tree
    crsr.execute("SELECT callee_class, callee_method_name, callee_dict_link FROM interfaces WHERE caller_class = ? AND caller_method_name = ? and caller_dict_link = ?", (cn, mn, dict_link ))
    res = crsr.fetchall()
    if len(res) == 0: # if there was nothing there: this caller invokes nothing
        return
    method_name = method_name + '#' + dict_link
    interface_graph[method_name] = [] # we assume there won't be any cycles in the graph because when it was made from reports it took care of cycles
    for i in res:
        if i[0] == 'Unknown': # build the method together for the interface graph
            j = i[1]
            tmp = j + '#' + i[2]
        else:
            j = "::".join(i[0:2])
            tmp = j + '#' + i[2]
        interface_graph[method_name].append(tmp)
        getInterfacesFromDB( j, i[2], crsr ) #recursive call to continue building the tree
def getMethodSignature(method_name, folder_path):
    """
    Method that gets the method signature and method text from the actual
    .cpp and .h files in the FSW from links in the data dictionary.
    Opens the data dictionary file and then parses the .cpp or .h file for the method

    Returns a two element list ``[method_sig, method_text]``; either element
    may instead be a human-readable explanation string when the definition
    could not be located.
    """
    if len(method_name) == 0: # some case where we get an empty method_name
        return ['','']
    if '(Virtual)' in method_name: # if, for some reason there is still virtual in the method name, remove it
        method_name = method_name[:-10].strip()
    if not method_name[0].isalpha(): # non-alpha methods live in the 'Non-Alpha' dictionary page
        first = 'Non-Alpha'
    else:
        first = method_name[0]
    # method_name arrives as "<name>#<dict link>"; split the two apart
    tmp = method_name.split('#')
    method_name = tmp[0].strip()
    dict_link = tmp[1].strip()
    # NOTE(review): the '\d' below is an unrecognized escape in the path
    # literal (works only because Python leaves it as-is, and only on
    # Windows path separators) — confirm before porting.
    dict_html = open(folder_path + '\dictionary_' + first + '.html', encoding='utf8') #open the correct data dictionary file
    dict_html = BeautifulSoup(dict_html, 'html.parser')
    # use regex to detect the correct block in the data dictionary, using the unique data dictionary link described above
    regex_func_html_block = re.compile(r'<a name=\"%s\">.+?\n\n' % dict_link, re.DOTALL | re.MULTILINE )
    func_html_block = regex_func_html_block.search( str(dict_html) )
    if not func_html_block: # the regex for the correct dictionary block failed
        return [method_name, 'Could not locate definition. Please refer to the docs.']
    func_html_block = func_html_block.group()
    if 'Unknown ' in func_html_block: # function is like memset or something built in
        return [method_name, 'Built in method. No defintion provided']
    # now search for the .cpp or .h file link
    try:
        regex_file_link = re.compile(r'<a href=\"(.+)\">')
        file_link = regex_file_link.search(func_html_block.split('\n')[1]).group(1)
    except: # no definition link found
        return [method_name, 'Could not locate definition. Please refer to the docs.']
    file_link = file_link.split('#') #the file link is formatted like: "12313.html#45" with the first number the file number and the #num the line number
    line_num = file_link[1].strip() # store line number in its own var
    #----regex compilation-----#
    # signature: from the line number up to (but not including) a '{', with no trailing ';'
    regex_method_sig = re.compile( r'%s(.+[^\;])\{' % line_num, re.DOTALL | re.MULTILINE )
    # header-only declaration: from the line number up to a ';'
    regex_header = re.compile( r'%s(.+)\;' % line_num, re.DOTALL | re.MULTILINE )
    #--------------------------#
    file_link = file_link[0].strip() # same as file_link
    def_html = open(folder_path + "\\" + file_link, encoding='utf8')
    def_lines = def_html.readlines() #open the .cpp/.h file
    inFunction = False # True while scanning lines that belong to the method
    method_sig = None # becomes the signature string once regexed out
    method_text = '' # accumulates the HTML text of the whole method
    bracestack = [] # initialize a stack that will determine when the method text starts and ends
    header = '' # if this is a header and not a link to definition this will become method_sig
    header_only = '' #this string is just in case it is a header and no method definition (html)
    for line in def_lines: # go through the file looking for the right line
        if "<a name=\"%s\">" % line_num in line and not inFunction: # we are at the correct line
            inFunction = True # we are now "inFunction"
        if inFunction: #if we are inFunction we want to capture the text of this line
            method_text = method_text + line #initially will hold html string of the function
            header_only = header_only + line # a second variable for if the file is simply a header only declaration
        nocomment = commentRemover(line) # remove the comments from the line temporarily for regex so it doesn't trip it
        # to convert header to text
        header_html = header_only
        header_html = BeautifulSoup(header_html, 'html.parser')
        header_html_text = header_html.get_text()
        if "{" in nocomment and inFunction:
            bracestack.append("{") #push to brace stack
            if method_sig == None: # we haven't found the method signature yet
                method_sig_html = method_text
                method_sig_html = BeautifulSoup(method_sig_html, 'html.parser')
                try: # try to regex for the method signature, if it fails pass for now
                    method_sig = regex_method_sig.search( method_sig_html.get_text() ).group(1).strip()
                    if '\n' in method_sig[1:]: #remove any trailing line #'s in the signature
                        inds = [ i for i, ch in enumerate(method_sig) if ch == '\n' ]
                        method_sig = list(method_sig)
                        for i in inds:
                            i += 1
                            while method_sig[i].isdigit():
                                method_sig[i] = ' ' # blank out the digits of the line number
                                i+=1
                                if i == len(method_sig):
                                    break
                        method_sig = "".join(method_sig) #method sig now doesn't have line nums if it is mutliple lines
                except:
                    pass
        if "}" in nocomment and inFunction:
            bracestack.append("}")
            if bracestack[len(bracestack)-1] == "}" and bracestack[len(bracestack)-2] == "{": #matched
                bracestack.pop() # if we have matched two braces together we can pop them off the stack
                bracestack.pop()
            if len(bracestack) == 0 and inFunction and method_sig != None: #if after we pop off the braces len == 0 and inFunction
                inFunction = False # we know we are at the end of the function, finish this loop
                break
        #---------- for header only declarations
        # This is meant to catch the cases when the data dictionary links the script to a file
        # where there is no method definition, but rather a headers declaration
        # this happens in .h files typically
        elif ';' in header_html_text and method_sig == None and len(header) == 0: # could be header, because no method sig found
            header = regex_header.search( header_html.get_text() )
            try:
                header = header.group(1).strip() # if we got something with regex
            except Exception as e:
                print('\n') # if the regex fails then print out the failure to the screen and continue searching
                print(e) # ideally this doesn't ever happen, code hasn't reached here in recent runs (07/20/18)
                print(header_html.get_text(), '\n\n')
                continue
            if '\n' in header[1:]: #remove any trailing line #'s in header
                inds = [ i for i, ch in enumerate(header) if ch == '\n' ]
                header = list(header) # this is the same as above, remove line #'s if multiple line header
                for i in inds:
                    i += 1
                    while header[i].isdigit():
                        header[i] = ' '
                        i+=1
                        if i == len(header):
                            break
                header = "".join(header)
            break
        #----------
    method_text = BeautifulSoup(method_text, 'html.parser') # the html we have collected in the loop for the whole method's text
    method_text = method_text.get_text() # turn it into text from HTML, will be displayed at the bottom of the excel sheet
    if header and method_sig == None: #potential its a header only declaration, in this case we didn't get any method text
        method_sig = header # so we output a message in the excel sheet for the analyst to check the reports later
        method_text = 'Header only declaration. Check file ' + file_link + ' in the HTML reports for more information.'
    return [method_sig, method_text]
def commentRemover(text): #credit to ChunMinChang for this method: https://gist.github.com/ChunMinChang/88bfa5842396c1fbbc5b
"""Method that will remove any C++ style comments from a string, shoutout ChunMinChang"""
def replacer(match):
s = match.group(0)
if s.startswith('/'):
return " " # note: a space and not an empty string
else:
return s
pattern = |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.