input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
= msgbox.exec_()
if result ==QtWidgets.QMessageBox.Cancel:
return
if thread is True:
self.worker_task.emit({'fcn': self.save_project,
'params': [filename]})
else:
self.save_project(filename)
# self.save_project(filename)
self.file_opened.emit("project", filename)
self.file_saved.emit("project", filename)
if not make_copy:
self.project_filename = filename
def export_svg(self, obj_name, filename, scale_factor=0.00):
    """
    Exports a Geometry Object to an SVG file.

    :param obj_name: Name of the object to export, looked up in the
        application's object collection.
    :param filename: Path to the SVG file to save to. If None, the
        default save folder from the app defaults is used instead.
    :param scale_factor: Factor forwarded to the object's export_svg().
    :return: Error string if the object cannot be retrieved, else None.
    """
    if filename is None:
        filename = self.defaults["global_last_save_folder"]

    self.log.debug("export_svg()")

    try:
        obj = self.collection.get_by_name(str(obj_name))
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # are not swallowed.
        # TODO: The return behavior has not been established... should raise exception?
        return "Could not retrieve object: %s" % obj_name

    with self.proc_container.new("Exporting SVG") as proc:
        exported_svg = obj.export_svg(scale_factor=scale_factor)

        # Determine bounding area for svg export
        bounds = obj.bounds()
        size = obj.size()

        # Convert everything to strings for use in the xml doc
        svgwidth = str(size[0])
        svgheight = str(size[1])
        minx = str(bounds[0])
        miny = str(bounds[1] - size[1])
        uom = obj.units.lower()

        # Add a SVG Header and footer to the svg output from shapely
        # The transform flips the Y Axis so that everything renders
        # properly within svg apps such as inkscape
        svg_header = '<svg xmlns="http://www.w3.org/2000/svg" ' \
                     'version="1.1" xmlns:xlink="http://www.w3.org/1999/xlink" '
        svg_header += 'width="' + svgwidth + uom + '" '
        svg_header += 'height="' + svgheight + uom + '" '
        svg_header += 'viewBox="' + minx + ' ' + miny + ' ' + svgwidth + ' ' + svgheight + '">'
        svg_header += '<g transform="scale(1,-1)">'
        svg_footer = '</g> </svg>'
        svg_elem = svg_header + exported_svg + svg_footer

        # Parse the xml through a xml parser just to add line feeds
        # and to make it look more pretty for the output
        svgcode = parse_xml_string(svg_elem)
        with open(filename, 'w') as fp:
            fp.write(svgcode.toprettyxml())

        self.file_saved.emit("SVG", filename)
        self.inform.emit("[success] SVG file exported to " + filename)
def export_svg_negative(self, obj_name, box_name, filename, boundary, scale_factor=0.00, use_thread=True):
    """
    Exports a Geometry Object to an SVG file in negative: the geometry is
    drawn white on top of a black background rectangle.

    :param obj_name: Name of the object to export.
    :param box_name: Name of the object providing the bounding box; falls
        back to the exported object itself when missing.
    :param filename: Path to the SVG file to save to. If None, the
        default save folder is used.
    :param boundary: Margin added around the bounding box (drawing units).
    :param scale_factor: Factor forwarded to the object's export_svg().
    :param use_thread: If True, run the film generation in a worker thread.
    :type use_thread: bool
    :return: Error string if an object cannot be retrieved, else None.
    """
    if filename is None:
        filename = self.defaults["global_last_save_folder"]

    self.log.debug("export_svg() negative")

    try:
        obj = self.collection.get_by_name(str(obj_name))
    except Exception:
        # TODO: The return behavior has not been established... should raise exception?
        return "Could not retrieve object: %s" % obj_name

    try:
        box = self.collection.get_by_name(str(box_name))
    except Exception:
        # TODO: The return behavior has not been established... should raise exception?
        return "Could not retrieve object: %s" % box_name

    if box is None:
        self.inform.emit("[warning_notcl]No object Box. Using instead %s" % obj)
        box = obj

    def make_negative_film():
        # Build and write the negative SVG; runs either inline or in a
        # worker thread (see below).
        exported_svg = obj.export_svg(scale_factor=scale_factor)
        self.progress.emit(40)

        # Determine bounding area for svg export
        bounds = box.bounds()
        size = box.size()
        uom = obj.units.lower()

        # Convert everything to strings for use in the xml doc
        svgwidth = str(size[0] + (2 * boundary))
        svgheight = str(size[1] + (2 * boundary))
        minx = str(bounds[0] - boundary)
        miny = str(bounds[1] + boundary + size[1])
        miny_rect = str(bounds[1] - boundary)

        # Add a SVG Header and footer to the svg output from shapely
        # The transform flips the Y Axis so that everything renders
        # properly within svg apps such as inkscape
        svg_header = '<svg xmlns="http://www.w3.org/2000/svg" ' \
                     'version="1.1" xmlns:xlink="http://www.w3.org/1999/xlink" '
        svg_header += 'width="' + svgwidth + uom + '" '
        svg_header += 'height="' + svgheight + uom + '" '
        svg_header += 'viewBox="' + minx + ' -' + miny + ' ' + svgwidth + ' ' + svgheight + '" '
        svg_header += '>'
        svg_header += '<g transform="scale(1,-1)">'
        svg_footer = '</g> </svg>'

        self.progress.emit(60)

        # Change the attributes of the exported SVG
        # We don't need stroke-width - wrong, we do when we have lines with certain width
        # We set opacity to maximum
        # We set the color to WHITE
        root = ET.fromstring(exported_svg)
        for child in root:
            child.set('fill', '#FFFFFF')
            child.set('opacity', '1.0')
            child.set('stroke', '#FFFFFF')

        # Black background rectangle inserted before (behind) the white geometry
        first_svg_elem_tag = 'rect'
        first_svg_elem_attribs = {
            'x': minx,
            'y': miny_rect,
            'width': svgwidth,
            'height': svgheight,
            'id': 'neg_rect',
            'style': 'fill:#000000;opacity:1.0;stroke-width:0.0'
        }
        root.insert(0, ET.Element(first_svg_elem_tag, first_svg_elem_attribs))

        # BUG FIX: ET.tostring() returns bytes on Python 3; the old
        # str(exported_svg) embedded the "b'...'" repr into the document.
        exported_svg = ET.tostring(root).decode('utf-8')
        svg_elem = svg_header + exported_svg + svg_footer
        self.progress.emit(80)

        # Parse the xml through a xml parser just to add line feeds
        # and to make it look more pretty for the output
        doc = parse_xml_string(svg_elem)
        with open(filename, 'w') as fp:
            fp.write(doc.toprettyxml())
        self.progress.emit(100)

        self.file_saved.emit("SVG", filename)
        self.inform.emit("[success] SVG file exported to " + filename)

    if use_thread is True:
        proc = self.proc_container.new("Generating Film ... Please wait.")

        def job_thread_film(app_obj):
            try:
                make_negative_film()
            except Exception:
                proc.done()
                return
            proc.done()

        self.worker_task.emit({'fcn': job_thread_film, 'params': [self]})
    else:
        make_negative_film()
def export_svg_black(self, obj_name, box_name, filename, scale_factor=0.00, use_thread=True):
    """
    Exports a Geometry Object to an SVG file with all geometry drawn in
    solid black (positive film).

    :param obj_name: Name of the object to export.
    :param box_name: Name of the object providing the bounding box; falls
        back to the exported object itself when missing.
    :param filename: Path to the SVG file to save to. If None, the
        default save folder is used.
    :param scale_factor: Factor forwarded to the object's export_svg().
    :param use_thread: If True, run the film generation in a worker thread.
    :type use_thread: bool
    :return: Error string if an object cannot be retrieved, else None.
    """
    if filename is None:
        filename = self.defaults["global_last_save_folder"]

    self.log.debug("export_svg() black")

    try:
        obj = self.collection.get_by_name(str(obj_name))
    except Exception:
        # TODO: The return behavior has not been established... should raise exception?
        return "Could not retrieve object: %s" % obj_name

    try:
        box = self.collection.get_by_name(str(box_name))
    except Exception:
        # TODO: The return behavior has not been established... should raise exception?
        return "Could not retrieve object: %s" % box_name

    if box is None:
        self.inform.emit("[warning_notcl]No object Box. Using instead %s" % obj)
        box = obj

    def make_black_film():
        # Build and write the black-geometry SVG; runs either inline or
        # in a worker thread (see below).
        exported_svg = obj.export_svg(scale_factor=scale_factor)
        self.progress.emit(40)

        # Change the attributes of the exported SVG
        # We don't need stroke-width
        # We set opacity to maximum
        # We set the colour to BLACK
        root = ET.fromstring(exported_svg)
        for child in root:
            child.set('fill', '#000000')
            child.set('opacity', '1.0')
            child.set('stroke', '#000000')

        # BUG FIX: ET.tostring() returns bytes on Python 3; the old
        # str(exported_svg) embedded the "b'...'" repr into the document.
        exported_svg = ET.tostring(root).decode('utf-8')

        # Determine bounding area for svg export
        bounds = box.bounds()
        size = box.size()

        # This contain the measure units
        uom = obj.units.lower()

        # Define a boundary around SVG of about 1.0mm (~39mils)
        if uom in "mm":
            boundary = 1.0
        else:
            boundary = 0.0393701

        self.progress.emit(80)

        # Convert everything to strings for use in the xml doc
        svgwidth = str(size[0] + (2 * boundary))
        svgheight = str(size[1] + (2 * boundary))
        minx = str(bounds[0] - boundary)
        miny = str(bounds[1] + boundary + size[1])
        self.log.debug(minx)
        self.log.debug(miny)

        # Add a SVG Header and footer to the svg output from shapely
        # The transform flips the Y Axis so that everything renders
        # properly within svg apps such as inkscape
        svg_header = '<svg xmlns="http://www.w3.org/2000/svg" ' \
                     'version="1.1" xmlns:xlink="http://www.w3.org/1999/xlink" '
        svg_header += 'width="' + svgwidth + uom + '" '
        svg_header += 'height="' + svgheight + uom + '" '
        svg_header += 'viewBox="' + minx + ' -' + miny + ' ' + svgwidth + ' ' + svgheight + '" '
        svg_header += '>'
        svg_header += '<g transform="scale(1,-1)">'
        svg_footer = '</g> </svg>'
        svg_elem = svg_header + exported_svg + svg_footer
        self.progress.emit(90)

        # Parse the xml through a xml parser just to add line feeds
        # and to make it look more pretty for the output
        doc = parse_xml_string(svg_elem)
        with open(filename, 'w') as fp:
            fp.write(doc.toprettyxml())
        self.progress.emit(100)

        self.file_saved.emit("SVG", filename)
        self.inform.emit("[success] SVG file exported to " + filename)

    if use_thread is True:
        proc = self.proc_container.new("Generating Film ... Please wait.")

        def job_thread_film(app_obj):
            try:
                make_black_film()
            except Exception:
                proc.done()
                return
            proc.done()

        self.worker_task.emit({'fcn': job_thread_film, 'params': [self]})
    else:
        make_black_film()
def export_excellon(self, obj_name, filename, altium_format=None, use_thread=True):
"""
Exports a Geometry Object to an Excellon file.
:param filename: Path to the Excellon file to save to.
:return:
"""
if filename is None:
filename = self.defaults["global_last_save_folder"]
self.log.debug("export_excellon()")
format_exc = ';FILE_FORMAT=2:4\n'
units = ''
try:
obj = self.collection.get_by_name(str(obj_name))
except:
# TODO: The return behavior has not been established... should raise exception?
return "Could not retrieve object: %s" % obj_name
# updated units
units = self.general_options_form.general_group.units_radio.get_value().upper()
if units == 'IN' or units == 'INCH':
units = 'INCH'
elif units == 'MM' or units == 'METRIC':
units ='METRIC'
def make_excellon():
try:
time_str = "{:%A, %d %B %Y at %H:%M}".format(datetime.now())
header = 'M48\n'
header += ';EXCELLON GENERATED BY FLATCAM - www.flatcam.org 2018\n'
header += | |
[], # Pickles, cucumber, dill, reduced sodium
11948: [], # Pickles, cucumber, sweet, low sodium (includes bread and butter pickles)
11949: [], # Catsup, low sodium
11950: ["Enoki mushroom"], # Mushrooms, enoki, raw
11951: ["Yellow pepper", "sweet"], # Peppers, sweet, yellow, raw
11952: ["Radicchio"], # Radicchio, raw
11953: [], # Squash, zucchini, baby, raw
11954: ["Tomatillo"], # Tomatillos, raw
11955: [], # Tomatoes, sun-dried
11956: [], # Tomatoes, sun-dried, packed in oil, drained
11957: ["Fennel", "bulb"], # Fennel, bulb, raw
11958: [], # Pickle relish, hamburger
11959: ["Arugula"], # Arugula, raw
11960: ["Baby carrot"], # Carrots, baby, raw
11961: [], # Hearts of palm, canned
11962: [], # Peppers, hot chile, sun-dried
11963: ["Nopales"], # Nopales, raw
11964: [], # Nopales, cooked, without salt
11965: ["Cauliflower", "green"], # Cauliflower, green, raw
11967: [], # Cauliflower, green, cooked, no salt added
11968: [], # Cauliflower, green, cooked, with salt
11969: [], # Broccoli, chinese, cooked
11970: [], # Cabbage, napa, cooked
11972: ["Lemon grass", "", "Citronella"], # Lemon grass (citronella), raw
11973: ["Fava bean"], # Beans, fava, in pod, raw
11974: ["Grape leaf"], # Grape leaves, raw
11975: [], # Grape leaves, canned
11976: ["Banana pepper"], # Pepper, banana, raw
11977: ["Serrano pepper"], # Peppers, serrano, raw
11978: [], # Peppers, ancho, dried
11979: ["Jalapeno pepper"], # Peppers, jalapeno, raw
11980: [], # Peppers, chili, green, canned
11981: ["Hungarian pepper"], # Peppers, hungarian, raw
11982: [], # Peppers, pasilla, dried
11983: [], # Pickles, chowchow, with cauliflower onion mustard, sweet
11984: ["Epazote"], # Epazote, raw
11985: ["Fireweed"], # Fireweed, leaves, raw
11986: [], # Malabar spinach, cooked
11987: ["Oyster mushroom"], # Mushrooms, oyster, raw
11988: [], # Fungi, Cloud ears, dried
11989: [], # Mushrooms, straw, canned, drained solids
11990: ["Wasabi", "root"], # Wasabi, root, raw
11991: ["Yautia", "", "Tannier"], # Yautia (tannier), raw
11992: [], # Mushrooms, white, microwaved
11993: ["Maitake mushroom"], # Mushrooms, maitake, raw
11994: ["Broccoli", "chinese"], # Broccoli, chinese, raw
11995: ["Fiddlehead fern"], # Fiddlehead ferns, raw
11996: [], # Fiddlehead ferns, frozen, unprepared
11998: [], # Mushrooms, portabella, exposed to ultraviolet light, raw
12001: ["Breadfruit seed"], # Seeds, breadfruit seeds, raw
12003: [], # Seeds, breadfruit seeds, boiled
12004: ["Breadnut tree seed"], # Seeds, breadnut tree seeds, raw
12005: [], # Seeds, breadnut tree seeds, dried
12006: [], # Seeds, chia seeds, dried
12007: [], # Seeds, cottonseed flour, partially defatted (glandless)
12008: [], # Seeds, cottonseed flour, low fat (glandless)
12011: [], # Seeds, cottonseed meal, partially defatted (glandless)
12012: [], # Seeds, hemp seed, hulled
12013: [], # Seeds, lotus seeds, dried
12014: [], # Seeds, pumpkin and squash seed kernels, dried
12016: [], # Seeds, pumpkin and squash seed kernels, roasted, without salt
12021: [], # Seeds, safflower seed kernels, dried
12022: [], # Seeds, safflower seed meal, partially defatted
12023: [], # Seeds, sesame seeds, whole, dried
12024: [], # Seeds, sesame seeds, whole, roasted and toasted
12029: [], # Seeds, sesame seed kernels, toasted, without salt added (decorticated)
12032: [], # Seeds, sesame flour, partially defatted
12033: [], # Seeds, sesame flour, low-fat
12034: [], # Seeds, sesame meal, partially defatted
12036: [], # Seeds, sunflower seed kernels, dried
12037: [], # Seeds, sunflower seed kernels, dry roasted, without salt
12038: [], # Seeds, sunflower seed kernels, oil roasted, without salt
12039: [], # Seeds, sunflower seed kernels, toasted, without salt
12040: [], # Seeds, sunflower seed butter, without salt
12041: [], # Seeds, sunflower seed flour, partially defatted
12058: ["Acorn nut"], # Nuts, acorns, raw
12059: [], # Nuts, acorns, dried
12060: [], # Nuts, acorn flour, full fat
12061: ["Almond"], # Nuts, almonds
12062: [], # Nuts, almonds, blanched
12063: [], # Nuts, almonds, dry roasted, without salt added
12065: [], # Nuts, almonds, oil roasted, without salt added
12071: [], # Nuts, almond paste
12077: [], # Nuts, beechnuts, dried
12078: [], # Nuts, brazilnuts, dried, unblanched
12084: [], # Nuts, butternuts, dried
12085: [], # Nuts, cashew nuts, dry roasted, without salt added
12086: [], # Nuts, cashew nuts, oil roasted, without salt added
12087: ["Cashew nut"], # Nuts, cashew nuts, raw
12088: [], # Nuts, cashew butter, plain, without salt added
12093: ["Chestnut", "chinese"], # Nuts, chestnuts, chinese, raw
12094: [], # Nuts, chestnuts, chinese, dried
12095: [], # Nuts, chestnuts, chinese, boiled and steamed
12096: [], # Nuts, chestnuts, chinese, roasted
12097: ["Chestnut", "european"], # Nuts, chestnuts, european, raw, unpeeled
12098: [], # Nuts, chestnuts, european, raw, peeled
12099: [], # Nuts, chestnuts, european, dried, unpeeled
12100: [], # Nuts, chestnuts, european, dried, peeled
12101: [], # Nuts, chestnuts, european, boiled and steamed
12104: ["Coconut meat"], # Nuts, coconut meat, raw
12108: [], # Nuts, coconut meat, dried (desiccated), not sweetened
12109: [], # Nuts, coconut meat, dried (desiccated), sweetened, flaked, packaged
12110: [], # Nuts, coconut meat, dried (desiccated), sweetened, flaked, canned
12114: [], # Nuts, coconut meat, dried (desiccated), toasted
12115: [], # Nuts, coconut cream, raw (liquid expressed from grated meat)
12116: [], # Nuts, coconut cream, canned, sweetened
12117: [
"Coconut milk"
], # Nuts, coconut milk, raw (liquid expressed from grated meat and water)
12118: [], # Nuts, coconut milk, canned (liquid expressed from grated meat and water)
12119: [], # Nuts, coconut water (liquid from coconuts)
12120: ["Hazelnut", "", "Filbert"], # Nuts, hazelnuts or filberts
12121: [], # Nuts, hazelnuts or filberts, blanched
12122: [], # Nuts, hazelnuts or filberts, dry roasted, without salt added
12127: ["Ginkgo nut"], # Nuts, ginkgo nuts, raw
12128: [], # Nuts, ginkgo nuts, dried
12129: [], # Nuts, ginkgo nuts, canned
12130: [], # Nuts, hickorynuts, dried
12131: ["Macadamia nut"], # Nuts, macadamia nuts, raw
12132: [], # Nuts, macadamia nuts, dry roasted, without salt added
12135: [], # Nuts, mixed nuts, dry roasted, with peanuts, without salt added
12136: [], # Nuts, mixed nuts, dry roasted, with peanuts, salt added, PLANTERS pistachio blend
12137: [], # Nuts, mixed nuts, oil roasted, with peanuts, without salt added
12138: [], # Nuts, mixed nuts, oil roasted, without peanuts, without salt added
12140: [], # Nuts, formulated, wheat-based, unflavored, with salt added
12141: [], # Nuts, mixed nuts, dry roasted, with peanuts, salt added, CHOSEN ROASTER
12142: [], # Nuts, pecans
12143: [], # Nuts, pecans, dry roasted, without salt added
12144: [], # Nuts, pecans, oil roasted, without salt added
12145: [], # Nuts, pilinuts, dried
12147: [], # Nuts, pine nuts, dried
12149: [], # Nuts, pine nuts, pinyon, dried
12151: ["Pistachio nut"], # Nuts, pistachio nuts, raw
12152: [], # Nuts, pistachio nuts, dry roasted, without salt added
12154: [], # Nuts, walnuts, black, dried
12155: [], # Nuts, walnuts, english
12156: [], # Nuts, walnuts, glazed
12157: [], # Nuts, walnuts, dry roasted, with salt added
12158: [], # Seeds, breadfruit seeds, roasted
12160: [], # Seeds, cottonseed kernels, roasted (glandless)
12163: [], # Seeds, pumpkin and squash seeds, whole, roasted, without salt
12166: [], # Seeds, sesame butter, tahini, from roasted and toasted kernels (most common type)
12167: [], # Nuts, chestnuts, european, roasted
12169: [], # Seeds, sesame butter, paste
12170: [], # Seeds, sesame flour, high-fat
12171: [], # Seeds, sesame butter, tahini, from unroasted kernels (non-chemically removed seed coat)
12174: [], # Seeds, watermelon seed kernels, dried
12175: [], # Nuts, chestnuts, japanese, dried
12176: [], # Nuts, coconut milk, frozen (liquid expressed from grated meat and water)
12177: [], # Nuts, coconut meat, dried (desiccated), creamed
12179: [], # Nuts, coconut meat, dried (desiccated), sweetened, shredded
12193: [], # Seeds, sisymbrium sp. seeds, whole, dried
12195: [], # Nuts, almond butter, plain, without salt added
12198: [], # Seeds, sesame butter, tahini, from raw and stone | |
#!/usr/bin/env python
# This will (hopefully) be the code to extract symmetry operations
# from Hall symbols
import numpy as np
# Centring translations for each Hall lattice symbol, in fractional
# coordinates. Every symmetry operation of a group is replicated once
# per vector listed here ('P' primitive lattices get only the zero vector).
lattice_symbols = {
    'P': [[0, 0, 0]],
    'A': [[0, 0, 0], [0, 1./2, 1./2]],
    'B': [[0, 0, 0], [1./2, 0, 1./2]],
    'C': [[0, 0, 0], [1./2, 1./2, 0]],
    'I': [[0, 0, 0], [1./2, 1./2, 1./2]],
    'R': [[0, 0, 0], [2./3, 1./3, 1./3], [1./3, 2./3, 2./3]],
    'H': [[0, 0, 0], [2./3, 1./3, 0], [1./3, 2./3, 0]],
    'F': [[0, 0, 0], [0, 1./2, 1./2], [1./2, 0, 1./2], [1./2, 1./2, 0]]
}
# 3x3 rotation matrices for the Hall rotation symbols. Keys combine the
# rotation order with the axis: x/y/z are the principal axes; the primed
# twofolds ('2p*', '2pp*') are the face-diagonal axes noted in the inline
# comments (e.g. a-b, a+b); '3*' is the body-diagonal threefold.
rotation_matrices = {
    '1x': [[1, 0, 0],
           [0, 1, 0],
           [0, 0, 1]],
    '1y': [[1, 0, 0],
           [0, 1, 0],
           [0, 0, 1]],
    '1z': [[1, 0, 0],
           [0, 1, 0],
           [0, 0, 1]],
    '2x': [[1, 0, 0],
           [0, -1, 0],
           [0, 0, -1]],
    '2y': [[-1, 0, 0],
           [0, 1, 0],
           [0, 0, -1]],
    '2z': [[-1, 0, 0],
           [0, -1, 0],
           [0, 0, 1]],
    '3x': [[1, 0, 0],
           [0, 0, -1],
           [0, 1, -1]],
    '3y': [[-1, 0, 1],
           [0, 1, 0],
           [-1, 0, 0]],
    '3z': [[0, -1, 0],
           [1, -1, 0],
           [0, 0, 1]],
    '4x': [[1, 0, 0],
           [0, 0, -1],
           [0, 1, 0]],
    '4y': [[0, 0, 1],
           [0, 1, 0],
           [-1, 0, 0]],
    '4z': [[0, -1, 0],
           [1, 0, 0],
           [0, 0, 1]],
    '6x': [[1, 0, 0],
           [0, 1, -1],
           [0, 1, 0]],
    '6y': [[0, 0, 1],
           [0, 1, 0],
           [-1, 0, 1]],
    '6z': [[1, -1, 0],
           [1, 0, 0],
           [0, 0, 1]],
    '2px': [[-1, 0, 0],  # b-c
            [0, 0, -1],
            [0, -1, 0]],
    '2ppx': [[-1, 0, 0],  # b+c
             [0, 0, 1],
             [0, 1, 0]],
    '2py': [[0, 0, -1],  # a-c
            [0, -1, 0],
            [-1, 0, 0]],
    '2ppy': [[0, 0, 1],  # a+c
             [0, -1, 0],
             [1, 0, 0]],
    '2pz': [[0, -1, 0],  # a-b
            [-1, 0, 0],
            [0, 0, -1]],
    '2ppz': [[0, 1, 0],  # a+b
             [1, 0, 0],
             [0, 0, -1]],
    '3*': [[0, 0, 1],  # a+b+c
           [1, 0, 0],
           [0, 1, 0]]
}
# Hall translation symbols mapped to fractional translation vectors.
# Alphabetic subscripts in a matrix symbol add these vectors to the
# generator's translation part (see HallSymbol._generators()).
translations = {
    'a': [1./2, 0, 0],
    'b': [0, 1./2, 0],
    'c': [0, 0, 1./2],
    'n': [1./2, 1./2, 1./2],
    'u': [1./4, 0, 0],
    'v': [0, 1./4, 0],
    'w': [0, 0, 1./4],
    'd': [1./4, 1./4, 1./4]
}
def read_spg_csv(filename="spg.csv"):
    """Read the space-group table and return a list of
    [hall_symbol, spacegroup_symbol] pairs (columns 6 and 4 of each row).

    :param filename: Path to the CSV file (default "spg.csv").
    :return: list of two-element lists of strings.
    """
    hall_symbols = []
    # Use a context manager so the file handle is always closed
    # (the original left the file open).
    with open(filename) as f:
        for line in f:
            data = line.split(',')
            hall_symbols.append([data[6], data[4]])
    return hall_symbols
class HallSymbol:
    """Expand a Hall space-group symbol into its full set of symmetry
    operations (rotation matrices G_R and translation vectors G_T).

    The symbol is first decomposed into a lattice part L, a list of
    matrix symbols N and an optional change-of-basis part V; the group
    is then generated by closing over the generators and applying the
    lattice centring translations.
    """

    def __init__(self, hall_symbol):
        # hall_symbol: Hall notation with space-separated parts,
        # e.g. "-P 2yb" -> ['-P', '2yb']
        self.hall_symbol = hall_symbol.split()
        self._decompose()
        self._full_operations()

    def get_LNV(self):
        """Return the decomposed symbol: lattice symbol L, matrix symbols N
        and change-of-basis vector V (None if the symbol has none)."""
        return self.L, self.N, self.V

    def get_operations(self):
        """Return the full group as (rotations, translations) numpy arrays."""
        return self.G_R, self.G_T

    def _full_operations(self):
        """Generate the complete operation set from the decomposed symbol."""
        gens_R, gens_T = self._generators()
        E = np.array(rotation_matrices['1x'])  # identity rotation
        T0 = np.zeros(3, dtype=float)          # zero translation
        # A leading '-' on the lattice symbol marks a centrosymmetric
        # group: seed the group with {E, -E} instead of {E}.
        if self.L[0] == '-':
            G_R = [E, -E]
            G_T = [T0, T0]
        else:
            G_R = [E]
            G_T = [T0]
        # Close the group over each generator in turn.
        for r, t in zip(gens_R, gens_T):
            G2_R, G2_T = self._get_group(r, t)
            G_R, G_T = self._multiply_groups(G_R, G_T, G2_R, G2_T)
        if self.V is not None:
            G_T = self._change_of_basis(G_R, G_T)
        # Replicate every operation once per lattice centring vector.
        G_R_with_centres = []
        G_T_with_centred = []
        for t in lattice_symbols[self.L[-1]]:
            self._lattice_translation(G_R_with_centres,
                                      G_T_with_centred,
                                      G_R, G_T, t)
        self.G_R = np.array(G_R_with_centres)
        self.G_T = np.array(G_T_with_centred)
        # Make sure the first operation has no rotation.
        assert (self.G_R[0] == rotation_matrices['1x']).all()
        # In Hall numbers 212, 213, 214, the first operation has non-zero
        # translation. This translation is subtracted from all operations.
        self.G_T -= self.G_T[0]
        # Wrap all translations into [0, 1) (with a small tolerance).
        self.G_T -= np.rint(self.G_T)
        cond = self.G_T < -1e-3
        self.G_T[cond] += 1

    def _change_of_basis(self, G_R, G_T):
        """Apply the origin shift V (given in twelfths) to every
        translation: t' = -R.v + t + v."""
        G_T_new = []
        v = self.V.astype(float) / 12
        for r, t in zip(G_R, G_T):
            G_T_new.append(-np.dot(r, v) + t + v)
        return G_T_new

    def _lattice_translation(self, G_R, G_T, G_R0, G_T0, translation):
        """Append copies of (G_R0, G_T0) shifted by one centring vector."""
        for r, t in zip(G_R0, G_T0):
            G_R.append(r.copy())
            t_new = t + translation
            G_T.append(t_new)

    def _multiply_groups(self, G1_R, G1_T, G2_R, G2_T):  # G2xG1
        """Return all pairwise products of the two operation sets."""
        G_R = []
        G_T = []
        for r1, t1 in zip(G2_R, G2_T):
            for r2, t2 in zip(G1_R, G1_T):
                G_R.append(np.dot(r1, r2))
                G_T.append(np.dot(r1, t2) + t1)
        return G_R, G_T

    def _get_group(self, r, t):
        """Return the cyclic group generated by (r, t), identity first."""
        G_R = [r, ]
        G_T = [t, ]
        # Keep multiplying by the generator until we return to identity.
        while not (G_R[-1] == rotation_matrices['1x']).all():
            _r = np.dot(G_R[-1], r)
            _t = np.dot(G_R[-1], t) + G_T[-1]
            G_R.append(_r)
            G_T.append(_t)
        # Bring identity in front
        _r = G_R.pop()
        _t = G_T.pop()
        G_R.insert(0, _r)
        G_T.insert(0, _t)
        return G_R, G_T

    # def _get_group(self, r, t):
    #     G_R, G_T = self._get_group_recursive([np.array(r)], [np.array(t)])
    #     r = G_R.pop()
    #     t = G_T.pop()
    #     G_R.insert(0, r)
    #     G_T.insert(0, t)
    #     return G_R, G_T

    # def _get_group_recursive(self, G_R, G_T):
    #     if not (G_R[-1] == rotation_matrices['1x']).all():
    #         r = np.dot(G_R[-1], G_R[0])
    #         t = np.dot(G_R[-1], G_T[0]) + G_T[-1]
    #         G_R.append(r)
    #         G_T.append(t)
    #         self._get_group_recursive(G_R, G_T)
    #     return G_R, G_T

    def _generators(self):
        """Translate the matrix symbols N into generator rotation matrices
        R and translation vectors T."""
        R = []
        T = []
        for N in self.N:
            # N = [improper_flag, rotation_order, axis, translation_symbols]
            rot = np.array(rotation_matrices[N[1] + N[2]])
            if N[0] == '-':
                rot = -rot  # improper (rotoinversion) operation
            R.append(rot)
            trans = np.zeros(3, dtype=float)
            if N[3] is not None:
                for t in N[3]:
                    if t in ('1', '2', '3', '4', '5'):
                        # Numeric subscript: screw translation t/order
                        # along the rotation axis.
                        trans_screw = float(t) / int(N[1])
                        if N[2] == 'x':
                            trans[0] += trans_screw
                        elif N[2] == 'y':
                            trans[1] += trans_screw
                        elif N[2] == 'z':
                            trans[2] += trans_screw
                        else:
                            raise
                    else:
                        # Alphabetic subscript: fixed glide vector
                        trans += np.array(translations[t])
            T.append(trans)
        return np.array(R, dtype=int), np.array(T, dtype=float)

    def _rotation_matrix(self, str):
        # NOTE(review): unused placeholder; the parameter name also
        # shadows the builtin 'str'.
        pass

    # Decompose Hall symbol
    # The following methods are used by _decompose().
    def _decompose(self):
        """Split self.hall_symbol into lattice symbol L, matrix symbols N
        and the optional change-of-basis vector V."""
        L = self.hall_symbol.pop(0)
        N = []
        V = None
        precededN = 0  # rotation order of the preceding matrix symbol
        for i, ms in enumerate(self.hall_symbol):
            if ms[0] == '(':
                # '(...)' introduces the change-of-basis part; only the
                # third component is inspected here.
                V = self._change_of_basis_symbol(self.hall_symbol[i + 2])
                break
            else:
                N.append(self._matrix_symbol(ms, i, precededN))
                precededN = int(N[-1][1][0])
        self.L = L
        self.N = N
        self.V = V

    def _matrix_symbol(self, N, i, precededN):
        """Parse one matrix symbol into [improper, rotation, axis,
        translation_symbols]."""
        if N[0] == '-':
            improper = '-'
            N = N[1:]
        else:
            improper = None
        N, R, A = self._rotation(N, i, precededN)
        if len(N) > 0:
            T = self._translation(N)
        else:
            T = None
        return [improper, R, A, T]

    def _rotation(self, N, i, precededN):
        """Determine the rotation order R and axis A of a matrix symbol,
        applying the Hall implicit-axis rules for positions i > 0."""
        A = None
        if N[0] == '2':
            if len(N) > 1:  # 2"
                if N[1] == '=':
                    R = '2pp'
                    A = 'z'
                    N = N[2:]
            if i == 1 and A is None:
                # Implicit axis of a second-position twofold depends on
                # the order of the preceding rotation.
                if precededN == 2 or precededN == 4:  # 2x
                    R = '2'
                    A = 'x'
                    N = N[1:]
                elif precededN == 3 or precededN == 6:  # 2'
                    R = '2p'
                    A = 'z'
                    N = N[1:]
        elif N[0] == '3':  # 3*
            if i == 2:
                # A threefold in third position is the body-diagonal axis.
                R = '3'
                A = '*'
                N = N[1:]
            elif len(N) > 1:
                if N[1] == '*':
                    R = '3'
                    A = '*'
                    N = N[2:]
        if A is None:
            # No special case matched: plain rotation, axis is explicit
            # for the first symbol, otherwise defaults to z.
            R = N[0]
            N = N[1:]
            if len(N) > 0 and i == 0:
                N, A = self._principal_axis(N)
            else:
                A = 'z'
        return N, R, A

    def _principal_axis(self, N):
        """Consume an explicit axis character ('x' or 'y'); default 'z'."""
        if N[0] == 'x':
            return N[1:], 'x'
        if N[0] == 'y':
            return N[1:], 'y'
        return N, 'z'

    def _translation(self, N):
        """Return the remaining characters as a list of translation symbols."""
        T = []
        for i in range(len(N)):
            T.append(N[i])
        return T

    def _change_of_basis_symbol(self, V):
        """Parse one change-of-basis component; only the sign of a z-shift
        is handled here."""
        if V[0] == '-':
            return np.array([0, 0, -1])
        else:
            return np.array([0, 0, 1])
def dump_operations(filename):
hall_symbols = read_spg_csv(filename)
count = 0
print(" 0 , /* dummy */")
for i in range(530):
hs = HallSymbol(hall_symbols[i][0])
G_R, G_T = hs.get_operations()
for j, (r, t) in enumerate(zip(G_R, G_T)):
count += 1
r_encode = encode_rotation(r)
x = np.rint(t * 12).astype(int)
t_encode = x[0] * 144 + x[1] * 12 + x[2]
total = t_encode * 3 ** 9 + r_encode
text = " %-8d," % (total)
text += " /* %4d (%3d) [" % (count, i + 1)
text += "%2d," * 9 % tuple(decode_rotation(total % (3**9)))
text += "%2d,%2d,%2d] */" % tuple(decode_trans(total // (3**9)))
print(text)
def dump_operations_old(filename):
hall_symbols = read_spg_csv(filename)
count = 0
for i in range(530):
hs = HallSymbol(hall_symbols[i][0])
G_R, G_T = hs.get_operations()
for j, (r, t) in enumerate(zip(G_R, G_T)):
count += 1
text = "{%3d," % (i + 1)
text += "%2d,%2d,%2d,%2d,%2d,%2d,%2d,%2d,%2d," % tuple(r.ravel())
text += "%2d,%2d,%2d" % tuple((t * 12 + 0.1).astype(int))
text += "}, /* %4d */" % count
print(text)
# Ternary numerical system
def encode_rotation(r):
    """Encode a 3x3 rotation matrix with entries in {-1, 0, 1} as one
    integer: the 9 flattened entries, shifted to {0, 1, 2}, read as a
    base-3 number with the first entry as the most significant digit."""
    code = 0
    for entry in r.ravel():
        code = code * 3 + (entry + 1)  # Horner evaluation of the base-3 digits
    return code
def decode_rotation(c):
r = []
for i in range(8, -1, -1):
r.append((c % (3**(i+1))) | |
Called from parser_thread().
"""
if not self.s.serial_read(): # read all available data from serial port
self.disable_receiver()
return
if len(self.s.rx_buff): # if rx buffer is not empty
if self.__rx_state == _SDP_RX_IDLE:
self.__search_for_sof()
elif self.__rx_state == _SDP_RX_ACK:
self.__search_for_ack()
elif self.__rx_state == _SDP_RX_RECEIVING:
self.__append_new_data()
self.__rx_frame_timeout()
elif self.__rx_state == _SDP_RX_DLE:
self.__check_if_eof()
self.__rx_frame_timeout()
else:
self.debug(50)
self.__rx_state = _SDP_RX_IDLE
else: # buffer is empty
# if frame reception is in progress, check timeout
if self.__rx_state != _SDP_RX_IDLE:
self.__rx_frame_timeout()
########################################################################################
def send_data(self, payload):
    """
    Transmit data and wait for response, retrying up to SDP_RETRANSMIT
    times if necessary.

    :param payload: sequence of byte values (0 - 255) to send
    :return: tuple (status, response): status is True on success,
        response is the received payload (empty list on failure)
    """
    if not self.status():  # check if serial port is opened
        self.debug('serial port is not open')
        return (False, [])
    # check if data elements fit in byte (0 - 255)
    if not self.__check_data(payload):
        self.debug('invalid payload data')
        return (False, [])
    retransmit_count = 0
    while retransmit_count < SDP_RETRANSMIT:
        (status, frame) = self.__compose_frame(payload)
        if status:
            if self.__transmit_data(frame):
                # Frame is on the wire; busy-wait (yielding the GIL) until
                # the parser thread clears __expect_response or the
                # response timeout expires.
                response_timeout = systime.time() + self.response_timeout
                self.__rx_state = _SDP_RX_IDLE
                self.__expect_response = True
                while self.__expect_response:
                    # TODO
                    systime.sleep(0)  # python v3 threading error solved with this
                    # https://stackoverflow.com/questions/48356615/python-v3-threading-and-os-context-switching-changed-from-v2
                    # https://stackoverflow.com/questions/48198172/python-v2-7-and-v3-6-behave-differently-but-the-same
                    # all incoming data are parsed in parser thread
                    if systime.time() > response_timeout:  # check for response timeout
                        # response not received in time
                        self.debug('timeout expecting reseponse')
                        break
                if not self.__expect_response:  # parser cleared flag - response received
                    if self.ack == SDP_ACK:
                        return (True, self.rx_payload)  # success
                    else:
                        # response received, but CRC validation failed -> retry
                        self.debug('CRC validation failure')
                        # delay to avoid receiver overrun
                        systime.sleep(SDP_DEFAULT_RETRANSMIT_DELAY)
                # else: parser didn't clear expect_response flag, reseponse not received in time
            else:  # frame transmission unsuccessful
                self.debug('transmission failure (take %s)' %
                           (retransmit_count + 1))
                # retry
                systime.sleep(SDP_DEFAULT_RETRANSMIT_DELAY)
        else:  # frame composition error
            # Composition failure is not retried - the payload would fail
            # again identically.
            self.debug('frame composition')
            return (False, [])
        retransmit_count = retransmit_count + 1
    return (False, [])  # loop didn't return while executing, error occured
########################################################################################
def send_response(self, payload):
    """
    Compose a frame with the given payload and transmit it if frame
    composition succeeded. An ACK frame is sent when self.ack == SDP_ACK,
    otherwise a NACK frame.

    :param payload: sequence of byte values (0 - 255) to send back
    :return: True on successful transmission, False otherwise
    """
    if not self.status():  # check if serial port is opened
        self.debug('serial port is not open')
        return False
    # check if data elements fit in byte (0 - 255)
    if not self.__check_data(payload):
        self.debug('invalid payload data')
        # BUG FIX: this path used to return (False, []) although every
        # other path of this method returns a plain bool.
        return False
    if self.ack == SDP_ACK:
        (status, frame) = self.__compose_frame(payload)
        if not status:
            self.debug('frame composition')
            return False
    else:  # ack = NACK (CRC values does not match)
        (status, frame) = self.__compose_nack_frame(payload)
        if not status:
            self.debug('NACK frame composition')
            return False
    # frame composition OK, send data
    if self.__transmit_data(frame):
        return True
    else:
        self.debug('transmission failure')
        return False
########################################################################################
def send_dummy_response(self):
    """
    Send the minimal SDP frame: just SOF, the ack field and EOF, with no
    payload. Returns True on success, False otherwise.
    """
    if not self.status():  # serial port must be open
        self.debug('serial port is not open')
        return False
    self.ack = SDP_ACK
    frame = [_SDP_SOF, self.ack, _SDP_EOF]
    if not self.__transmit_data(frame):
        self.debug('transmission failure')
        return False
    return True
########################################################################################
def __transmit_data(self, frame):
    """
    Write the frame bytes to the node's serial port.
    Returns True on success, False otherwise.
    """
    ok = self.s.serial_write(frame)
    if not ok:
        # a failed write signals the worker thread to stop
        self.__thread_stop_flag = True
    return ok
########################################################################################
def __handle_message(self):
    """
    Dispatch a fully received frame: either clear the expect_response
    flag (we were waiting for a reply), hand a good message to the user
    handler, or echo the payload back as a NACK response on CRC failure.
    """
    if self.__expect_response:
        # this frame is the reply to our own transmission
        self.__expect_response = False
        return
    if self.ack == SDP_ACK:
        # message received correctly, pass it to user
        self.user_message_handler(self.id, self.rx_payload)
    elif not self.send_response(self.rx_payload):
        # message CRC failure: return the received payload to the sender
        self.debug('send response failure')
########################################################################################
def __search_for_sof(self):
    """Discard garbage bytes until a start-of-frame byte is found."""
    while True:
        status, byte = self.s.get_rx_buff_byte()
        if not status:
            # rx buffer exhausted without seeing SOF
            return
        if byte == _SDP_SOF:
            self.__rx_state = _SDP_RX_ACK
            self.ack = SDP_ACK
            self.__rx_start_time = systime.time()
            return
########################################################################################
def __search_for_ack(self):
    """Consume the ack byte that immediately follows SOF."""
    status, byte = self.s.get_rx_buff_byte()
    if not status:
        return
    self.ack = byte
    self.__rx_state = _SDP_RX_RECEIVING
    self.rx_payload = []  # start a fresh payload buffer
########################################################################################
    def __append_new_data(self):
        """Drain the rx buffer while in the RECEIVING state.

        Each byte is handled as one of:

        * DLE   -> next byte is a stuffed (escaped) value; switch state.
        * EOF   -> frame complete; validate CRC and dispatch the message.
        * other -> plain payload byte, appended while within size limits.
        """
        (status, byte) = self.s.get_rx_buff_byte()
        while status:
            if byte == _SDP_DLE:
                self.__rx_state = _SDP_RX_DLE
                return
            elif byte == _SDP_EOF:
                self.__rx_state = _SDP_RX_IDLE
                if len(self.rx_payload) == 0:  # empty payload, dummy response or frame error
                    if self.__expect_response:
                        self.__expect_response = False  # reset flag
                        # node->ack field is then checked in send_data()
                    else:  # node is not expecting response, so this frame is corrupted or other error occurred.
                        self.debug(
                            'empty payload while not expecting response')
                    # in both cases (expecting response or frame error), return
                    return
                # payload not empty, continue checking and handling message
                if not self.__check_rx_message():
                    self.ack = SDP_NACK
                    # message CRC validation error
                    self.debug('CRC validation failure')
                # Strip the trailing CRC bytes before delivering the payload.
                for _ in range(_SDP_CRC_SIZE):
                    self.rx_payload.pop()  # clear last elements of payload, since they are CRC
                self.__handle_message()  # handle message upon expect_response flag, NACK and payload
                return  # even if bytes are still in rx buffer, start with searching for SOF
            else:  # received character is not DLE or EOF, append data to payload
                if len(self.rx_payload) < (self.max_payload_size + _SDP_CRC_SIZE):
                    self.rx_payload.append(byte)
                else:  # discard data, payload size out of range before EOF
                    self.__rx_state = _SDP_RX_IDLE
                    self.debug('payload oversized')
                    return
            # handle all bytes in serial rx buffer
            (status, byte) = self.s.get_rx_buff_byte()
        # end of while loop, no data to handle
########################################################################################
    def __check_if_eof(self):
        """Handle the byte following a DLE escape character.

        Inside a frame, DLE only ever precedes a stuffed special
        character, so the next byte must XOR-decode (with _SDP_DLE_XOR)
        to SOF, DLE or EOF.  Anything else is a framing error and resets
        the receiver to idle.
        """
        (status, byte) = self.s.get_rx_buff_byte()
        if status:
            if (byte == (_SDP_DLE ^ _SDP_DLE_XOR)) or \
                    (byte == (_SDP_SOF ^ _SDP_DLE_XOR)) or \
                    (byte == (_SDP_EOF ^ _SDP_DLE_XOR)):
                self.__rx_state = _SDP_RX_RECEIVING
                if len(self.rx_payload) < (self.max_payload_size + _SDP_CRC_SIZE):
                    # Store the un-escaped original value.
                    self.rx_payload.append(byte ^ _SDP_DLE_XOR)
                else:
                    # NOTE(review): unlike __append_new_data, the oversize
                    # path here leaves the state at RECEIVING instead of
                    # resetting to idle - confirm this is intended.
                    self.debug('payload oversized')
                    return
            else:  # framing error, DLE should never appear on its own in message
                self.__rx_state = _SDP_RX_IDLE
                self.debug('corrupted data, standalone DLE')
########################################################################################
def __rx_frame_timeout(self):
""" Check if frame (and character EOF) arrived in rx_frame_timeout """
if systime.time() > (self.__rx_start_time + self.rx_frame_timeout):
self.__rx_state = _SDP_RX_IDLE
self.rx_payload = [] # discard payload data
self.debug('receiving frame timeout')
return False
else:
return True
########################################################################################
def __check_rx_message(self):
"""
Get message CRC value and compare it with received payload calulated CRC value
Returns True if crc values match, False otherwise
"""
(status, crc_value) = self.__calculate_crc(self.rx_payload)
if status:
# TODO if _SDP_CRC_SIZE is changed to diferent value
if crc_value == [0, 0]:
return True
else:
return False
else: # calculating crc value error
self.debug('calculating CRC value failure')
return False
########################################################################################
def __calculate_crc(self, data):
"""
Calculate CRC-16 value upon data (array of bytes)
Returns tuple of status and array of bytes
"""
if len(data) == 0:
return (True, [0, 0])
# prepare data for crc calculation, check python version
if sys.version[0] >= '3':
# python 3.x
data = bytearray(data)
else:
# python 2.x
x = ''
for d in data:
x = x + chr(d)
data = x
crc_value = self.crc16(data)
# TODO if _SDP_CRC_SIZE is changed to diferent value
if crc_value <= 0xFFFF: # crc_value must fit in _SDP_CRC_SIZE number of bytes
msb = crc_value >> 8
lsb = crc_value & 0x00FF
crc = [msb, lsb]
return (True, crc)
else:
self.debug('CRC value > SDP_CRC_SIZE bytes')
return (False, [])
########################################################################################
def __compose_frame(self, payload):
"""
Compose frame accordingly to SDP protocol
Returns status and array of bytes
"""
frame = []
frame.append(_SDP_SOF)
frame.append(SDP_ACK)
for b in payload:
# check for special characters
if (b == _SDP_SOF) or \
(b == _SDP_DLE) or \
(b == _SDP_EOF):
frame.append(_SDP_DLE)
frame.append(b ^ _SDP_DLE_XOR)
else: # byte is not a special character
frame.append(b)
(status, crc) = self.__calculate_crc(
payload) # calculate payload CRC data
if not status:
self.debug('calculating CRC failure')
return (False, [])
for c in crc: # append crc data with special characters check
# check for special characters
if (c == _SDP_SOF) or \
(c == _SDP_DLE) or \
(c == _SDP_EOF):
frame.append(_SDP_DLE)
frame.append(c ^ _SDP_DLE_XOR)
else: # byte is not a special character
frame.append(c)
| |
import datetime as dt
import json
import logging
import tempfile
from copy import copy
from pathlib import Path
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
import numpy as np
from osgeo import gdal
from te_schemas import land_cover
from te_schemas.aoi import AOI
from te_schemas.datafile import combine_data_files
from te_schemas.datafile import DataFile
from te_schemas.jobs import Job
from te_schemas.productivity import ProductivityMode
from te_schemas.results import Band
from te_schemas.results import DataType
from te_schemas.results import Raster
from te_schemas.results import RasterFileType
from te_schemas.results import RasterResults
from te_schemas.results import URI
from . import config
from . import models
from . import worker
from .. import util
from .. import workers
from .. import xl
from ... import __release_date__
from ... import __version__
from ..util_numba import bizonal_total
from ..util_numba import zonal_total
from ..util_numba import zonal_total_weighted
from .land_deg_numba import calc_deg_sdg
from .land_deg_numba import calc_lc_trans
from .land_deg_numba import calc_prod5
from .land_deg_numba import prod5_to_prod3
from .land_deg_numba import recode_deg_soc
from .land_deg_numba import recode_indicator_errors
from .land_deg_numba import recode_state
from .land_deg_numba import recode_traj
from .land_deg_progress import compute_progress_summary
from .land_deg_report import save_reporting_json
from .land_deg_report import save_summary_table_excel
logger = logging.getLogger(__name__)
def _accumulate_ld_summary_tables(
    tables: List[models.SummaryTableLD]
) -> models.SummaryTableLD:
    """Merge per-tile SummaryTableLD objects into a single table.

    Dict-valued fields are summed with ``util.accumulate_dicts``; list
    fields are merged element-wise.

    NOTE(review): accumulation happens in place on ``tables[0]``, so the
    first input table is mutated and returned - confirm callers do not
    rely on it staying unchanged.
    """
    if len(tables) == 1:
        return tables[0]
    out = tables[0]
    for table in tables[1:]:
        out.soc_by_lc_annual_totals = [
            util.accumulate_dicts([a, b]) for a, b in
            zip(out.soc_by_lc_annual_totals, table.soc_by_lc_annual_totals)
        ]
        out.lc_annual_totals = [
            util.accumulate_dicts([a, b])
            for a, b in zip(out.lc_annual_totals, table.lc_annual_totals)
        ]
        out.lc_trans_zonal_areas = [
            util.accumulate_dicts([a, b]) for a, b in
            zip(out.lc_trans_zonal_areas, table.lc_trans_zonal_areas)
        ]
        # A period should be listed for each object in lc_trans_zonal_areas
        assert (
            len(out.lc_trans_zonal_areas
                ) == len(table.lc_trans_zonal_areas_periods)
        )
        # Periods for lc_trans_zonal_areas must be the same in both objects
        assert (
            out.lc_trans_zonal_areas_periods ==
            table.lc_trans_zonal_areas_periods
        )
        out.lc_trans_prod_bizonal = util.accumulate_dicts(
            [out.lc_trans_prod_bizonal, table.lc_trans_prod_bizonal]
        )
        out.lc_trans_zonal_soc_initial = util.accumulate_dicts(
            [out.lc_trans_zonal_soc_initial, table.lc_trans_zonal_soc_initial]
        )
        out.lc_trans_zonal_soc_final = util.accumulate_dicts(
            [out.lc_trans_zonal_soc_final, table.lc_trans_zonal_soc_final]
        )
        out.sdg_zonal_population_total = util.accumulate_dicts(
            [out.sdg_zonal_population_total, table.sdg_zonal_population_total]
        )
        out.sdg_zonal_population_male = util.accumulate_dicts(
            [out.sdg_zonal_population_male, table.sdg_zonal_population_male]
        )
        out.sdg_zonal_population_female = util.accumulate_dicts(
            [
                out.sdg_zonal_population_female,
                table.sdg_zonal_population_female
            ]
        )
        out.sdg_summary = util.accumulate_dicts(
            [out.sdg_summary, table.sdg_summary]
        )
        # prod/soc summaries are keyed dicts of dicts: merge key by key.
        assert set(out.prod_summary.keys()) == set(table.prod_summary.keys())
        out.prod_summary = {
            key: util.accumulate_dicts(
                [out.prod_summary[key], table.prod_summary[key]]
            )
            for key in out.prod_summary.keys()
        }
        assert set(out.soc_summary.keys()) == set(table.soc_summary.keys())
        out.soc_summary = {
            key: util.accumulate_dicts(
                [out.soc_summary[key], table.soc_summary[key]]
            )
            for key in out.soc_summary.keys()
        }
        out.lc_summary = util.accumulate_dicts(
            [out.lc_summary, table.lc_summary]
        )
    return out
def _prepare_land_cover_dfs(params: Dict) -> List[DataFile]:
    """Build the land-cover DataFiles referenced by *params*.

    Returns the degradation band first, then any auxiliary bands, then
    the transition band, each backed by a freshly saved VRT.
    """
    lc_path = params["layer_lc_path"]
    deg_df = DataFile(
        path=util.save_vrt(lc_path, params["layer_lc_deg_band_index"]),
        bands=[Band(**params["layer_lc_deg_band"])]
    )
    aux_dfs = [
        DataFile(
            path=util.save_vrt(lc_path, band_index),
            bands=[Band(**band)]
        )
        for band, band_index in zip(
            params["layer_lc_aux_bands"], params["layer_lc_aux_band_indexes"]
        )
    ]
    trans_df = DataFile(
        path=util.save_vrt(
            params["layer_lc_trans_path"],
            params["layer_lc_trans_band_index"],
        ),
        bands=[Band(**params["layer_lc_trans_band"])]
    )
    return [deg_df] + aux_dfs + [trans_df]
def _prepare_population_dfs(params: Dict) -> List[DataFile]:
    """Build one DataFile per population layer referenced in *params*.

    Fix: the return annotation previously claimed a single ``DataFile``
    even though a list is always returned; it now matches the actual
    return value (callers such as len()/iteration rely on a list).
    """
    population_dfs = []
    for population_band, population_band_index, path in zip(
        params["layer_population_bands"],
        params["layer_population_band_indexes"],
        params["layer_population_paths"]
    ):
        population_dfs.append(
            DataFile(
                path=util.save_vrt(path, population_band_index),
                bands=[Band(**population_band)]
            )
        )
    return population_dfs
def _prepare_soil_organic_carbon_dfs(params: Dict) -> List[DataFile]:
    """Build the soil-organic-carbon DataFiles referenced by *params*.

    Returns the SOC degradation band first, followed by any auxiliary
    bands, each backed by a freshly saved VRT.
    """
    soc_path = params["layer_soc_path"]
    deg_df = DataFile(
        path=util.save_vrt(soc_path, params["layer_soc_deg_band_index"]),
        bands=[Band(**params["layer_soc_deg_band"])]
    )
    aux_dfs = [
        DataFile(
            path=util.save_vrt(soc_path, band_index),
            bands=[Band(**band)]
        )
        for band, band_index in zip(
            params["layer_soc_aux_bands"], params["layer_soc_aux_band_indexes"]
        )
    ]
    return [deg_df] + aux_dfs
def _prepare_trends_earth_mode_dfs(
    params: Dict
) -> Tuple[DataFile, DataFile, DataFile]:
    """Build the trajectory, performance and state DataFiles used by the
    Trends.Earth 5-class productivity mode, in that order."""
    dfs = []
    for layer in ('traj', 'perf', 'state'):
        dfs.append(
            DataFile(
                path=util.save_vrt(
                    params[f"layer_{layer}_path"],
                    params[f"layer_{layer}_band_index"],
                ),
                bands=[Band(**params[f"layer_{layer}_band"])]
            )
        )
    return dfs[0], dfs[1], dfs[2]
def _prepare_jrc_lpd_mode_df(params: Dict) -> DataFile:
    """Build the DataFile for the JRC Land Productivity Dynamics layer."""
    lpd_vrt = util.save_vrt(
        params["layer_lpd_path"], params["layer_lpd_band_index"]
    )
    lpd_band = Band(**params["layer_lpd_band"])
    return DataFile(path=lpd_vrt, bands=[lpd_band])
def summarise_land_degradation(
    ldn_job: Job, aoi: AOI, job_output_path: Path
) -> Job:
    """Calculate final SDG 15.3.1 indicator and save to disk.

    For each period in ``ldn_job.params`` this builds the input VRT stack
    (land cover, SOC, productivity and population layers), computes the
    per-period summary table and SDG raster, then combines all periods
    into one overall VRT plus Excel/JSON reports written next to
    ``job_output_path``.  Returns ``ldn_job`` with its results attached.
    """
    logger.debug('at top of compute_ldn')
    summary_tables = {}
    summary_table_stable_kwargs = {}
    period_dfs = []
    period_vrts = []
    for period_name, period_params in ldn_job.params.items():
        # Gather the input DataFiles for this period.
        lc_dfs = _prepare_land_cover_dfs(period_params)
        soc_dfs = _prepare_soil_organic_carbon_dfs(period_params)
        population_dfs = _prepare_population_dfs(period_params)
        logger.debug('len(population_dfs) %s', len(population_dfs))
        logger.debug('population_dfs %s', population_dfs)
        sub_job_output_path = job_output_path.parent / f"{job_output_path.stem}_{period_name}.json"
        prod_mode = period_params["prod_mode"]
        period_params['periods'] = {
            'land_cover': period_params["layer_lc_deg_years"],
            'soc': period_params["layer_soc_deg_years"]
        }
        if prod_mode == ProductivityMode.TRENDS_EARTH_5_CLASS_LPD.value:
            period_params['periods']['productivity'] = period_params[
                "layer_traj_years"]
        elif prod_mode == ProductivityMode.JRC_5_CLASS_LPD.value:
            period_params['periods']['productivity'] = period_params[
                "layer_lpd_years"]
        else:
            raise Exception(f"Unknown productivity mode {prod_mode}")
        # Add in period start/end if it isn't already in the parameters
        # (wouldn't be if these layers were all run individually and not
        # with the all-in-one tool)
        if 'period' not in period_params:
            period_params["period"] = {
                "name":
                period_name,
                "year_initial":
                period_params['periods']['productivity']['year_initial'],
                "year_final":
                period_params['periods']['productivity']['year_final']
            }
        nesting = period_params["layer_lc_deg_band"]["metadata"].get('nesting')
        if nesting:
            # Nesting is included only to ensure it goes into output, so if
            # missing (as it might be for local data), it will be set to None
            nesting = land_cover.LCLegendNesting.Schema().loads(nesting)
        # Keyword arguments shared by the summary computation below and by
        # the Excel/JSON reporting at the end.
        summary_table_stable_kwargs[period_name] = {
            "aoi":
            aoi,
            "lc_legend_nesting":
            nesting,
            "lc_trans_matrix":
            land_cover.LCTransitionDefinitionDeg.Schema().loads(
                period_params["layer_lc_deg_band"]["metadata"]['trans_matrix'],
            ),
            # "soc_legend_nesting":
            # land_cover.LCLegendNesting.Schema().loads(
            #     period_params["layer_soc_deg_band"]["metadata"]['nesting'], ),
            # "soc_trans_matrix":
            # land_cover.LCTransitionDefinitionDeg.Schema().loads(
            #     period_params["layer_soc_deg_band"]["metadata"]
            #     ['trans_matrix'], ),
            "output_job_path":
            sub_job_output_path,
            "period_name":
            period_name,
            "periods":
            period_params['periods'],
        }
        if prod_mode == ProductivityMode.TRENDS_EARTH_5_CLASS_LPD.value:
            traj, perf, state = _prepare_trends_earth_mode_dfs(period_params)
            in_dfs = lc_dfs + soc_dfs + [traj, perf, state] + population_dfs
            summary_table, sdg_path, reproj_path = _compute_ld_summary_table(
                in_dfs=in_dfs,
                prod_mode=ProductivityMode.TRENDS_EARTH_5_CLASS_LPD.value,
                compute_bbs_from=traj.path,
                **summary_table_stable_kwargs[period_name]
            )
        elif prod_mode == ProductivityMode.JRC_5_CLASS_LPD.value:
            lpd_df = _prepare_jrc_lpd_mode_df(period_params)
            in_dfs = lc_dfs + soc_dfs + [lpd_df] + population_dfs
            summary_table, sdg_path, reproj_path = _compute_ld_summary_table(
                in_dfs=in_dfs,
                prod_mode=ProductivityMode.JRC_5_CLASS_LPD.value,
                compute_bbs_from=lpd_df.path,
                **summary_table_stable_kwargs[period_name],
            )
        else:
            raise RuntimeError(f"Invalid prod_mode: {prod_mode!r}")
        summary_tables[period_name] = summary_table
        # Describe the SDG band that heads the per-period raster stack.
        sdg_band = Band(
            name=config.SDG_BAND_NAME,
            no_data_value=config.NODATA_VALUE.item(),  # write as python type
            metadata={
                'year_initial': period_params['period']['year_initial'],
                'year_final': period_params['period']['year_final'],
            },
            activated=True
        )
        sdg_df = DataFile(sdg_path, [sdg_band])
        so3_band_total = _get_so3_band_instance(
            'total', period_params['periods']['productivity']
        )
        sdg_df.bands.append(so3_band_total)
        if _have_pop_by_sex(population_dfs):
            # Population is disaggregated by sex: add female/male SO3 bands.
            so3_band_female = _get_so3_band_instance(
                'female', period_params['periods']['productivity']
            )
            sdg_df.bands.append(so3_band_female)
            so3_band_male = _get_so3_band_instance(
                'male', period_params['periods']['productivity']
            )
            sdg_df.bands.append(so3_band_male)
        if prod_mode == ProductivityMode.TRENDS_EARTH_5_CLASS_LPD.value:
            prod_band = Band(
                name=config.TE_LPD_BAND_NAME,
                no_data_value=config.NODATA_VALUE.item(
                ),  # write as python type
                metadata={
                    'year_initial':
                    period_params['periods']['productivity']['year_initial'],
                    'year_final':
                    period_params['periods']['productivity']['year_final']
                },
                activated=True
            )
            sdg_df.bands.append(prod_band)
        reproj_df = combine_data_files(reproj_path, in_dfs)
        # Don't add any of the input layers to the map by default - only SDG,
        # prod, and SO3, which are already marked add_to_map=True
        for band in reproj_df.bands:
            band.add_to_map = False
        period_vrt = job_output_path.parent / f"{sub_job_output_path.stem}_rasterdata.vrt"
        util.combine_all_bands_into_vrt([sdg_path, reproj_path], period_vrt)
        # Now that there is a single VRT with all files in it, combine the DFs
        # into it so that it can be used to source band names/metadata for the
        # job bands list
        period_df = combine_data_files(period_vrt, [sdg_df, reproj_df])
        for band in period_df.bands:
            band.metadata['period'] = period_name
        period_dfs.append(period_df)
        period_vrts.append(period_vrt)
        summary_table_output_path = sub_job_output_path.parent / f"{sub_job_output_path.stem}.xlsx"
        save_summary_table_excel(
            summary_table_output_path, summary_table, period_params["periods"],
            period_params["layer_lc_years"], period_params["layer_soc_years"],
            summary_table_stable_kwargs[period_name]['lc_legend_nesting'],
            summary_table_stable_kwargs[period_name]['lc_trans_matrix'],
            period_name
        )
    # Two periods are interpreted as a baseline plus a progress period.
    if len(ldn_job.params.items()) == 2:
        # Make temporary combined VRT and DataFile just for the progress
        # calculations. Don't save these in the output folder as at end of this
        # process all the DFs will be combined and referenced to a VRT in that
        # folder
        temp_overall_vrt = Path(
            tempfile.NamedTemporaryFile(suffix='.vrt', delete=False).name
        )
        util.combine_all_bands_into_vrt(period_vrts, temp_overall_vrt)
        temp_df = combine_data_files(temp_overall_vrt, period_dfs)
        # NOTE(review): prod_mode here carries the value from the *last* loop
        # iteration - confirm both periods always share the same mode.
        progress_summary_table, progress_df = compute_progress_summary(
            temp_df, prod_mode, job_output_path, aoi,
            ldn_job.params['baseline']['period'],
            ldn_job.params['progress']['period']
        )
        period_vrts.append(progress_df.path)
        period_dfs.append(progress_df)
    else:
        progress_summary_table = None
    overall_vrt_path = job_output_path.parent / f"{job_output_path.stem}.vrt"
    util.combine_all_bands_into_vrt(period_vrts, overall_vrt_path)
    out_df = combine_data_files(overall_vrt_path, period_dfs)
    out_df.path = overall_vrt_path.name
    # Also save bands to a key file for ease of use in PRAIS
    key_json = job_output_path.parent / f"{job_output_path.stem}_band_key.json"
    with open(key_json, 'w') as f:
        json.dump(DataFile.Schema().dump(out_df), f, indent=4)
    summary_json_output_path = job_output_path.parent / f"{job_output_path.stem}_summary.json"
    report_json = save_reporting_json(
        summary_json_output_path, summary_tables, progress_summary_table,
        ldn_job.params, ldn_job.task_name, aoi, summary_table_stable_kwargs
    )
    ldn_job.results = RasterResults(
        name='land_condition_summary',
        uri=URI(uri=overall_vrt_path, type='local'),
        rasters={
            DataType.INT16.value:
            Raster(
                uri=URI(uri=overall_vrt_path, type='local'),
                bands=out_df.bands,
                datatype=DataType.INT16,
                filetype=RasterFileType.COG,
            ),
        },
        data={'report': report_json}
    )
    ldn_job.end_date = dt.datetime.now(dt.timezone.utc)
    ldn_job.progress = 100
    return ldn_job
def _process_block_summary(
params: models.DegradationSummaryParams, in_array, mask, xoff: int,
yoff: int, cell_areas_raw
) -> Tuple[models.SummaryTableLD, Dict]:
lc_band_years = params.in_df.metadata_for_name(config.LC_BAND_NAME, 'year')
lc_bands = [
(band, year) for band, year in
zip(params.in_df.indices_for_name(config.LC_BAND_NAME), lc_band_years)
]
soc_bands = [
(band, year) for band, year in zip(
params.in_df.indices_for_name(config.SOC_BAND_NAME),
params.in_df.metadata_for_name(config.SOC_BAND_NAME, 'year')
)
]
# Create container for output arrays (will write later in main thread)
write_arrays = []
# Calculate cell area for each horizontal line
# logger.debug('y: {}'.format(y))
# logger.debug('x: {}'.format(x))
# logger.debug('rows: {}'.format(rows))
# Make an array of the same size as the input arrays containing
# the area of each cell (which is identical for all cells in a
# given row - cell areas only vary among rows)
cell_areas = np.repeat(cell_areas_raw, mask.shape[1],
axis=1).astype(np.float64)
if params.prod_mode == ProductivityMode.TRENDS_EARTH_5_CLASS_LPD.value:
traj_array = in_array[
params.in_df.index_for_name(config.TRAJ_BAND_NAME), :, :]
traj_recode = recode_traj(traj_array)
state_array = in_array[
params.in_df.index_for_name(config.STATE_BAND_NAME), :, :]
state_recode = recode_state(state_array)
perf_array = in_array[
params.in_df.index_for_name(config.PERF_BAND_NAME), :, :]
deg_prod5 = calc_prod5(traj_recode, state_recode, perf_array)
elif params.prod_mode == ProductivityMode.JRC_5_CLASS_LPD.value:
deg_prod5 = in_array[
params.in_df.index_for_name(config.JRC_LPD_BAND_NAME), :, :]
# TODO: Below is temporary until missing data values are
# fixed in LPD layer on GEE and missing data values are
# fixed in LPD layer made by UNCCD for SIDS
deg_prod5[(deg_prod5 == 0) | (deg_prod5 == 15)] = config.NODATA_VALUE
else:
raise Exception(f"Unknown productivity mode {prod_mode}")
# Recode | |
= None,
aad_tenant_id: Optional[str] = None,
super_user: Optional[str] = None,
website_name: Optional[str] = None,
**kwargs
):
"""
:keyword additional_properties: Unmatched properties from the message are deserialized to this
collection.
:paramtype additional_properties: dict[str, any]
:keyword qna_runtime_endpoint: (QnAMaker Only) The runtime endpoint of QnAMaker.
:paramtype qna_runtime_endpoint: str
:keyword qna_azure_search_endpoint_key: (QnAMaker Only) The Azure Search endpoint key of
QnAMaker.
:paramtype qna_azure_search_endpoint_key: str
:keyword qna_azure_search_endpoint_id: (QnAMaker Only) The Azure Search endpoint id of
QnAMaker.
:paramtype qna_azure_search_endpoint_id: str
:keyword statistics_enabled: (Bing Search Only) The flag to enable statistics of Bing Search.
:paramtype statistics_enabled: bool
:keyword event_hub_connection_string: (Personalization Only) The flag to enable statistics of
Bing Search.
:paramtype event_hub_connection_string: str
:keyword storage_account_connection_string: (Personalization Only) The storage account
connection string.
:paramtype storage_account_connection_string: str
:keyword aad_client_id: (Metrics Advisor Only) The Azure AD Client Id (Application Id).
:paramtype aad_client_id: str
:keyword aad_tenant_id: (Metrics Advisor Only) The Azure AD Tenant Id.
:paramtype aad_tenant_id: str
:keyword super_user: (Metrics Advisor Only) The super user of Metrics Advisor.
:paramtype super_user: str
:keyword website_name: (Metrics Advisor Only) The website name of Metrics Advisor.
:paramtype website_name: str
"""
super(ApiProperties, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.qna_runtime_endpoint = qna_runtime_endpoint
self.qna_azure_search_endpoint_key = qna_azure_search_endpoint_key
self.qna_azure_search_endpoint_id = qna_azure_search_endpoint_id
self.statistics_enabled = statistics_enabled
self.event_hub_connection_string = event_hub_connection_string
self.storage_account_connection_string = storage_account_connection_string
self.aad_client_id = aad_client_id
self.aad_tenant_id = aad_tenant_id
self.super_user = super_user
self.website_name = website_name
class CallRateLimit(msrest.serialization.Model):
    """Describes the call rate limit applied to a Cognitive Services account.

    :ivar count: The count value of Call Rate Limit.
    :vartype count: float
    :ivar renewal_period: The renewal period in seconds of Call Rate Limit.
    :vartype renewal_period: float
    :ivar rules:
    :vartype rules: list[~azure.mgmt.cognitiveservices.models.ThrottlingRule]
    """

    _attribute_map = {
        'count': {'key': 'count', 'type': 'float'},
        'renewal_period': {'key': 'renewalPeriod', 'type': 'float'},
        'rules': {'key': 'rules', 'type': '[ThrottlingRule]'},
    }

    def __init__(self, *, count: Optional[float] = None,
                 renewal_period: Optional[float] = None,
                 rules: Optional[List["ThrottlingRule"]] = None, **kwargs):
        """
        :keyword count: The count value of Call Rate Limit.
        :paramtype count: float
        :keyword renewal_period: The renewal period in seconds of Call Rate Limit.
        :paramtype renewal_period: float
        :keyword rules:
        :paramtype rules: list[~azure.mgmt.cognitiveservices.models.ThrottlingRule]
        """
        super(CallRateLimit, self).__init__(**kwargs)
        self.rules = rules
        self.renewal_period = renewal_period
        self.count = count
class CheckDomainAvailabilityParameter(msrest.serialization.Model):
    """Parameters for a domain-availability check.

    All required parameters must be populated in order to send to Azure.

    :ivar subdomain_name: Required. The subdomain name to use.
    :vartype subdomain_name: str
    :ivar type: Required. The Type of the resource.
    :vartype type: str
    :ivar kind: The Kind of the resource.
    :vartype kind: str
    """

    _validation = {
        'subdomain_name': {'required': True},
        'type': {'required': True},
    }

    _attribute_map = {
        'subdomain_name': {'key': 'subdomainName', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
    }

    def __init__(self, *, subdomain_name: str, type: str,
                 kind: Optional[str] = None, **kwargs):
        """
        :keyword subdomain_name: Required. The subdomain name to use.
        :paramtype subdomain_name: str
        :keyword type: Required. The Type of the resource.
        :paramtype type: str
        :keyword kind: The Kind of the resource.
        :paramtype kind: str
        """
        super(CheckDomainAvailabilityParameter, self).__init__(**kwargs)
        self.kind = kind
        self.type = type
        self.subdomain_name = subdomain_name
class CheckSkuAvailabilityParameter(msrest.serialization.Model):
    """Parameters for a SKU-availability check.

    All required parameters must be populated in order to send to Azure.

    :ivar skus: Required. The SKU of the resource.
    :vartype skus: list[str]
    :ivar kind: Required. The Kind of the resource.
    :vartype kind: str
    :ivar type: Required. The Type of the resource.
    :vartype type: str
    """

    _validation = {
        'skus': {'required': True},
        'kind': {'required': True},
        'type': {'required': True},
    }

    _attribute_map = {
        'skus': {'key': 'skus', 'type': '[str]'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, *, skus: List[str], kind: str, type: str, **kwargs):
        """
        :keyword skus: Required. The SKU of the resource.
        :paramtype skus: list[str]
        :keyword kind: Required. The Kind of the resource.
        :paramtype kind: str
        :keyword type: Required. The Type of the resource.
        :paramtype type: str
        """
        super(CheckSkuAvailabilityParameter, self).__init__(**kwargs)
        self.type = type
        self.kind = kind
        self.skus = skus
class CommitmentCost(msrest.serialization.Model):
    """Cost details for a Cognitive Services account commitment.

    :ivar commitment_meter_id: Commitment meter Id.
    :vartype commitment_meter_id: str
    :ivar overage_meter_id: Overage meter Id.
    :vartype overage_meter_id: str
    """

    _attribute_map = {
        'commitment_meter_id': {'key': 'commitmentMeterId', 'type': 'str'},
        'overage_meter_id': {'key': 'overageMeterId', 'type': 'str'},
    }

    def __init__(self, *, commitment_meter_id: Optional[str] = None,
                 overage_meter_id: Optional[str] = None, **kwargs):
        """
        :keyword commitment_meter_id: Commitment meter Id.
        :paramtype commitment_meter_id: str
        :keyword overage_meter_id: Overage meter Id.
        :paramtype overage_meter_id: str
        """
        super(CommitmentCost, self).__init__(**kwargs)
        self.overage_meter_id = overage_meter_id
        self.commitment_meter_id = commitment_meter_id
class CommitmentPeriod(msrest.serialization.Model):
    """A single commitment period of a Cognitive Services account.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar tier: Commitment period commitment tier.
    :vartype tier: str
    :ivar count: Commitment period commitment count.
    :vartype count: int
    :ivar quota: Cognitive Services account commitment quota.
    :vartype quota: ~azure.mgmt.cognitiveservices.models.CommitmentQuota
    :ivar start_date: Commitment period start date.
    :vartype start_date: str
    :ivar end_date: Commitment period end date.
    :vartype end_date: str
    """

    _validation = {
        'quota': {'readonly': True},
        'start_date': {'readonly': True},
        'end_date': {'readonly': True},
    }

    _attribute_map = {
        'tier': {'key': 'tier', 'type': 'str'},
        'count': {'key': 'count', 'type': 'int'},
        'quota': {'key': 'quota', 'type': 'CommitmentQuota'},
        'start_date': {'key': 'startDate', 'type': 'str'},
        'end_date': {'key': 'endDate', 'type': 'str'},
    }

    def __init__(self, *, tier: Optional[str] = None,
                 count: Optional[int] = None, **kwargs):
        """
        :keyword tier: Commitment period commitment tier.
        :paramtype tier: str
        :keyword count: Commitment period commitment count.
        :paramtype count: int
        """
        super(CommitmentPeriod, self).__init__(**kwargs)
        self.tier = tier
        self.count = count
        # Server-populated, read-only fields.
        self.quota = None
        self.start_date = None
        self.end_date = None
class ProxyResource(Resource):
    """Azure Resource Manager proxy resource: it will not have tags and a location.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """No fields beyond the base Resource."""
        super(ProxyResource, self).__init__(**kwargs)
class CommitmentPlan(ProxyResource):
    """A Cognitive Services account commitment plan.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: Metadata pertaining to creation and last modification of the resource.
    :vartype system_data: ~azure.mgmt.cognitiveservices.models.SystemData
    :ivar etag: Resource Etag.
    :vartype etag: str
    :ivar properties: Properties of Cognitive Services account commitment plan.
    :vartype properties: ~azure.mgmt.cognitiveservices.models.CommitmentPlanProperties
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'etag': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'etag': {'key': 'etag', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'CommitmentPlanProperties'},
    }

    def __init__(self, *,
                 properties: Optional["CommitmentPlanProperties"] = None,
                 **kwargs):
        """
        :keyword properties: Properties of Cognitive Services account commitment plan.
        :paramtype properties: ~azure.mgmt.cognitiveservices.models.CommitmentPlanProperties
        """
        super(CommitmentPlan, self).__init__(**kwargs)
        self.properties = properties
        # Server-populated, read-only fields.
        self.system_data = None
        self.etag = None
class CommitmentPlanListResult(msrest.serialization.Model):
    """A page of Cognitive Services account commitment plans.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar next_link: The link used to get the next page of CommitmentPlan.
    :vartype next_link: str
    :ivar value: Gets the list of Cognitive Services accounts CommitmentPlan and their properties.
    :vartype value: list[~azure.mgmt.cognitiveservices.models.CommitmentPlan]
    """

    _validation = {
        'value': {'readonly': True},
    }

    _attribute_map = {
        'next_link': {'key': 'nextLink', 'type': 'str'},
        'value': {'key': 'value', 'type': '[CommitmentPlan]'},
    }

    def __init__(self, *, next_link: Optional[str] = None, **kwargs):
        """
        :keyword next_link: The link used to get the next page of CommitmentPlan.
        :paramtype next_link: str
        """
        super(CommitmentPlanListResult, self).__init__(**kwargs)
        # 'value' is populated by the server only.
        self.value = None
        self.next_link = next_link
class CommitmentPlanProperties(msrest.serialization.Model):
"""Properties of Cognitive Services account commitment plan.
Variables are only populated by the server, and will be ignored when sending a request.
| |
<gh_stars>0
# coding=utf-8
import calendar
import csv
import hashlib
import httplib
import json
import os
import time
from multiprocessing import Process
import datetime
from openpyxl import load_workbook
from scrapy import cmdline
class SpiderProcess(Process):
    """Child process that runs a scrapy crawl command."""

    def __init__(self, cmd):
        """Remember *cmd*, the argv list handed to scrapy's cmdline.execute."""
        Process.__init__(self)
        self.cmd = cmd

    def run(self):
        # Runs in the child process.
        print('main spider process execute')
        cmdline.execute(self.cmd)
class Place4Week:
    """Weekly operating report for a single venue.

    Counters are accumulated row by row via append_catchme() (openpyxl
    sheet rows) and append_wmq() (csv rows); append_cinema() attaches
    the venue's weekly cinema audience.  output() flattens everything
    into an 18-column report row.
    """
    # Venue identity / configuration
    p_name = ''
    p_code = ''
    wmq_name = ''
    wmq_code = ''
    ye_name = ''
    ye_code = ''
    ye_id = ''
    open_time = ''
    catchme_time = ''
    ret = 0
    cpt = 0
    # Test-run figures (subtracted once, in the machine's first week)
    test_gift = 0
    test_income = 0.0
    test_cost = 0.0
    test_play = 0
    # Operating days this week
    r_work_day = 0
    # Revenue
    r_income = 0.0
    # Profit after prize cost
    r_earn = 0.0
    # Catch probability (prizes / plays)
    r_probability = 0.0
    # Prizes dropped
    r_gift = 0
    # Coins bought by players
    r_coin_buy = 0
    # Coins given away (promotions)
    r_coin_free = 0
    # Number of plays
    r_play_time = 0
    # Channel name
    r_channel = ''
    # Users who scanned the QR code
    r_user_enter = 0
    # Users who paid / topped up
    r_user_pay = 0
    # Users who actually played
    r_user_play = 0
    # Scan -> payment conversion rate
    r_user_enter2pay = 0.0
    # Cinema audience head count
    r_user_cinema = 0
    # Audience -> scan conversion rate
    r_user_cinema2enter = 0.0
    def __init__(self, place_info, mon, sun):
        """Load venue metadata from *place_info* (a dict of str values).

        *mon*/*sun* delimit the reported week.  When the machine's start
        date ('start') falls inside this week, the recorded test-run
        income/plays/prizes are subtracted so they don't distort the
        first weekly report.
        """
        self.p_name = place_info['name']
        self.p_code = md5(self.p_name)
        self.wmq_name = place_info['wmq_name']
        if len(self.wmq_name) > 0:
            self.wmq_code = md5(self.wmq_name)
        self.ye_name = place_info['ye_name']
        if len(self.ye_name) > 0:
            self.ye_code = md5(self.ye_name)
        self.ye_id = int(place_info['ye_id'])
        self.open_time = place_info['open_time']
        self.catchme_time = place_info['start']
        self.ret = int(place_info['ret'])
        self.test_gift = int(place_info['test_gift'])
        self.test_play = int(place_info['test_play'])
        self.test_income = float(place_info['test_income'])
        self.test_cost = float(place_info['test_cost'])
        if time.strptime(str(mon), '%Y-%m-%d') \
                <= time.strptime(self.catchme_time, '%Y-%m-%d') \
                <= time.strptime(str(sun), '%Y-%m-%d'):
            self.r_income = -self.test_income
            self.r_play_time = -self.test_play
            self.r_gift = -self.test_gift
    # noinspection PyBroadException
    def append_catchme(self, line, d):
        """Accumulate one daily 'catchme' sheet row (openpyxl cells) for day *d*.

        Column meanings inferred from usage — confirm against the sheet:
        2=income, 13=profit, 10=prizes, 9=plays, 5=scan users,
        7=paying users, 6=playing users.
        """
        # Skip days before the venue officially opened.
        if time.strptime(self.open_time, '%Y-%m-%d') <= time.strptime(str(d), '%Y-%m-%d'):
            self.r_work_day += 1
            self.r_income += float(line[2].value)
            self.r_earn += float(line[13].value)
            self.r_gift += int(line[10].value)
            # Bought coins are derived from plays * coins-per-play.
            self.r_coin_buy += int(line[9].value) * self.cpt
            self.r_coin_free += 0
            self.r_play_time += int(line[9].value)
            self.r_user_enter += int(line[5].value)
            self.r_user_pay += int(line[7].value)
            self.r_user_play += int(line[6].value)
            try:
                self.r_probability = float(self.r_gift) / float(self.r_play_time)
                self.r_user_enter2pay = float(self.r_user_pay) / float(self.r_user_enter)
            except Exception:
                # Division by zero while the counters are still empty.
                self.r_probability = 0.0
                self.r_user_enter2pay = 0.0
    # noinspection PyBroadException
    def append_wmq(self, line, d):
        """Accumulate one daily 'weimaqi' csv row for day *d*.

        Column meanings inferred from usage — confirm against the feed:
        1=income, 3=profit, 6=prizes, 7=bought coins, 8=free coins,
        9=plays.  User counters are not present in this feed.
        """
        if time.strptime(self.open_time, '%Y-%m-%d') <= time.strptime(str(d), '%Y-%m-%d'):
            self.r_income += float(line[1])
            self.r_earn += float(line[3])
            self.r_gift += int(line[6])
            self.r_coin_buy += int(line[7])
            self.r_coin_free += int(line[8])
            self.r_play_time += int(line[9])
            try:
                self.r_probability = float(self.r_gift) / float(self.r_play_time)
            except Exception:
                self.r_probability = 0.0
                pass
    # noinspection PyBroadException
    def append_cinema(self, cinema):
        """Attach the weekly audience of *cinema* (a Cinema4Week) and
        recompute the audience -> scan conversion rate."""
        self.r_user_cinema = cinema.audTotal
        try:
            self.r_user_cinema2enter = float(self.r_user_enter) / float(self.r_user_cinema)
        except Exception:
            self.r_user_cinema2enter = 0.0
    # noinspection PyBroadException
    def output(self):
        """Return the venue's 18-column report row."""
        line = [0] * 18
        line[0] = str(self.p_name.encode('utf-8'))
        line[1] = self.r_work_day
        line[2] = self.r_income
        line[3] = self.r_earn
        line[4] = self.r_probability
        line[5] = self.r_gift
        line[6] = self.r_coin_buy
        line[7] = self.r_coin_free
        line[8] = self.r_play_time
        line[9] = str(self.r_channel.encode('utf-8'))
        line[10] = self.r_user_enter
        line[11] = self.r_user_pay
        line[12] = self.r_user_play
        line[13] = self.r_user_enter2pay
        line[14] = self.r_user_cinema
        line[15] = self.r_user_cinema2enter
        try:
            # Average plays per playing user.
            line[16] = float(self.r_play_time) / float(self.r_user_play)
        except Exception:
            line[16] = 0.0
        try:
            # Average income per play.
            line[17] = float(self.r_income) / float(self.r_play_time)
        except Exception:
            line[17] = 0.0
        return line
    def print_infos(self):
        """Print a one-line human-readable summary (labels are Chinese)."""
        print('[' + self.r_channel.encode('utf-8') + '][' + self.p_name.encode('utf-8') + '][运营天数:'
              + str(self.r_work_day) + '][营收:' + str(self.r_income) + '][盈利:' + str(self.r_earn)
              + '][掉落:' + str(self.r_gift) + '][游戏币:' + str(self.r_coin_buy) + '][派币:' + str(self.r_coin_free)
              + '][游戏次数:' + str(self.r_play_time) + '][扫码用户:' + str(self.r_user_enter)
              + '][充值用户:' + str(self.r_user_pay) + '][游戏用户:' + str(self.r_user_play) + '][充值转化:'
              + str(self.r_user_enter2pay) + '][观影人次:' + str(self.r_user_cinema)
              + '][观影转化:' + str(self.r_user_cinema2enter) + ']')
class Channel4Week:
    """Aggregates one week of statistics over all venues of one channel."""
    ch_code = ''
    ch_key = ''
    places = {}
    cpt = 2
    ch_name = ''
    wmq_code_map = {}
    day_of_month = 0
    monday = ''
    sunday = ''
    def __init__(self, channel_info, mday, sday, dom):
        """Build the per-venue map for venues already open by *sday*.

        :param channel_info: dict with 'code', 'name', 'cpt' (coins per
            play) and 'include' (list of venue dicts).
        :param mday: Monday of the reported week ('%Y-%m-%d').
        :param sday: Sunday of the reported week ('%Y-%m-%d').
        :param dom: number of days in the month.
        """
        # Fresh containers so instances don't share the class-level ones.
        self.places = {}
        self.wmq_code_map = {}
        self.ch_code = channel_info['code']
        self.ch_key = md5(channel_info['name'])
        self.cpt = int(channel_info['cpt'])
        self.ch_name = channel_info['name']
        self.day_of_month = dom
        self.monday = mday
        self.sunday = sday
        if len(channel_info['include']) > 0:
            for p in channel_info['include']:
                # Only venues that opened on or before the week's Sunday.
                if time.strptime(p['open_time'], '%Y-%m-%d') <= time.strptime(sday, '%Y-%m-%d'):
                    p4week = Place4Week(p, mday, sday)
                    p4week.cpt = self.cpt
                    p4week.r_channel = self.ch_name
                    self.places[p4week.p_code] = p4week
                    if len(p4week.wmq_code) > 0:
                        self.wmq_code_map[p4week.wmq_code] = p4week.p_code
    def append_catchme(self, line, d):
        """Route one daily catchme row to the venue it belongs to."""
        p_code = md5(line[0].value, True)
        if p_code in self.places:
            self.places[p_code].append_catchme(line, d)
    def append_weimaqi(self, line, d):
        """Route one daily weimaqi row to the venue it belongs to."""
        wmq_code = md5(line[0], False)
        if wmq_code in self.wmq_code_map:
            if self.wmq_code_map[wmq_code] in self.places:
                self.places[self.wmq_code_map[wmq_code]].append_wmq(line, d)
    # noinspection PyBroadException
    def output(self):
        """Summarise all venues of the channel into one 18-column row.

        :returns: (row, valid_user_enter, valid_user_cinema) — the last
            two only count venues that reported a cinema audience and
            whose machine was installed by this week's Sunday.
        """
        l_income = 0.0
        l_earn = 0.0
        l_probability = 0.0
        l_gift = 0
        l_coin_buy = 0
        l_coin_free = 0
        l_play_time = 0
        l_user_enter = 0
        l_user_pay = 0
        l_user_play = 0
        l_user_enter2pay = 0.0
        l_user_cinema = 0
        l_user_cinema2enter = 0.0
        l_ret = 0.0
        valid_user_enter = 0
        valid_user_cinema = 0
        for p in self.places.values():
            l_income += p.r_income
            l_earn += p.r_earn
            l_gift += p.r_gift
            l_coin_buy += p.r_coin_buy
            l_coin_free += p.r_coin_free
            l_play_time += p.r_play_time
            l_user_enter += p.r_user_enter
            l_user_pay += p.r_user_pay
            l_user_play += p.r_user_play
            # NOTE(review): l_ret is accumulated but never used below.
            l_ret += p.r_work_day * p.ret / self.day_of_month
            l_user_cinema += p.r_user_cinema
            # BUGFIX: compare against the instance's week end; the
            # original read a global 'sunday' while self.sunday was
            # assigned in __init__ and never used.
            if p.r_user_cinema > 0 \
                    and time.strptime(p.catchme_time, '%Y-%m-%d') <= time.strptime(str(self.sunday), '%Y-%m-%d'):
                valid_user_cinema += p.r_user_cinema
                valid_user_enter += p.r_user_enter
        try:
            l_user_cinema2enter = float(valid_user_enter) / float(valid_user_cinema)
        except Exception:
            pass
        try:
            l_user_enter2pay = float(l_user_pay) / float(l_user_enter)
        except Exception:
            pass
        try:
            l_probability = float(l_gift) / float(l_play_time)
        except Exception:
            pass
        line = [0] * 18
        line[0] = str(self.ch_name.encode('utf-8'))
        line[1] = '/'
        line[2] = l_income
        line[3] = l_earn
        line[4] = l_probability
        line[5] = l_gift
        line[6] = l_coin_buy
        line[7] = l_coin_free
        line[8] = l_play_time
        line[9] = '/'
        line[10] = l_user_enter
        line[11] = l_user_pay
        line[12] = l_user_play
        line[13] = l_user_enter2pay
        line[14] = l_user_cinema
        line[15] = l_user_cinema2enter
        try:
            # Average plays per playing user.
            line[16] = float(l_play_time) / float(l_user_play)
        except Exception:
            line[16] = 0.0
        try:
            # Average income per play.
            line[17] = float(l_income) / float(l_play_time)
        except Exception:
            line[17] = 0.0
        return line, valid_user_enter, valid_user_cinema
class Cinema4Week:
    """One week of box-office statistics for a single cinema."""
    cinemaCode = ''
    cinemaId = ''  # cinema id
    cinemaName = ''  # cinema name
    amount = 0.0  # weekly box office
    scenes = 0.0  # weekly number of screenings
    avgScreen = 0.0  # average weekly box office per screen
    avgPS = 0.0  # average audience per screening
    screen_yield = 0.0  # daily box office per screen
    scenes_time = 0.0  # daily screenings per screen
    audTotal = 0  # weekly audience count

    def __init__(self, line):
        """Parse one row: [id, name, amount, scenes, avgScreen, avgPS,
        screen_yield, scenes_time]."""
        self.cinemaId = int(line[0])
        self.cinemaName = line[1]
        for attr, col in (('amount', 2), ('scenes', 3), ('avgScreen', 4),
                          ('avgPS', 5), ('screen_yield', 6), ('scenes_time', 7)):
            setattr(self, attr, float(line[col]))
        # Derived values.
        self.audTotal = self.scenes * self.avgPS
        self.cinemaCode = md5(self.cinemaName, False)
class Channel4Day:
ch_code = ''
ch_key = ''
places = []
revenue = {}
revenue_wmq = []
cpt = 2
revenue_cvt = {}
ret = 0
name = ''
with_wmq = True
close_place = []
def __init__(self, channel_info, ysd):
self.ch_code = channel_info['code']
self.ch_key = md5(channel_info['name'])
self.cpt = int(channel_info['cpt'])
self.places = []
self.revenue = {}
self.revenue_wmq = []
self.revenue_cvt = {}
self.name = channel_info['name']
struct_t = time.strptime(ysd, '%Y-%m-%d')
self.with_wmq = bool(channel_info['wmq'])
if len(channel_info['include']) > 0:
for place in channel_info['include']:
if time.strptime(ysd, '%Y-%m-%d') >= time.strptime(place['start'], '%Y-%m-%d'):
place_key = md5(place['name'])
wmq_key = md5(place['wmq_name'])
self.revenue_cvt[wmq_key] = place_key
if bool(place['open']):
self.places.append(place_key)
self.revenue[place_key] = [0] * 19
self.revenue[place_key][0] = str(place['name'].encode('utf-8'))
self.revenue[place_key][11] = int(place['disable'])
self.revenue[place_key][12] = str(self.name.encode('utf-8'))
if place['start'] == ysd:
self.revenue[place_key][1] = -float(place['test_income'])
self.revenue[place_key][3] = -float(place['test_income']) + float(place['test_cost'])
self.revenue[place_key][6] = -float(place['test_gift'])
self.revenue[place_key][9] = -float(place['test_play'])
self.revenue[place_key][7] = -int(place['test_play'] * self.cpt)
else:
self.close_place.append(place_key)
if time.strptime(ysd, '%Y-%m-%d') >= time.strptime(place['open_time'], '%Y-%m-%d'):
self.ret += float(place['ret'])
self.ret = self.ret / calendar.monthrange(struct_t.tm_year, struct_t.tm_mon)[1]
    # noinspection PyBroadException
    def append_weimaqi_data(self, input_line):
        """Merge one weimaqi csv row into the channel's daily report.

        Rows of known open venues are added to that venue's revenue
        line; rows of known but closed venues are dropped; rows of
        unknown venues are collected verbatim in self.revenue_wmq.

        Column meanings inferred from usage — confirm against the feed:
        1=income, 3=profit, 5=catch probability, 6=prizes, 7=bought
        coins, 8=free coins, 9=plays, 10/11=machine counts, 2/4=per-
        machine income/profit, 17=plays per user, 18=income per play.
        """
        wmq_key = md5(input_line[0], False)
        if wmq_key in self.revenue_cvt.keys() and self.revenue_cvt[wmq_key] in self.revenue.keys():
            revenue_line = self.revenue[self.revenue_cvt[wmq_key]]
            revenue_line[1] += float(input_line[1])
            revenue_line[3] += float(input_line[3])
            revenue_line[6] += int(input_line[6])
            revenue_line[9] += int(input_line[9])
            revenue_line[7] += int(input_line[7])
            revenue_line[8] += int(input_line[8])
            try:
                # Catch probability = prizes / plays.
                revenue_line[5] = float(revenue_line[6]) / float(revenue_line[9])
            except Exception:
                revenue_line[5] = 0.0
            # Machine counts are taken from the first row that has them.
            if int(revenue_line[10]) == 0 and int(revenue_line[11]) == 0:
                revenue_line[10] = int(input_line[10])
                revenue_line[11] = int(input_line[11])
                revenue_line[2] = float(revenue_line[1]) / (revenue_line[10] + revenue_line[11])
                revenue_line[4] = revenue_line[3] / (revenue_line[10] + revenue_line[11])
            try:
                revenue_line[17] = float(revenue_line[9]) / float(revenue_line[15])
            except Exception:
                revenue_line[17] = 0.0
            try:
                revenue_line[18] = float(revenue_line[1]) / float(revenue_line[9])
            except Exception:
                revenue_line[18] = 0.0
        else:
            # Known but closed venue: silently drop the row.
            if wmq_key in self.revenue_cvt.keys() and self.revenue_cvt[wmq_key] in self.close_place:
                return
            # Unknown venue: keep the raw row for a separate listing.
            wmq_line = [0] * 19
            wmq_line[0] = input_line[0]
            wmq_line[1] = input_line[1]
            wmq_line[2] = input_line[2]
            wmq_line[3] = input_line[3]
            wmq_line[4] = input_line[4]
            wmq_line[5] = input_line[5]
            wmq_line[6] = input_line[6]
            wmq_line[7] = input_line[7]
            wmq_line[8] = input_line[8]
            wmq_line[9] = input_line[9]
            wmq_line[10] = input_line[10]
            wmq_line[11] = input_line[11]
            wmq_line[12] = str(self.name.encode('utf-8'))
            wmq_line[13] = 0
            wmq_line[14] = 0
            wmq_line[15] = 0
            wmq_line[16] = 0
            wmq_line[17] = 0.0
            wmq_line[18] = 0.0
            self.revenue_wmq.append(wmq_line)
# noinspection PyBroadException
def append_catchme_data(self, input_line):
place_key = md5(input_line[0].value, True)
if place_key in self.revenue.keys():
revenue_line = self.revenue[place_key]
# 营收
revenue_line[1] | |
range 0.0 to '
'1.0 if "relative_boundaries" are True')
else:
if 'p1' in mask_data:
if (
isinstance(mask_data['p1'], list) and
len(mask_data['p1']) == 2 and
isinstance(mask_data['p1'][0], int) and
isinstance(mask_data['p1'][1], int)):
mask.p1 = mask_data['p1']
else:
loading_warning = (
'"p1" property must be an integer if '
'"relative_boundaries" are False')
if 'p2' in mask_data:
if (
isinstance(mask_data['p2'], list) and
len(mask_data['p2']) == 2 and
isinstance(mask_data['p2'][0], int) and
isinstance(mask_data['p2'][1], int)):
mask.p2 = mask_data['p2']
else:
loading_warning = (
'"p2" property must be an integer if '
'"relative_boundaries" are False')
if mask_type in [
UvMaskTypes.GRADIENT_MASK.value, UvMaskTypes.STRIPES_MASK.value]:
if 'stripes' in mask_data:
stripes = mask_data['stripes']
if not isinstance(stripes, list):
loading_warning = '"stripes" property must be a list.'
else:
for stripe_data in stripes:
if not isinstance(stripe_data, dict):
loading_warning = (
'Every stripe in the stripes list must be an '
'object')
continue
stripe = mask.stripes.add()
if 'width' in stripe_data:
width = stripe_data['width']
if relative_boundaries:
if (
isinstance(width, (float, int)) and
0.0 <= width <= 1.0):
stripe.width_relative = width
else:
loading_warning = (
"Stripe width must be a float in "
"range 0.0 to 1.0 if "
"relative_boundaries is True")
else:
if isinstance(width, int):
stripe.width = width
else:
loading_warning = (
"Stripe width must be an integer if "
"relative_boundaries is True")
if 'strength' in stripe_data:
strength = stripe_data['strength']
if isinstance(strength, (float, int)):
stripe.strength = strength
else:
loading_warning = (
'Stripe strength must be a float.')
if mask_type in [
UvMaskTypes.GRADIENT_MASK.value, UvMaskTypes.ELLIPSE_MASK.value,
UvMaskTypes.RECTANGLE_MASK.value, UvMaskTypes.MIX_MASK.value,
UvMaskTypes.RANDOM_MASK.value]:
if 'expotent' in mask_data:
expotent = mask_data['expotent']
if isinstance(expotent, (float, int)):
mask.expotent = mask_data['expotent']
else:
loading_warning = 'Expotent property must be a float.'
if mask_type in [
UvMaskTypes.ELLIPSE_MASK.value,
UvMaskTypes.RECTANGLE_MASK.value, UvMaskTypes.MIX_MASK.value,
UvMaskTypes.RANDOM_MASK.value]:
if 'strength' in mask_data:
strength = mask_data['strength']
if (
isinstance(strength, list) and len(strength) == 2 and
isinstance(strength[0], (float, int)) and
isinstance(strength[1], (float, int)) and
0.0 <= strength[0] <= 1.0 and
0.0 <= strength[1] <= 1.0):
mask.strength = mask_data['strength']
else:
loading_warning = (
'"strength" property must be a list of '
'two floats in range 0.0 to 1.0.')
if mask_type in [
UvMaskTypes.ELLIPSE_MASK.value,
UvMaskTypes.RECTANGLE_MASK.value]:
if 'hard_edge' in mask_data:
if isinstance(mask_data['hard_edge'], bool):
hard_edge = mask_data['hard_edge']
mask.hard_edge = hard_edge
else:
loading_warning = '"hard_edge" property must be a boolean'
if mask_type == UvMaskTypes.STRIPES_MASK.value:
if 'horizontal' in mask_data:
if isinstance(mask_data['horizontal'], bool):
horizontal = mask_data['horizontal']
mask.horizontal = horizontal
else:
loading_warning = '"horizontal" property must be a boolean'
if mask_type == UvMaskTypes.RANDOM_MASK.value:
if 'use_seed' in mask_data:
if isinstance(mask_data['use_seed'], bool):
use_seed = mask_data['use_seed']
mask.use_seed = use_seed
else:
loading_warning = '"use_seed" property must be a boolean'
if 'seed' in mask_data:
seed = mask_data['seed']
if isinstance(seed, int):
mask.seed = mask_data['seed']
else:
loading_warning = '"seed" property must be an interger.'
if mask_type == UvMaskTypes.COLOR_MASK.value:
if 'color' in mask_data:
color_data = mask_data['color']
if (
not isinstance(color_data, list) or
len(color_data) != 3):
loading_warning = (
'Every color on colors list should be '
'a list of floats.')
else:
is_color = True
for value_data in color_data:
if not isinstance(value_data, (float, int)):
is_color = False
loading_warning =(
'All values of color must be '
'floats in range 0.0-1.0')
break
if is_color:
mask.color.color = color_data
return loading_warning
def _load_side(self, side: Any, side_data: List) -> Optional[str]:
loading_warning = None
for mask_data in side_data:
loading_warning = self._load_mask_data(mask_data, side)
return loading_warning
def execute(self, context):
name: str = get_unused_uv_group_name('uv_group')
# Save file and finish
try:
with open(self.filepath, 'r') as f:
data = json.load(f, cls=JSONCDecoder)
version = data['version']
if version != 1:
self.report({'ERROR'}, "Unknown UV-group version.")
return {'CANCELLED'}
except (KeyError, TypeError, JSONDecodeError):
self.report({'ERROR'}, "Unable to to read the UV-group data.")
return {'CANCELLED'}
# Create new UV-group
len_groups = len(context.scene.mcblend_uv_groups)
# Add new uv_group and set its properties
uv_group_new = context.scene.mcblend_uv_groups.add()
len_groups = len(context.scene.mcblend_uv_groups)
context.scene.mcblend_active_uv_group=len_groups-1
# Currently only version 1 is supported
if 'name' in data and isinstance(data['name'], str):
name = get_unused_uv_group_name(data['name'])
uv_group_new.name = name
# Used for showing warnings about loading process (the loader shows
# only one warning at a time)
loading_warning: Optional[str] = None
if 'side1' in data and isinstance(data['side1'], list):
loading_warning=self._load_side(uv_group_new.side1, data['side1'])
if 'side2' in data and isinstance(data['side2'], list):
loading_warning=self._load_side(uv_group_new.side2, data['side2'])
if 'side3' in data and isinstance(data['side3'], list):
loading_warning=self._load_side(uv_group_new.side3, data['side3'])
if 'side4' in data and isinstance(data['side4'], list):
loading_warning=self._load_side(uv_group_new.side4, data['side4'])
if 'side5' in data and isinstance(data['side5'], list):
loading_warning=self._load_side(uv_group_new.side5, data['side5'])
if 'side6' in data and isinstance(data['side6'], list):
loading_warning=self._load_side(uv_group_new.side6, data['side6'])
# If something didn't load propertly also display a warning
if loading_warning is not None:
self.report({'WARNING'}, loading_warning)
if context.area is not None: # There is no area when running from CLI
context.area.tag_redraw()
return {'FINISHED'}
# Events
class MCBLEND_OT_AddEvent(bpy.types.Operator):
    '''Operator used for adding events to the scene.'''
    bl_idname = "mcblend.add_event"
    bl_label = '''Adds new event to scene.'''
    bl_options = {'UNDO', 'INTERNAL'}

    def execute(self, context):
        '''Create a new event with an unused default name.'''
        # Fix: use the context passed to the operator instead of the
        # global bpy.context (Blender API recommendation; also works
        # with temporary context overrides).
        event_new = context.scene.mcblend_events.add()
        event_new.name = get_unused_event_name('event')
        return {'FINISHED'}
class MCBLEND_OT_RemoveEvent(bpy.types.Operator):
    '''Operator used for removing events.'''
    bl_idname = "mcblend.remove_event"
    bl_label = '''Removes event from scene.'''
    bl_options = {'UNDO', 'INTERNAL'}

    @classmethod
    def poll(cls, context: bpy_types.Context):
        '''Enabled only when the active event index is valid.'''
        # Fix: use the supplied context instead of the global bpy.context.
        events = context.scene.mcblend_events
        active_event_id = context.scene.mcblend_active_event
        return 0 <= active_event_id < len(events)

    def execute(self, context):
        active_event_id = context.scene.mcblend_active_event
        # Remove the event (original comment wrongly said "animation").
        context.scene.mcblend_events.remove(active_event_id)
        # Keep a valid selection after the removal.
        if active_event_id > 0:
            context.scene.mcblend_active_event = active_event_id - 1
        return {'FINISHED'}
class MCBLEND_OT_AddEffect(bpy.types.Operator):
    '''Operator used for adding effects to events.'''
    bl_idname = "mcblend.add_effect"
    bl_label = '''Adds new effect to active event.'''
    bl_options = {'UNDO', 'INTERNAL'}

    effect_type: EnumProperty(  # type: ignore
        items=list_effect_types_as_blender_enum, name='Effect type'
    )

    @classmethod
    def poll(cls, context: bpy_types.Context):
        '''Enabled only when the active event index is valid.'''
        # Fix: use the supplied context instead of the global bpy.context.
        events = context.scene.mcblend_events
        active_event_id = context.scene.mcblend_active_event
        return 0 <= active_event_id < len(events)

    def execute(self, context):
        '''Append an effect of self.effect_type to the active event.'''
        events = context.scene.mcblend_events
        active_event_id = context.scene.mcblend_active_event
        event = events[active_event_id]
        effect = event.effects.add()
        effect.effect_type = self.effect_type
        return {'FINISHED'}
class MCBLEND_OT_RemoveEffect(bpy.types.Operator):
    '''Operator used for removing effects from events.'''
    bl_idname = "mcblend.remove_effect"
    bl_label = '''Remove effect from active event.'''
    bl_options = {'UNDO', 'INTERNAL'}

    effect_index: IntProperty()  # type: ignore

    @classmethod
    def poll(cls, context: bpy_types.Context):
        '''Enabled when the active event is valid and has effects.'''
        # Fix: use the supplied context instead of the global bpy.context.
        events = context.scene.mcblend_events
        active_event_id = context.scene.mcblend_active_event
        if not 0 <= active_event_id < len(events):
            return False
        return len(events[active_event_id].effects) > 0

    def execute(self, context):
        '''Remove the effect at self.effect_index from the active event.'''
        events = context.scene.mcblend_events
        active_event_id = context.scene.mcblend_active_event
        event = events[active_event_id]
        event.effects.remove(self.effect_index)
        return {'FINISHED'}
# Project - RP entity import
class MCBLEND_OT_ReloadRp(bpy.types.Operator):
    '''Reloads the list of entities from the resource pack.'''
    # pylint: disable=unused-argument, no-member
    bl_idname = "mcblend.reload_rp"
    bl_label = "Import entity"
    bl_options = {'REGISTER'}
    bl_description = "Reloads the list of the entities from the resource pack."

    def execute(self, context):
        # Delegate the whole refresh to the project helper.
        reload_rp_entities(context)
        return {'FINISHED'}
class MCBLEND_OT_ImportRpEntity(bpy.types.Operator):
    '''Imports an entity from a Minecraft project into Blender.'''
    # pylint: disable=unused-argument, no-member
    bl_idname = "mcblend.import_rp_entity"
    bl_label = "Import entity from pack"
    bl_options = {'UNDO', 'INTERNAL'}
    bl_description = "Import entity by it's name from the resource pack."

    @classmethod
    def poll(cls, context: bpy_types.Context):
        # Enabled only when the entity list has been loaded.
        return len(context.scene.mcblend_project.entities) > 0

    def execute(self, context):
        try:
            warnings = import_model_form_project(context)
        except ImporterException as e:
            self.report(
                {'ERROR'}, f'Invalid model: {e}'
            )
            return {'FINISHED'}
        # A single warning is shown directly; multiple warnings are
        # listed and followed by a summary line.
        if len(warnings) == 1:
            self.report({'WARNING'}, warnings[0])
        elif len(warnings) > 1:
            for warning in warnings:
                self.report({'WARNING'}, warning)
            self.report(
                {'WARNING'},
                f"Finished with {len(warnings)} warnings. "
                "See logs for more details."
            )
        return {'FINISHED'}
# Armature render controllers
class MCBLEND_OT_AddFakeRc(bpy.types.Operator):
    '''Adds new render controller to active model (armature).'''
    bl_idname = "mcblend.add_fake_rc"
    bl_label = 'Adds new render controller'
    bl_options = {'UNDO', 'INTERNAL'}

    @classmethod
    def poll(cls, context: bpy_types.Context):
        # Fix: guard against no active object — the original raised
        # AttributeError when context.object was None.
        obj = context.object
        return obj is not None and obj.type == 'ARMATURE'

    def execute(self, context):
        '''Add a render controller with a default catch-all material.'''
        obj = context.object
        rc = obj.mcblend.render_controllers.add()
        material = rc.materials.add()
        material.pattern = '*'
        material.material = 'entity_alphatest'
        return {'FINISHED'}
class MCBLEND_OT_RemoveFakeRc(bpy.types.Operator):
    '''Removes render controller from active model (armature).'''
    bl_idname = "mcblend.remove_fake_rc"
    bl_label = 'Removes render controller'
    bl_options = {'UNDO', 'INTERNAL'}

    rc_index: IntProperty()  # type: ignore

    @classmethod
    def poll(cls, context: bpy_types.Context):
        # Fix: guard against no active object — the original raised
        # AttributeError when context.object was None.
        obj = context.object
        return obj is not None and obj.type == 'ARMATURE'

    def execute(self, context):
        '''Remove the render controller at self.rc_index.'''
        rcs = context.object.mcblend.render_controllers
        rcs.remove(self.rc_index)
        return {'FINISHED'}
class MCBLEND_OT_MoveFakeRc(bpy.types.Operator):
    '''Moves render controller in active to a different spot on the list.'''
    bl_idname = "mcblend.move_fake_rc"
    bl_label = 'Moves render controller'
    bl_options = {'UNDO', 'INTERNAL'}

    rc_index: IntProperty()  # type: ignore
    move_to: IntProperty()  # type: ignore

    @classmethod
    def poll(cls, context: bpy_types.Context):
        # Fix: guard against no active object — the original raised
        # AttributeError when context.object was None.
        obj = context.object
        return obj is not None and obj.type == 'ARMATURE'

    def execute(self, context):
        '''Move the controller from self.rc_index to self.move_to.'''
        rcs = context.object.mcblend.render_controllers
        rcs.move(self.rc_index, self.move_to)
        return {'FINISHED'}
class MCBLEND_OT_FakeRcSelectTexture(bpy.types.Operator):
'''Selects the name of the texture for render controller of a model.'''
bl_idname = "mcblend.fake_rc_select_texture"
bl_label = "Selects the texture name"
bl_options = {'UNDO', | |
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('get',
{
'context_type': context_type,
})
def get_0(self,
context_type,
id,
):
"""
Get firewall status for target resource in dfw context
:type context_type: :class:`str`
:param context_type: (required)
:type id: :class:`str`
:param id: (required)
:rtype: :class:`com.vmware.nsx.model_client.TargetResourceStatus`
:return: com.vmware.nsx.model.TargetResourceStatus
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('get_0',
{
'context_type': context_type,
'id': id,
})
def list(self):
"""
List all firewall status for supported contexts
:rtype: :class:`com.vmware.nsx.model_client.FirewallStatusListResult`
:return: com.vmware.nsx.model.FirewallStatusListResult
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('list', None)
def update(self,
context_type,
firewall_status,
):
"""
Update global firewall status for dfw context
:type context_type: :class:`str`
:param context_type: (required)
:type firewall_status: :class:`com.vmware.nsx.model_client.FirewallStatus`
:param firewall_status: (required)
:rtype: :class:`com.vmware.nsx.model_client.FirewallStatus`
:return: com.vmware.nsx.model.FirewallStatus
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('update',
{
'context_type': context_type,
'firewall_status': firewall_status,
})
class _ExcludelistStub(ApiInterfaceStub):
    """API stub for com.vmware.nsx.firewall.excludelist.

    Declares, for every operation (addmember, checkifexists, get,
    removemember, update), the vAPI input/output types, error map,
    validators and REST metadata, then registers them with the base
    ApiInterfaceStub.
    """
    def __init__(self, config):
        """Build the operation/REST metadata tables and initialise the stub."""
        # properties for addmember operation
        addmember_input_type = type.StructType('operation-input', {
            'resource_reference': type.ReferenceType('com.vmware.nsx.model_client', 'ResourceReference'),
        })
        addmember_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        addmember_input_value_validator_list = [
        ]
        addmember_output_validator_list = [
        ]
        addmember_rest_metadata = OperationRestMetadata(
            http_method='POST',
            url_template='/api/v1/firewall/excludelist?action=add_member',
            request_body_parameter='resource_reference',
            path_variables={
            },
            query_parameters={
            },
            content_type='application/json'
        )
        # properties for checkifexists operation
        checkifexists_input_type = type.StructType('operation-input', {
            'object_id': type.StringType(),
            'deep_check': type.OptionalType(type.BooleanType()),
            'object_type': type.OptionalType(type.StringType()),
        })
        checkifexists_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        checkifexists_input_value_validator_list = [
        ]
        checkifexists_output_validator_list = [
        ]
        checkifexists_rest_metadata = OperationRestMetadata(
            http_method='POST',
            url_template='/api/v1/firewall/excludelist?action=check_if_exists',
            path_variables={
            },
            query_parameters={
                'object_id': 'object_id',
                'deep_check': 'deep_check',
                'object_type': 'object_type',
            },
            content_type='application/json'
        )
        # properties for get operation
        get_input_type = type.StructType('operation-input', {})
        get_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        get_input_value_validator_list = [
        ]
        get_output_validator_list = [
        ]
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/api/v1/firewall/excludelist',
            path_variables={
            },
            query_parameters={
            },
            content_type='application/json'
        )
        # properties for removemember operation
        removemember_input_type = type.StructType('operation-input', {
            'object_id': type.StringType(),
            'deep_check': type.OptionalType(type.BooleanType()),
            'object_type': type.OptionalType(type.StringType()),
        })
        removemember_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        removemember_input_value_validator_list = [
        ]
        removemember_output_validator_list = [
        ]
        removemember_rest_metadata = OperationRestMetadata(
            http_method='POST',
            url_template='/api/v1/firewall/excludelist?action=remove_member',
            path_variables={
            },
            query_parameters={
                'object_id': 'object_id',
                'deep_check': 'deep_check',
                'object_type': 'object_type',
            },
            content_type='application/json'
        )
        # properties for update operation
        update_input_type = type.StructType('operation-input', {
            'exclude_list': type.ReferenceType('com.vmware.nsx.model_client', 'ExcludeList'),
        })
        update_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        update_input_value_validator_list = [
        ]
        update_output_validator_list = [
        ]
        update_rest_metadata = OperationRestMetadata(
            http_method='PUT',
            url_template='/api/v1/firewall/excludelist',
            request_body_parameter='exclude_list',
            path_variables={
            },
            query_parameters={
            },
            content_type='application/json'
        )
        operations = {
            'addmember': {
                'input_type': addmember_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'ResourceReference'),
                'errors': addmember_error_dict,
                'input_value_validator_list': addmember_input_value_validator_list,
                'output_validator_list': addmember_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'checkifexists': {
                'input_type': checkifexists_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'ResourceReference'),
                'errors': checkifexists_error_dict,
                'input_value_validator_list': checkifexists_input_value_validator_list,
                'output_validator_list': checkifexists_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'ExcludeList'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'removemember': {
                'input_type': removemember_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'ResourceReference'),
                'errors': removemember_error_dict,
                'input_value_validator_list': removemember_input_value_validator_list,
                'output_validator_list': removemember_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'update': {
                'input_type': update_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'ExcludeList'),
                'errors': update_error_dict,
                'input_value_validator_list': update_input_value_validator_list,
                'output_validator_list': update_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        rest_metadata = {
            'addmember': addmember_rest_metadata,
            'checkifexists': checkifexists_rest_metadata,
            'get': get_rest_metadata,
            'removemember': removemember_rest_metadata,
            'update': update_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx.firewall.excludelist',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=False)
class _ProfilesStub(ApiInterfaceStub):
    """Generated REST stub for the ``com.vmware.nsx.firewall.profiles`` service.

    Registers the create/delete/get/list/update operations with
    :class:`ApiInterfaceStub`, mapping each one to its NSX REST endpoint under
    ``/api/v1/firewall/profiles`` together with its input/output vAPI types,
    error map, validators and REST metadata.  Auto-generated code: do not edit
    by hand.
    """
    def __init__(self, config):
        # properties for create operation
        # POST /api/v1/firewall/profiles with a polymorphic BaseFirewallProfile body
        create_input_type = type.StructType('operation-input', {
            'base_firewall_profile': type.DynamicStructType('vmware.vapi.dynamic_struct', {}, VapiStruct, [type.ReferenceType('com.vmware.nsx.model_client', 'BaseFirewallProfile')]),
        })
        create_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        create_input_value_validator_list = [
            HasFieldsOfValidator()
        ]
        create_output_validator_list = [
            HasFieldsOfValidator()
        ]
        create_rest_metadata = OperationRestMetadata(
            http_method='POST',
            url_template='/api/v1/firewall/profiles',
            request_body_parameter='base_firewall_profile',
            path_variables={
            },
            query_parameters={
            },
            content_type='application/json'
        )
        # properties for delete operation
        # DELETE /api/v1/firewall/profiles/{profile-id}
        delete_input_type = type.StructType('operation-input', {
            'profile_id': type.StringType(),
        })
        delete_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        delete_input_value_validator_list = [
        ]
        delete_output_validator_list = [
        ]
        delete_rest_metadata = OperationRestMetadata(
            http_method='DELETE',
            url_template='/api/v1/firewall/profiles/{profile-id}',
            path_variables={
                'profile_id': 'profile-id',
            },
            query_parameters={
            },
            content_type='application/json'
        )
        # properties for get operation
        # GET /api/v1/firewall/profiles/{profile-id}
        get_input_type = type.StructType('operation-input', {
            'profile_id': type.StringType(),
        })
        get_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        get_input_value_validator_list = [
        ]
        get_output_validator_list = [
            HasFieldsOfValidator()
        ]
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/api/v1/firewall/profiles/{profile-id}',
            path_variables={
                'profile_id': 'profile-id',
            },
            query_parameters={
            },
            content_type='application/json'
        )
        # properties for list operation
        # GET /api/v1/firewall/profiles with paging/sorting query parameters
        list_input_type = type.StructType('operation-input', {
            'resource_type': type.StringType(),
            'cursor': type.OptionalType(type.StringType()),
            'included_fields': type.OptionalType(type.StringType()),
            'page_size': type.OptionalType(type.IntegerType()),
            'sort_ascending': type.OptionalType(type.BooleanType()),
            'sort_by': type.OptionalType(type.StringType()),
        })
        list_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        list_input_value_validator_list = [
        ]
        list_output_validator_list = [
            HasFieldsOfValidator()
        ]
        list_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/api/v1/firewall/profiles',
            path_variables={
            },
            query_parameters={
                'resource_type': 'resource_type',
                'cursor': 'cursor',
                'included_fields': 'included_fields',
                'page_size': 'page_size',
                'sort_ascending': 'sort_ascending',
                'sort_by': 'sort_by',
            },
            content_type='application/json'
        )
        # properties for update operation
        # PUT /api/v1/firewall/profiles/{profile-id} with a BaseFirewallProfile body
        update_input_type = type.StructType('operation-input', {
            'profile_id': type.StringType(),
            'base_firewall_profile': type.DynamicStructType('vmware.vapi.dynamic_struct', {}, VapiStruct, [type.ReferenceType('com.vmware.nsx.model_client', 'BaseFirewallProfile')]),
        })
        update_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        update_input_value_validator_list = [
            HasFieldsOfValidator()
        ]
        update_output_validator_list = [
            HasFieldsOfValidator()
        ]
        update_rest_metadata = OperationRestMetadata(
            http_method='PUT',
            url_template='/api/v1/firewall/profiles/{profile-id}',
            request_body_parameter='base_firewall_profile',
            path_variables={
                'profile_id': 'profile-id',
            },
            query_parameters={
            },
            content_type='application/json'
        )
        # Per-operation wiring consumed by ApiInterfaceStub.
        operations = {
            'create': {
                'input_type': create_input_type,
                'output_type': type.DynamicStructType('vmware.vapi.dynamic_struct', {}, VapiStruct, [type.ReferenceType('com.vmware.nsx.model_client', 'BaseFirewallProfile')]),
                'errors': create_error_dict,
                'input_value_validator_list': create_input_value_validator_list,
                'output_validator_list': create_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'delete': {
                'input_type': delete_input_type,
                'output_type': type.VoidType(),
                'errors': delete_error_dict,
                'input_value_validator_list': delete_input_value_validator_list,
                'output_validator_list': delete_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'get': {
                'input_type': get_input_type,
                'output_type': type.DynamicStructType('vmware.vapi.dynamic_struct', {}, VapiStruct, [type.ReferenceType('com.vmware.nsx.model_client', 'BaseFirewallProfile')]),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list': {
                'input_type': list_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'FirewallProfileListResult'),
                'errors': list_error_dict,
                'input_value_validator_list': list_input_value_validator_list,
                'output_validator_list': list_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'update': {
                'input_type': update_input_type,
                'output_type': type.DynamicStructType('vmware.vapi.dynamic_struct', {}, VapiStruct, [type.ReferenceType('com.vmware.nsx.model_client', 'BaseFirewallProfile')]),
                'errors': update_error_dict,
                'input_value_validator_list': update_input_value_validator_list,
                'output_validator_list': update_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        rest_metadata = {
            'create': create_rest_metadata,
            'delete': delete_rest_metadata,
            'get': get_rest_metadata,
            'list': list_rest_metadata,
            'update': update_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx.firewall.profiles',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=False)
class _RulesStub(ApiInterfaceStub):
    """Generated REST stub for the ``com.vmware.nsx.firewall.rules`` service.

    Exposes a single read-only ``get`` operation mapped to
    ``GET /api/v1/firewall/rules/{rule-id}`` returning a ``FirewallRule``.
    Auto-generated code: do not edit by hand.
    """
    def __init__(self, config):
        # properties for get operation
        get_input_type = type.StructType('operation-input', {
            'rule_id': type.StringType(),
        })
        get_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        get_input_value_validator_list = [
        ]
        get_output_validator_list = [
            HasFieldsOfValidator()
        ]
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/api/v1/firewall/rules/{rule-id}',
            path_variables={
                'rule_id': 'rule-id',
            },
            query_parameters={
            },
            content_type='application/json'
        )
        # Per-operation wiring consumed by ApiInterfaceStub.
        operations = {
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'FirewallRule'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        rest_metadata = {
            'get': get_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx.firewall.rules',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=False)
class _SectionsStub(ApiInterfaceStub):
def __init__(self, config):
# properties for create operation
create_input_type = type.StructType('operation-input', {
'firewall_section': type.ReferenceType('com.vmware.nsx.model_client', 'FirewallSection'),
'id': type.OptionalType(type.StringType()),
'operation': type.OptionalType(type.StringType()),
})
create_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
create_input_value_validator_list = [
]
create_output_validator_list = [
]
create_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/api/v1/firewall/sections',
request_body_parameter='firewall_section',
path_variables={
},
query_parameters={
'id': 'id',
'operation': 'operation',
},
content_type='application/json'
)
# properties for createwithrules operation
createwithrules_input_type = type.StructType('operation-input', {
'firewall_section_rule_list': type.ReferenceType('com.vmware.nsx.model_client', 'FirewallSectionRuleList'),
'id': type.OptionalType(type.StringType()),
'operation': type.OptionalType(type.StringType()),
})
createwithrules_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
createwithrules_input_value_validator_list = [
HasFieldsOfValidator()
]
createwithrules_output_validator_list = [
HasFieldsOfValidator()
]
createwithrules_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/api/v1/firewall/sections?action=create_with_rules',
request_body_parameter='firewall_section_rule_list',
path_variables={
},
query_parameters={
'id': 'id',
'operation': 'operation',
},
content_type='application/json'
)
# properties for delete operation
delete_input_type = type.StructType('operation-input', {
'section_id': type.StringType(),
'cascade': type.OptionalType(type.BooleanType()),
})
delete_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
delete_input_value_validator_list = [
]
delete_output_validator_list = [
]
delete_rest_metadata = OperationRestMetadata(
http_method='DELETE',
url_template='/api/v1/firewall/sections/{section-id}',
path_variables={
'section_id': 'section-id',
},
query_parameters={
'cascade': 'cascade',
},
content_type='application/json'
)
# properties for get | |
# Source repository: dutrow/crits
from crits.vocabulary.vocab import vocab
class Common(vocab):
    """Permission-name fragments shared by every CRITs object type.

    The per-type ACL vocabularies below (e.g. ActorACL, BackdoorACL) build
    their full permission strings by prefixing these fragments with the
    object-type name, as in ``"Actor." + Common.READ``.
    """
    # Core object-level permissions.
    READ = "read"
    WRITE = "write"
    DELETE = "delete"
    DOWNLOAD = "download"
    ALIASES_READ = "aliases_read"
    ALIASES_EDIT = "aliases_edit"
    DESCRIPTION_READ = "description_read"
    DESCRIPTION_EDIT = "description_edit"
    ACTIONS_READ = "actions_read"
    ACTIONS_ADD = "actions_add"
    ACTIONS_EDIT = "actions_edit"
    ACTIONS_DELETE = "actions_delete"
    BUCKETLIST_READ = "bucketlist_read"
    BUCKETLIST_EDIT = "bucketlist_edit"
    # Only used to check in the template if the user should be able to view campaigns
    CAMPAIGN_READ = "Campaign.read"
    CAMPAIGNS_READ = "campaigns_read"
    CAMPAIGNS_ADD = "campaigns_add"
    CAMPAIGNS_EDIT = "campaigns_edit"
    CAMPAIGNS_DELETE = "campaigns_delete"
    COMMENTS_READ = "comments_read"
    COMMENTS_ADD = "comments_add"
    COMMENTS_EDIT = "comments_edit"
    COMMENTS_DELETE = "comments_delete"
    LOCATIONS_READ = "locations_read"
    LOCATIONS_ADD = "locations_add"
    LOCATIONS_EDIT = "locations_edit"
    LOCATIONS_DELETE = "locations_delete"
    OBJECTS_READ = "objects_read"
    OBJECTS_ADD = "objects_add"
    OBJECTS_EDIT = "objects_edit"
    OBJECTS_DELETE = "objects_delete"
    RELATIONSHIPS_READ = "relationships_read"
    RELATIONSHIPS_ADD = "relationships_add"
    RELATIONSHIPS_EDIT = "relationships_edit"
    RELATIONSHIPS_DELETE = "relationships_delete"
    RELEASABILITY_READ = "releasability_read"
    RELEASABILITY_ADD = "releasability_add"
    RELEASABILITY_DELETE = "releasability_delete"
    SCREENSHOTS_READ = "screenshots_read"
    SCREENSHOTS_ADD = "screenshots_add"
    SCREENSHOTS_DELETE = "screenshots_delete"
    SECTORS_READ = "sectors_read"
    SECTORS_EDIT = "sectors_edit"
    SERVICES_READ = "services_read"
    SERVICES_EXECUTE = "services_execute"
    SOURCES_READ = "sources_read"
    SOURCES_ADD = "sources_add"
    SOURCES_EDIT = "sources_edit"
    SOURCES_DELETE = "sources_delete"
    STATUS_READ = "status_read"
    STATUS_EDIT = "status_edit"
    TICKETS_READ = "tickets_read"
    TICKETS_ADD = "tickets_add"
    TICKETS_EDIT = "tickets_edit"
    TICKETS_DELETE = "tickets_delete"
class GeneralACL(vocab):
    """
    Vocabulary for General ACLs

    Application-wide permissions that are not tied to a single object type:
    interface access, item-type creation, control-panel sections and
    timeline views.
    """
    # Which interfaces the user may access.
    API_INTERFACE = "api_interface"
    SCRIPT_INTERFACE = "script_interface"
    WEB_INTERFACE = "web_interface"
    # Ability to register new item types / sources / roles.
    ADD_NEW_ACTOR_IDENTIFIER_TYPE = "add_new_actor_identifier_type"
    ADD_NEW_INDICATOR_ACTION = "add_new_indicator_action"
    ADD_NEW_RAW_DATA_TYPE = "add_new_raw_data_type"
    ADD_NEW_SIGNATURE_DEPENDENCY = "add_new_signature_dependency"
    ADD_NEW_SIGNATURE_TYPE = "add_new_signature_type"
    ADD_NEW_SOURCE = "add_new_source"
    ADD_NEW_USER_ROLE = "add_new_user_role"
    ADD_NEW_TLDS = "add_new_tlds"
    # Control-panel sections, read/edit per section.
    CONTROL_PANEL_READ = "control_panel_read"
    CONTROL_PANEL_SYSTEM_READ = "control_panel_system_read"
    CONTROL_PANEL_GENERAL_READ = "control_panel_general_read"
    CONTROL_PANEL_GENERAL_EDIT = "control_panel_general_edit"
    CONTROL_PANEL_CRITS_READ = "control_panel_crits_read"
    CONTROL_PANEL_CRITS_EDIT = "control_panel_crits_edit"
    CONTROL_PANEL_LDAP_READ = "control_panel_ldap_read"
    CONTROL_PANEL_LDAP_EDIT = "control_panel_ldap_edit"
    CONTROL_PANEL_SECURITY_READ = "control_panel_security_read"
    CONTROL_PANEL_SECURITY_EDIT = "control_panel_security_edit"
    CONTROL_PANEL_DOWNLOADING_READ = "control_panel_downloading_read"
    CONTROL_PANEL_DOWNLOADING_EDIT = "control_panel_downloading_edit"
    CONTROL_PANEL_SYSTEM_SERVICES_READ = "control_panel_system_services_read"
    CONTROL_PANEL_SYSTEM_SERVICES_EDIT = "control_panel_system_services_edit"
    CONTROL_PANEL_LOGGING_READ = "control_panel_logging_read"
    CONTROL_PANEL_LOGGING_EDIT = "control_panel_logging_edit"
    CONTROL_PANEL_ITEMS_READ = "control_panel_items_read"
    CONTROL_PANEL_USERS_READ = "control_panel_users_read"
    CONTROL_PANEL_USERS_ADD = "control_panel_users_add"
    CONTROL_PANEL_USERS_EDIT = "control_panel_users_edit"
    CONTROL_PANEL_USERS_ACTIVE = "control_panel_users_active"
    CONTROL_PANEL_ROLES_READ = "control_panel_roles_read"
    CONTROL_PANEL_ROLES_EDIT = "control_panel_roles_edit"
    CONTROL_PANEL_SERVICES_READ = "control_panel_services_read"
    CONTROL_PANEL_SERVICES_EDIT = "control_panel_services_edit"
    CONTROL_PANEL_AUDIT_LOG_READ = "control_panel_audit_log_read"
    # Dashboard / import / timeline views.
    RECENT_ACTIVITY_READ = "recent_activity_read"
    STIX_IMPORT_ADD = "stix_import_add"
    DNS_TIMELINE_READ = "dns_timeline_read"
    EMAILS_TIMELINE_READ = "emails_timeline_read"
    INDICATORS_TIMELINE_READ = "indicators_timeline_read"
class ActorACL(vocab):
    """
    Vocabulary for Actor ACLs.

    Full permission strings are formed by prefixing the shared Common
    fragments (and a few Actor-only names) with ``"Actor."``.
    """
    # Prefix applied to every permission in this vocabulary.
    ACTOR = "Actor."
    # Actor-specific permissions (no Common equivalent).
    NAME_EDIT = ACTOR + "name_edit"
    INTENDED_EFFECTS_READ = ACTOR + "intended_effects_read"
    INTENDED_EFFECTS_EDIT = ACTOR + "intended_effects_edit"
    MOTIVATIONS_READ = ACTOR + "motivations_read"
    MOTIVATIONS_EDIT = ACTOR + "motivations_edit"
    SOPHISTICATIONS_READ = ACTOR + "sophistications_read"
    SOPHISTICATIONS_EDIT = ACTOR + "sophistications_edit"
    THREAT_TYPES_READ = ACTOR + "threat_types_read"
    THREAT_TYPES_EDIT = ACTOR + "threat_types_edit"
    ACTOR_IDENTIFIERS_READ = ACTOR + "actor_identifiers_read"
    ACTOR_IDENTIFIERS_EDIT = ACTOR + "actor_identifiers_edit"
    ACTOR_IDENTIFIERS_ADD = ACTOR + "actor_identifiers_add"
    ACTOR_IDENTIFIERS_DELETE = ACTOR + "actor_identifiers_delete"
    # Basics
    READ = ACTOR + Common.READ
    WRITE = ACTOR + Common.WRITE
    DELETE = ACTOR + Common.DELETE
    DOWNLOAD = ACTOR + Common.DOWNLOAD
    ALIASES_READ = ACTOR + Common.ALIASES_READ
    ALIASES_EDIT = ACTOR + Common.ALIASES_EDIT
    DESCRIPTION_READ = ACTOR + Common.DESCRIPTION_READ
    DESCRIPTION_EDIT = ACTOR + Common.DESCRIPTION_EDIT
    ACTIONS_READ = ACTOR + Common.ACTIONS_READ
    ACTIONS_ADD = ACTOR + Common.ACTIONS_ADD
    ACTIONS_EDIT = ACTOR + Common.ACTIONS_EDIT
    ACTIONS_DELETE = ACTOR + Common.ACTIONS_DELETE
    BUCKETLIST_READ = ACTOR + Common.BUCKETLIST_READ
    BUCKETLIST_EDIT = ACTOR + Common.BUCKETLIST_EDIT
    CAMPAIGNS_READ = ACTOR + Common.CAMPAIGNS_READ
    CAMPAIGNS_ADD = ACTOR + Common.CAMPAIGNS_ADD
    CAMPAIGNS_EDIT = ACTOR + Common.CAMPAIGNS_EDIT
    CAMPAIGNS_DELETE = ACTOR + Common.CAMPAIGNS_DELETE
    COMMENTS_READ = ACTOR + Common.COMMENTS_READ
    COMMENTS_ADD = ACTOR + Common.COMMENTS_ADD
    COMMENTS_EDIT = ACTOR + Common.COMMENTS_EDIT
    COMMENTS_DELETE = ACTOR + Common.COMMENTS_DELETE
    LOCATIONS_READ = ACTOR + Common.LOCATIONS_READ
    LOCATIONS_ADD = ACTOR + Common.LOCATIONS_ADD
    LOCATIONS_EDIT = ACTOR + Common.LOCATIONS_EDIT
    LOCATIONS_DELETE = ACTOR + Common.LOCATIONS_DELETE
    OBJECTS_READ = ACTOR + Common.OBJECTS_READ
    OBJECTS_ADD = ACTOR + Common.OBJECTS_ADD
    OBJECTS_EDIT = ACTOR + Common.OBJECTS_EDIT
    OBJECTS_DELETE = ACTOR + Common.OBJECTS_DELETE
    RELATIONSHIPS_READ = ACTOR + Common.RELATIONSHIPS_READ
    RELATIONSHIPS_ADD = ACTOR + Common.RELATIONSHIPS_ADD
    RELATIONSHIPS_EDIT = ACTOR + Common.RELATIONSHIPS_EDIT
    RELATIONSHIPS_DELETE = ACTOR + Common.RELATIONSHIPS_DELETE
    RELEASABILITY_READ = ACTOR + Common.RELEASABILITY_READ
    RELEASABILITY_ADD = ACTOR + Common.RELEASABILITY_ADD
    RELEASABILITY_DELETE = ACTOR + Common.RELEASABILITY_DELETE
    SCREENSHOTS_READ = ACTOR + Common.SCREENSHOTS_READ
    SCREENSHOTS_ADD = ACTOR + Common.SCREENSHOTS_ADD
    SCREENSHOTS_DELETE = ACTOR + Common.SCREENSHOTS_DELETE
    SECTORS_READ = ACTOR + Common.SECTORS_READ
    SECTORS_EDIT = ACTOR + Common.SECTORS_EDIT
    SERVICES_READ = ACTOR + Common.SERVICES_READ
    SERVICES_EXECUTE = ACTOR + Common.SERVICES_EXECUTE
    SOURCES_READ = ACTOR + Common.SOURCES_READ
    SOURCES_ADD = ACTOR + Common.SOURCES_ADD
    SOURCES_EDIT = ACTOR + Common.SOURCES_EDIT
    SOURCES_DELETE = ACTOR + Common.SOURCES_DELETE
    STATUS_READ = ACTOR + Common.STATUS_READ
    STATUS_EDIT = ACTOR + Common.STATUS_EDIT
    TICKETS_READ = ACTOR + Common.TICKETS_READ
    TICKETS_ADD = ACTOR + Common.TICKETS_ADD
    TICKETS_EDIT = ACTOR + Common.TICKETS_EDIT
    TICKETS_DELETE = ACTOR + Common.TICKETS_DELETE
class BackdoorACL(vocab):
    """
    Vocabulary for Backdoor ACLs

    Full permission strings are formed by prefixing the shared Common
    fragments (plus a few Backdoor-only names) with ``"Backdoor."``.
    """
    # Prefix applied to every permission in this vocabulary.
    BACKDOOR = "Backdoor."
    # Backdoor-specific permissions (no Common equivalent).
    ALIASES_READ = BACKDOOR + "aliases_read"
    ALIASES_EDIT = BACKDOOR + "aliases_edit"
    VERSION_EDIT = BACKDOOR + "version_edit"
    NAME_EDIT = BACKDOOR + "name_edit"
    # Unprefixed template-level campaign-view check (see Common.CAMPAIGN_READ).
    CAMPAIGN_READ = Common.CAMPAIGN_READ
    # Basics built from the shared Common fragments.
    READ = BACKDOOR + Common.READ
    WRITE = BACKDOOR + Common.WRITE
    DELETE = BACKDOOR + Common.DELETE
    DOWNLOAD = BACKDOOR + Common.DOWNLOAD
    DESCRIPTION_READ = BACKDOOR + Common.DESCRIPTION_READ
    DESCRIPTION_EDIT = BACKDOOR + Common.DESCRIPTION_EDIT
    ACTIONS_READ = BACKDOOR + Common.ACTIONS_READ
    ACTIONS_ADD = BACKDOOR + Common.ACTIONS_ADD
    ACTIONS_EDIT = BACKDOOR + Common.ACTIONS_EDIT
    ACTIONS_DELETE = BACKDOOR + Common.ACTIONS_DELETE
    BUCKETLIST_READ = BACKDOOR + Common.BUCKETLIST_READ
    BUCKETLIST_EDIT = BACKDOOR + Common.BUCKETLIST_EDIT
    CAMPAIGNS_READ = BACKDOOR + Common.CAMPAIGNS_READ
    CAMPAIGNS_ADD = BACKDOOR + Common.CAMPAIGNS_ADD
    CAMPAIGNS_EDIT = BACKDOOR + Common.CAMPAIGNS_EDIT
    CAMPAIGNS_DELETE = BACKDOOR + Common.CAMPAIGNS_DELETE
    COMMENTS_READ = BACKDOOR + Common.COMMENTS_READ
    COMMENTS_ADD = BACKDOOR + Common.COMMENTS_ADD
    COMMENTS_EDIT = BACKDOOR + Common.COMMENTS_EDIT
    COMMENTS_DELETE = BACKDOOR + Common.COMMENTS_DELETE
    LOCATIONS_READ = BACKDOOR + Common.LOCATIONS_READ
    LOCATIONS_ADD = BACKDOOR + Common.LOCATIONS_ADD
    LOCATIONS_EDIT = BACKDOOR + Common.LOCATIONS_EDIT
    LOCATIONS_DELETE = BACKDOOR + Common.LOCATIONS_DELETE
    OBJECTS_READ = BACKDOOR + Common.OBJECTS_READ
    OBJECTS_ADD = BACKDOOR + Common.OBJECTS_ADD
    OBJECTS_EDIT = BACKDOOR + Common.OBJECTS_EDIT
    OBJECTS_DELETE = BACKDOOR + Common.OBJECTS_DELETE
    RELATIONSHIPS_READ = BACKDOOR + Common.RELATIONSHIPS_READ
    RELATIONSHIPS_ADD = BACKDOOR + Common.RELATIONSHIPS_ADD
    RELATIONSHIPS_EDIT = BACKDOOR + Common.RELATIONSHIPS_EDIT
    RELATIONSHIPS_DELETE = BACKDOOR + Common.RELATIONSHIPS_DELETE
    RELEASABILITY_READ = BACKDOOR + Common.RELEASABILITY_READ
    RELEASABILITY_ADD = BACKDOOR + Common.RELEASABILITY_ADD
    RELEASABILITY_DELETE = BACKDOOR + Common.RELEASABILITY_DELETE
    SCREENSHOTS_READ = BACKDOOR + Common.SCREENSHOTS_READ
    SCREENSHOTS_ADD = BACKDOOR + Common.SCREENSHOTS_ADD
    SCREENSHOTS_DELETE = BACKDOOR + Common.SCREENSHOTS_DELETE
    SECTORS_READ = BACKDOOR + Common.SECTORS_READ
    SECTORS_EDIT = BACKDOOR + Common.SECTORS_EDIT
    SERVICES_READ = BACKDOOR + Common.SERVICES_READ
    SERVICES_EXECUTE = BACKDOOR + Common.SERVICES_EXECUTE
    SOURCES_READ = BACKDOOR + Common.SOURCES_READ
    SOURCES_ADD = BACKDOOR + Common.SOURCES_ADD
    SOURCES_EDIT = BACKDOOR + Common.SOURCES_EDIT
    SOURCES_DELETE = BACKDOOR + Common.SOURCES_DELETE
    STATUS_READ = BACKDOOR + Common.STATUS_READ
    STATUS_EDIT = BACKDOOR + Common.STATUS_EDIT
    TICKETS_READ = BACKDOOR + Common.TICKETS_READ
    TICKETS_ADD = BACKDOOR + Common.TICKETS_ADD
    TICKETS_EDIT = BACKDOOR + Common.TICKETS_EDIT
    TICKETS_DELETE = BACKDOOR + Common.TICKETS_DELETE
class CertificateACL(vocab):
    """
    Vocabulary for Certificate ACLs

    Full permission strings are formed by prefixing the shared Common
    fragments with ``"Certificate."``.
    """
    # Prefix applied to every permission in this vocabulary.
    CERTIFICATE = "Certificate."
    READ = CERTIFICATE + Common.READ
    WRITE = CERTIFICATE + Common.WRITE
    DELETE = CERTIFICATE + Common.DELETE
    DOWNLOAD = CERTIFICATE + Common.DOWNLOAD
    DESCRIPTION_READ = CERTIFICATE + Common.DESCRIPTION_READ
    DESCRIPTION_EDIT = CERTIFICATE + Common.DESCRIPTION_EDIT
    ACTIONS_READ = CERTIFICATE + Common.ACTIONS_READ
    ACTIONS_ADD = CERTIFICATE + Common.ACTIONS_ADD
    ACTIONS_EDIT = CERTIFICATE + Common.ACTIONS_EDIT
    ACTIONS_DELETE = CERTIFICATE + Common.ACTIONS_DELETE
    BUCKETLIST_READ = CERTIFICATE + Common.BUCKETLIST_READ
    BUCKETLIST_EDIT = CERTIFICATE + Common.BUCKETLIST_EDIT
    CAMPAIGNS_READ = CERTIFICATE + Common.CAMPAIGNS_READ
    CAMPAIGNS_ADD = CERTIFICATE + Common.CAMPAIGNS_ADD
    CAMPAIGNS_EDIT = CERTIFICATE + Common.CAMPAIGNS_EDIT
    CAMPAIGNS_DELETE = CERTIFICATE + Common.CAMPAIGNS_DELETE
    COMMENTS_READ = CERTIFICATE + Common.COMMENTS_READ
    COMMENTS_ADD = CERTIFICATE + Common.COMMENTS_ADD
    COMMENTS_EDIT = CERTIFICATE + Common.COMMENTS_EDIT
    COMMENTS_DELETE = CERTIFICATE + Common.COMMENTS_DELETE
    LOCATIONS_READ = CERTIFICATE + Common.LOCATIONS_READ
    LOCATIONS_ADD = CERTIFICATE + Common.LOCATIONS_ADD
    LOCATIONS_EDIT = CERTIFICATE + Common.LOCATIONS_EDIT
    LOCATIONS_DELETE = CERTIFICATE + Common.LOCATIONS_DELETE
    OBJECTS_READ = CERTIFICATE + Common.OBJECTS_READ
    OBJECTS_ADD = CERTIFICATE + Common.OBJECTS_ADD
    OBJECTS_EDIT = CERTIFICATE + Common.OBJECTS_EDIT
    OBJECTS_DELETE = CERTIFICATE + Common.OBJECTS_DELETE
    RELATIONSHIPS_READ = CERTIFICATE + Common.RELATIONSHIPS_READ
    RELATIONSHIPS_ADD = CERTIFICATE + Common.RELATIONSHIPS_ADD
    RELATIONSHIPS_EDIT = CERTIFICATE + Common.RELATIONSHIPS_EDIT
    RELATIONSHIPS_DELETE = CERTIFICATE + Common.RELATIONSHIPS_DELETE
    RELEASABILITY_READ = CERTIFICATE + Common.RELEASABILITY_READ
    RELEASABILITY_ADD = CERTIFICATE + Common.RELEASABILITY_ADD
    RELEASABILITY_DELETE = CERTIFICATE + Common.RELEASABILITY_DELETE
    SCREENSHOTS_READ = CERTIFICATE + Common.SCREENSHOTS_READ
    SCREENSHOTS_ADD = CERTIFICATE + Common.SCREENSHOTS_ADD
    SCREENSHOTS_DELETE = CERTIFICATE + Common.SCREENSHOTS_DELETE
    SECTORS_READ = CERTIFICATE + Common.SECTORS_READ
    SECTORS_EDIT = CERTIFICATE + Common.SECTORS_EDIT
    SERVICES_READ = CERTIFICATE + Common.SERVICES_READ
    SERVICES_EXECUTE = CERTIFICATE + Common.SERVICES_EXECUTE
    SOURCES_READ = CERTIFICATE + Common.SOURCES_READ
    SOURCES_ADD = CERTIFICATE + Common.SOURCES_ADD
    SOURCES_EDIT = CERTIFICATE + Common.SOURCES_EDIT
    SOURCES_DELETE = CERTIFICATE + Common.SOURCES_DELETE
    STATUS_READ = CERTIFICATE + Common.STATUS_READ
    STATUS_EDIT = CERTIFICATE + Common.STATUS_EDIT
    TICKETS_READ = CERTIFICATE + Common.TICKETS_READ
    TICKETS_ADD = CERTIFICATE + Common.TICKETS_ADD
    TICKETS_EDIT = CERTIFICATE + Common.TICKETS_EDIT
    TICKETS_DELETE = CERTIFICATE + Common.TICKETS_DELETE
class DomainACL(vocab):
"""
Vocabulary for Domain ACLs
"""
DOMAIN = "Domain."
CAMPAIGN_READ = Common.CAMPAIGN_READ
READ = DOMAIN + Common.READ
WRITE = DOMAIN + Common.WRITE
DELETE = DOMAIN + Common.DELETE
DOWNLOAD = DOMAIN + Common.DOWNLOAD
DESCRIPTION_READ = DOMAIN + Common.DESCRIPTION_READ
DESCRIPTION_EDIT = DOMAIN + Common.DESCRIPTION_EDIT
ACTIONS_READ = DOMAIN + Common.ACTIONS_READ
ACTIONS_ADD = | |
# GitHub stars: 1-10
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import pytest
import numpy as np
from scipy import sparse
from scipy import linalg
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_allclose
from sklearn.utils.fixes import parse_version
from sklearn.linear_model import LinearRegression
from sklearn.linear_model._base import _preprocess_data
from sklearn.linear_model._base import _rescale_data
from sklearn.linear_model._base import make_dataset
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_regression
from sklearn.datasets import load_iris
# Module-level RNG with a fixed seed so the tests below are deterministic.
rng = np.random.RandomState(0)
# Relative tolerance shared by approximate-equality checks in this module.
rtol = 1e-6
def test_linear_regression():
    """LinearRegression on a trivial two-point line must recover slope 1 and
    intercept 0, and a degenerate single-sample fit must yield all zeros."""
    # a perfectly linear two-point dataset: y == x
    X, y = [[1], [2]], [1, 2]
    model = LinearRegression().fit(X, y)
    assert_array_almost_equal(model.coef_, [1])
    assert_array_almost_equal(model.intercept_, [0])
    assert_array_almost_equal(model.predict(X), [1, 2])

    # degenerate input: a single sample with target 0
    X, y = [[1]], [0]
    model = LinearRegression().fit(X, y)
    assert_array_almost_equal(model.coef_, [0])
    assert_array_almost_equal(model.intercept_, [0])
    assert_array_almost_equal(model.predict(X), [0])
def test_linear_regression_sample_weights():
    """Weighted fits must match the closed-form weighted least-squares
    solution theta = (X^T W X)^(-1) X^T W y, with and without intercept."""
    # TODO: loop over sparse data as well
    rand = np.random.RandomState(0)

    # It would not work with under-determined systems
    for n_samples, n_features in ((6, 5), ):
        y = rand.randn(n_samples)
        X = rand.randn(n_samples, n_features)
        sample_weight = 1.0 + rand.rand(n_samples)

        for intercept in (True, False):
            # LinearRegression with explicit sample_weight
            model = LinearRegression(fit_intercept=intercept)
            model.fit(X, y, sample_weight=sample_weight)
            fitted_coefs = model.coef_
            fitted_intercept = model.intercept_

            assert model.coef_.shape == (X.shape[1], )  # sanity checks
            assert model.score(X, y) > 0.5

            # Closed form of the weighted least square
            # theta = (X^T W X)^(-1) * X^T W y
            W = np.diag(sample_weight)
            if not intercept:
                X_aug = X
            else:
                # prepend a constant column so the intercept is theta[0]
                X_aug = np.concatenate(
                    (np.ones(shape=(n_samples, 1)), X), axis=1)
            expected = linalg.solve(X_aug.T.dot(W).dot(X_aug),
                                    X_aug.T.dot(W).dot(y))

            if not intercept:
                assert_array_almost_equal(fitted_coefs, expected)
            else:
                assert_array_almost_equal(fitted_coefs, expected[1:])
                assert_almost_equal(fitted_intercept, expected[0])
def test_raises_value_error_if_sample_weights_greater_than_1d():
    """Sample weights must be either scalar or 1D.

    Scalar and 1-D weights are accepted by ``fit``; weights with more than
    one dimension must raise ``ValueError``.  The original version of this
    test never exercised the failure path its name promises, so a regression
    (silently accepting 2-D weights) would have gone unnoticed.
    """
    n_sampless = [2, 3]
    n_featuress = [3, 2]

    for n_samples, n_features in zip(n_sampless, n_featuress):
        X = rng.randn(n_samples, n_features)
        y = rng.randn(n_samples)
        sample_weights_OK = rng.randn(n_samples) ** 2 + 1
        sample_weights_OK_1 = 1.
        sample_weights_OK_2 = 2.

        reg = LinearRegression()

        # make sure the "OK" sample weights actually work
        reg.fit(X, y, sample_weights_OK)
        reg.fit(X, y, sample_weights_OK_1)
        reg.fit(X, y, sample_weights_OK_2)

        # the failure path the test name promises: >1-D weights must raise
        sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
        sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]

        with pytest.raises(ValueError):
            reg.fit(X, y, sample_weights_not_OK)

        with pytest.raises(ValueError):
            reg.fit(X, y, sample_weights_not_OK_2)
def test_fit_intercept():
    """coef_ shape and ndim must be the same with and without an intercept."""
    X2 = np.array([[0.38349978, 0.61650022],
                   [0.58853682, 0.41146318]])
    X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
                   [0.08385139, 0.20692515, 0.70922346]])
    y = np.array([1, 1])

    # Fit every (design matrix, fit_intercept) combination up front.
    fit = lambda X, icept: LinearRegression(fit_intercept=icept).fit(X, y)
    lr2_with, lr2_without = fit(X2, True), fit(X2, False)
    lr3_with, lr3_without = fit(X3, True), fit(X3, False)

    assert lr2_with.coef_.shape == lr2_without.coef_.shape
    assert lr3_with.coef_.shape == lr3_without.coef_.shape
    assert lr2_without.coef_.ndim == lr3_without.coef_.ndim
def test_linear_regression_sparse(random_state=0):
    """Fitting on a sparse identity design must recover the targets exactly."""
    random_state = check_random_state(random_state)
    for _ in range(10):
        n = 100
        # With X = I the model must reproduce beta via coef_ + intercept_.
        X = sparse.eye(n, n)
        beta = random_state.rand(n)
        y = X * beta[:, np.newaxis]

        ols = LinearRegression()
        ols.fit(X, y.ravel())
        assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
        assert_array_almost_equal(ols.predict(X) - y.ravel(), 0)
@pytest.mark.parametrize('normalize', [True, False])
@pytest.mark.parametrize('fit_intercept', [True, False])
def test_linear_regression_sparse_equal_dense(normalize, fit_intercept):
    """Sparse and dense representations of the same data must give the same
    intercept and coefficients for every (normalize, fit_intercept) combo."""
    rand = check_random_state(0)
    n_samples, n_features = 200, 2

    # Dense design with many exact zeros, plus its CSR twin.
    dense = rand.randn(n_samples, n_features)
    dense[dense < 0.1] = 0.
    as_csr = sparse.csr_matrix(dense)
    target = rand.rand(n_samples)

    params = dict(normalize=normalize, fit_intercept=fit_intercept)
    on_dense = LinearRegression(**params)
    on_sparse = LinearRegression(**params)
    on_dense.fit(dense, target)
    on_sparse.fit(as_csr, target)

    assert on_dense.intercept_ == pytest.approx(on_sparse.intercept_)
    assert_allclose(on_dense.coef_, on_sparse.coef_)
def test_linear_regression_multiple_outcome(random_state=0):
    """Multi-output fit must match two stacked single-output fits."""
    X, y = make_regression(random_state=random_state)
    Y = np.vstack((y, y)).T
    n_features = X.shape[1]

    reg = LinearRegression()
    reg.fit(X, Y)
    assert reg.coef_.shape == (2, n_features)
    Y_pred = reg.predict(X)

    # Refit on a single output and compare the stacked predictions.
    reg.fit(X, y)
    y_pred = reg.predict(X)
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
    """Multi-output linear regression on sparse data."""
    rng = check_random_state(random_state)
    X, y = make_sparse_uncorrelated(random_state=rng)
    X = sparse.coo_matrix(X)
    Y = np.vstack((y, y)).T

    ols = LinearRegression()
    ols.fit(X, Y)
    assert ols.coef_.shape == (2, X.shape[1])
    Y_pred = ols.predict(X)

    # Single-output refit; stacked predictions must agree.
    ols.fit(X, y.ravel())
    y_pred = ols.predict(X)
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_pd_sparse_dataframe_warning():
    """LinearRegression warns on a DataFrame mixing sparse and dense
    columns, but stays silent when every column is sparse."""
    import warnings

    pd = pytest.importorskip('pandas')
    # restrict the pd versions < '0.24.0' as they have a bug in is_sparse func
    if parse_version(pd.__version__) < parse_version('0.24.0'):
        pytest.skip("pandas 0.24+ required.")

    # First column dense, all remaining columns sparse.  (The original
    # loop guarded on ``col != 0`` which was always true for col in 1..3.)
    df = pd.DataFrame({'0': np.random.randn(10)})
    for col in range(1, 4):
        arr = np.random.randn(10)
        arr[:8] = 0
        df[str(col)] = pd.arrays.SparseArray(arr, fill_value=0)

    msg = "pandas.DataFrame with sparse columns found."
    with pytest.warns(UserWarning, match=msg):
        reg = LinearRegression()
        reg.fit(df.iloc[:, 0:2], df.iloc[:, 3])

    # does not warn when the whole dataframe is sparse
    df['0'] = pd.arrays.SparseArray(df['0'], fill_value=0)
    assert hasattr(df, "sparse")

    # pytest.warns(None) is deprecated and an error since pytest 7;
    # collect warnings with the stdlib instead and assert none fired.
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        reg.fit(df.iloc[:, 0:2], df.iloc[:, 3])
    assert not record
def test_preprocess_data():
    """_preprocess_data centering/scaling for each flag combination."""
    n_samples, n_features = 200, 2
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)

    X_mean_exp = np.mean(X, axis=0)
    X_norm_exp = np.std(X, axis=0) * np.sqrt(X.shape[0])
    y_mean_exp = np.mean(y, axis=0)

    # No intercept: data passes through untouched.
    Xt, yt, X_mean, y_mean, X_norm = _preprocess_data(
        X, y, fit_intercept=False, normalize=False)
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_norm, np.ones(n_features))
    assert_array_almost_equal(Xt, X)
    assert_array_almost_equal(yt, y)

    # Intercept only: centering, no scaling.
    Xt, yt, X_mean, y_mean, X_norm = _preprocess_data(
        X, y, fit_intercept=True, normalize=False)
    assert_array_almost_equal(X_mean, X_mean_exp)
    assert_array_almost_equal(y_mean, y_mean_exp)
    assert_array_almost_equal(X_norm, np.ones(n_features))
    assert_array_almost_equal(Xt, X - X_mean_exp)
    assert_array_almost_equal(yt, y - y_mean_exp)

    # Intercept + normalize: centering and scaling.
    Xt, yt, X_mean, y_mean, X_norm = _preprocess_data(
        X, y, fit_intercept=True, normalize=True)
    assert_array_almost_equal(X_mean, X_mean_exp)
    assert_array_almost_equal(y_mean, y_mean_exp)
    assert_array_almost_equal(X_norm, X_norm_exp)
    assert_array_almost_equal(Xt, (X - X_mean_exp) / X_norm_exp)
    assert_array_almost_equal(yt, y - y_mean_exp)
def test_preprocess_data_multioutput():
    """y centering in _preprocess_data for 2-d targets (dense and sparse X)."""
    n_samples, n_features, n_outputs = 200, 3, 2
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_outputs)
    y_mean_exp = np.mean(y, axis=0)

    for X_input in (X, sparse.csc_matrix(X)):
        # Without an intercept y is returned untouched.
        _, yt, _, y_mean, _ = _preprocess_data(X_input, y, fit_intercept=False,
                                               normalize=False)
        assert_array_almost_equal(y_mean, np.zeros(n_outputs))
        assert_array_almost_equal(yt, y)

        # With an intercept y is centered, regardless of normalize.
        for normalize in (False, True):
            _, yt, _, y_mean, _ = _preprocess_data(
                X_input, y, fit_intercept=True, normalize=normalize)
            assert_array_almost_equal(y_mean, y_mean_exp)
            assert_array_almost_equal(yt, y - y_mean)
def test_preprocess_data_weighted():
    """Weighted centering (and unweighted scaling) in _preprocess_data."""
    n_samples, n_features = 200, 2
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    sample_weight = rng.rand(n_samples)

    X_mean_exp = np.average(X, axis=0, weights=sample_weight)
    y_mean_exp = np.average(y, axis=0, weights=sample_weight)
    # XXX: if normalize=True, should we expect a weighted standard deviation?
    # Currently not weighted, but calculated with respect to weighted mean
    X_norm_exp = (np.sqrt(X.shape[0]) *
                  np.mean((X - X_mean_exp) ** 2, axis=0) ** .5)

    for normalize, norm_exp in ((False, np.ones(n_features)),
                                (True, X_norm_exp)):
        Xt, yt, X_mean, y_mean, X_norm = _preprocess_data(
            X, y, fit_intercept=True, normalize=normalize,
            sample_weight=sample_weight)
        assert_array_almost_equal(X_mean, X_mean_exp)
        assert_array_almost_equal(y_mean, y_mean_exp)
        assert_array_almost_equal(X_norm, norm_exp)
        assert_array_almost_equal(Xt, (X - X_mean_exp) / norm_exp)
        assert_array_almost_equal(yt, y - y_mean_exp)
def test_sparse_preprocess_data_with_return_mean():
    """With return_mean=True the means are computed but the sparse matrix
    itself is never centered."""
    n_samples, n_features = 200, 2
    # random_state not supported yet in sparse.rand
    X = sparse.rand(n_samples, n_features, density=.5)  # , random_state=rng
    X = X.tolil()
    y = rng.rand(n_samples)
    XA = X.toarray()
    XA_mean = np.mean(XA, axis=0)
    y_mean_exp = np.mean(y, axis=0)
    X_norm_exp = np.std(XA, axis=0) * np.sqrt(X.shape[0])

    cases = [
        # fit_intercept, normalize, expected X_mean, y_mean, X_norm, Xt, yt
        (False, False,
         np.zeros(n_features), 0, np.ones(n_features), XA, y),
        (True, False,
         XA_mean, y_mean_exp, np.ones(n_features), XA, y - y_mean_exp),
        (True, True,
         XA_mean, y_mean_exp, X_norm_exp, XA / X_norm_exp, y - y_mean_exp),
    ]
    for fit_intercept, normalize, mean_exp, ym_exp, norm_exp, Xt_exp, yt_exp \
            in cases:
        Xt, yt, X_mean, y_mean, X_norm = _preprocess_data(
            X, y, fit_intercept=fit_intercept, normalize=normalize,
            return_mean=True)
        assert_array_almost_equal(X_mean, mean_exp)
        assert_array_almost_equal(y_mean, ym_exp)
        assert_array_almost_equal(X_norm, norm_exp)
        assert_array_almost_equal(Xt.A, Xt_exp)
        assert_array_almost_equal(yt, yt_exp)
def test_csr_preprocess_data():
    """_preprocess_data must keep CSR input in CSR format."""
    X, y = make_regression()
    X[X < 2.5] = 0.0
    X_csr = sparse.csr_matrix(X)
    X_out, y, _, _, _ = _preprocess_data(X_csr, y, True)
    assert X_out.getformat() == 'csr'
@pytest.mark.parametrize('is_sparse', (True, False))
@pytest.mark.parametrize('to_copy', (True, False))
def test_preprocess_copy_data_no_checks(is_sparse, to_copy):
    """With check_input=False, copy=True must still copy the data and
    copy=False must share memory with the input."""
    X, y = make_regression()
    X[X < 2.5] = 0.0
    if is_sparse:
        X = sparse.csr_matrix(X)

    X_, y_, _, _, _ = _preprocess_data(X, y, True,
                                       copy=to_copy, check_input=False)

    shares = (np.may_share_memory(X_.data, X.data) if is_sparse
              else np.may_share_memory(X_, X))
    # Memory is shared exactly when no copy was requested.
    assert bool(shares) == (not to_copy)
def test_dtype_preprocess_data():
n_samples = 200
n_features = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
X_32 = np.asarray(X, dtype=np.float32)
y_32 = np.asarray(y, dtype=np.float32)
X_64 = np.asarray(X, dtype=np.float64)
y_64 = np.asarray(y, dtype=np.float64)
for fit_intercept in [True, False]:
for normalize in [True, False]:
Xt_32, yt_32, X_mean_32, y_mean_32, X_norm_32 = _preprocess_data(
X_32, y_32, fit_intercept=fit_intercept, normalize=normalize,
return_mean=True)
Xt_64, yt_64, X_mean_64, y_mean_64, X_norm_64 = _preprocess_data(
X_64, y_64, fit_intercept=fit_intercept, normalize=normalize,
return_mean=True)
Xt_3264, yt_3264, X_mean_3264, y_mean_3264, X_norm_3264 = (
_preprocess_data(X_32, y_64, fit_intercept=fit_intercept,
normalize=normalize, return_mean=True))
Xt_6432, yt_6432, X_mean_6432, y_mean_6432, X_norm_6432 = (
_preprocess_data(X_64, y_32, fit_intercept=fit_intercept,
normalize=normalize, return_mean=True))
assert Xt_32.dtype == np.float32
assert yt_32.dtype == np.float32
assert X_mean_32.dtype == np.float32
assert y_mean_32.dtype == np.float32
assert X_norm_32.dtype == | |
'method': 'call',
'params': {'service': service_name, 'method': method, 'args': args},
'id': '%04x%010x' % (os.getpid(), (int(time.time() * 1E6) % 2**40)),
}
resp = http_post(url, json.dumps(data).encode('ascii'))
if resp.get('error'):
raise ServerError(resp['error'])
return resp['result']
class partial(functools.partial):
    """functools.partial with a compact repr.

    The stock repr prints every stored argument, which can be huge for
    RPC payloads; this variant shows only the wrapped callable.
    """
    __slots__ = ()

    def __repr__(self):
        # Hide arguments on Python 3
        return '{0}({1!r}, ...)'.format(type(self).__name__, self.func)
# Base class for errors raised locally by this client library
# (e.g. invalid credentials, unknown database).
class Error(Exception):
    """An Odooly error."""
# Raised when the JSON-RPC response carries an 'error' payload,
# i.e. the failure happened on the server side.
class ServerError(Exception):
    """An error received from the server."""
class Service(object):
    """A wrapper around XML-RPC endpoints.
    The connected endpoints are exposed on the Client instance.
    The `server` argument is the URL of the server (scheme+host+port).
    If `server` is an ``odoo`` Python package, it is used to connect to the
    local server. The `endpoint` argument is the name of the service
    (examples: ``"object"``, ``"db"``). The `methods` is the list of methods
    which should be exposed on this endpoint. Use ``dir(...)`` on the
    instance to list them.
    """
    _methods = ()  # class-level default; replaced per instance in __init__

    def __init__(self, client, endpoint, methods, verbose=False):
        # Dispatcher bound to this endpoint, provided by the client.
        self._dispatch = client._proxy(endpoint)
        self._rpcpath = client._server
        self._endpoint = endpoint
        self._methods = methods
        # Verbosity level: truthy values select a line width from MAXCOL.
        self._verbose = verbose

    def __repr__(self):
        return "<Service '%s|%s'>" % (self._rpcpath, self._endpoint)
    __str__ = __repr__

    def __dir__(self):
        return sorted(self._methods)

    def __getattr__(self, name):
        # Build (and memoize on the instance) a callable per RPC method.
        if name not in self._methods:
            raise AttributeError("'Service' object has no attribute %r" % name)
        if self._verbose:
            def sanitize(args):
                # Mask the third positional argument -- presumably the
                # password -- for every endpoint except 'db'.
                if self._endpoint != 'db' and len(args) > 2:
                    args = list(args)
                    args[2] = '*'
                return args
            maxcol = MAXCOL[min(len(MAXCOL), self._verbose) - 1]
            def wrapper(self, *args):
                # Log the outgoing call, truncated to maxcol characters.
                snt = ', '.join([repr(arg) for arg in sanitize(args)])
                snt = '%s.%s(%s)' % (self._endpoint, name, snt)
                if len(snt) > maxcol:
                    suffix = '... L=%s' % len(snt)
                    snt = snt[:maxcol - len(suffix)] + suffix
                print('--> ' + snt)
                res = self._dispatch(name, args)
                # Log the (truncated) response as well.
                rcv = str(res)
                if len(rcv) > maxcol:
                    suffix = '... L=%s' % len(rcv)
                    rcv = rcv[:maxcol - len(suffix)] + suffix
                print('<-- ' + rcv)
                return res
        else:
            wrapper = lambda s, *args: s._dispatch(name, args)
        # Cache so subsequent attribute lookups skip __getattr__.
        return _memoize(self, name, wrapper)
class Env(object):
    """An environment wraps data for Odoo models and records:
    - :attr:`db_name`, the current database;
    - :attr:`uid`, the current user id;
    - :attr:`context`, the current context dictionary.
    To retrieve an instance of ``some.model``:
    >>> env["some.model"]
    """
    # Unauthenticated until _configure() sets uid/user.
    name = uid = user = None
    # Class-level cache shared by all Env instances,
    # keyed by (key, db_name, server) -- see _cache_get/_cache_set.
    _cache = {}
def __new__(cls, client, db_name=()):
    # Create a fresh Env when no database is requested or when the
    # client's current Env is already bound to a database; otherwise
    # reuse the client's unbound Env and just attach the database name.
    if not db_name or client.env.db_name:
        env = object.__new__(cls)
        env.client, env.db_name, env.context = client, db_name, {}
    else:
        env, env.db_name = client.env, db_name
    if db_name:
        # Model-name set is cached per (db, server) in the shared cache.
        env._model_names = env._cache_get('model_names', set)
        env._models = {}
    return env
def __contains__(self, name):
    """Test whether the given model exists."""
    # Fast path via the cached names, otherwise query the server.
    return name in self._model_names or name in self.models(name)
def __getitem__(self, name):
    """Return the given :class:`Model`."""
    return self._get(name)
def __iter__(self):
    """Return an iterator on model names."""
    return iter(self.models())
def __len__(self):
    """Return the size of the model registry."""
    return len(self.models())
def __bool__(self):
    # Always truthy, even though __len__ might return 0.
    return True
__nonzero__ = __bool__  # Python 2 spelling of __bool__
# Compare and hash by identity: two Envs are never "equal",
# even when they wrap the same database.
__eq__ = object.__eq__
__ne__ = object.__ne__
__hash__ = object.__hash__
def __repr__(self):
    # Show "login@database"; the login part is empty before authentication.
    login = self.user.login if self.uid else ''
    return "<Env '%s@%s'>" % (login, self.db_name)
def check_uid(self, uid, password):
    """Check if ``(uid, password)`` is valid.
    Return ``uid`` on success, ``False`` on failure.
    The invalid entry is removed from the authentication cache.
    """
    try:
        # Any authenticated RPC works as a probe; 'fields_get' is cheap
        # and raises when the credentials are rejected.
        self.client._object.execute_kw(self.db_name, uid, password,
                                       'ir.model', 'fields_get', ([None],))
    except Exception:
        auth_cache = self._cache_get('auth')
        if uid in auth_cache:
            del auth_cache[uid]
        uid = False
    return uid
def _auth(self, user, password):
    """Resolve credentials and return a valid ``(uid, password)`` pair.

    `user` may be a login name or a numeric uid.  The password is
    looked up, in order: the argument, the per-database auth cache,
    the ``res.users`` table (when writable), and finally an
    interactive prompt.  Raises :class:`Error` when the database does
    not exist or the credentials are invalid.
    """
    assert self.db_name, 'Not connected'
    uid = verified = None
    if isinstance(user, int_types):
        (user, uid) = (uid, user)
    auth_cache = self._cache_get('auth', dict)
    if not password:
        # Read from cache
        (uid, password) = auth_cache.get(user or uid) or (uid, None)
        # Read from model 'res.users'
        if not password and self.access('res.users', 'write'):
            domain = [('login', '=', user)] if user else [uid]
            obj = self['res.users'].read(domain, 'id login password')
            if obj:
                uid = obj[0]['id']
                user = obj[0]['login']
                password = obj[0]['password']
            else:
                # Invalid user
                uid = False
        verified = password and uid
    # Ask for password
    if not password and uid is not False:
        from getpass import getpass
        if user is None:
            name = 'admin' if uid == SUPERUSER_ID else ('UID %d' % uid)
        else:
            name = user
        # BUG FIX: the prompt string had no '%s' conversion specifier,
        # so the '%' formatting raised TypeError before prompting.
        password = getpass('Password for %s: ' % name)
    # Check if password is valid
    uid = self.check_uid(uid, password) if (uid and not verified) else uid
    if uid is None:
        # Do a standard 'login'
        try:
            uid = self.client.common.login(self.db_name, user, password)
        except Exception as exc:
            if 'does not exist' in str(exc):  # Heuristic
                raise Error('Database does not exist')
            raise
        if not uid:
            raise Error('Invalid username or password')
    # Update the cache (by uid, and by login when known)
    auth_cache[uid] = (uid, password)
    if user:
        auth_cache[user] = auth_cache[uid]
    return (uid, password)
def _set_credentials(self, uid, password):
    # Build endpoint callables with (db, uid, password) pre-bound.
    def env_auth(method):  # Authenticated endpoints
        return partial(method, self.db_name, uid, password)
    self._execute = env_auth(self.client._object.execute)
    self._execute_kw = env_auth(self.client._object.execute_kw)
    if self.client._report:  # Odoo <= 10
        self.exec_workflow = env_auth(self.client._object.exec_workflow)
        self.report = env_auth(self.client._report.report)
        self.report_get = env_auth(self.client._report.report_get)
        self.render_report = env_auth(self.client._report.render_report)
    if self.client._wizard:  # OpenERP 6.1
        self.wizard_execute = env_auth(self.client._wizard.execute)
        self.wizard_create = env_auth(self.client._wizard.create)
def _configure(self, uid, user, password, context):
    """Return an Env bound to ``(uid, context)``.

    A new instance is created when ``self`` is already authenticated;
    otherwise ``self`` is configured in place.
    """
    if self.uid:  # Create a new Env() instance
        env = Env(self.client)
        (env.db_name, env.name) = (self.db_name, self.name)
        env.context = dict(context)
        env._model_names = self._model_names
        env._models = {}
    else:  # Configure the Env() instance
        env = self
    if uid == self.uid:  # Copy methods
        # Same user: reuse the already-bound RPC callables.
        for key in ('_execute', '_execute_kw', 'exec_workflow',
                    'report', 'report_get', 'render_report',
                    'wizard_execute', 'wizard_create'):
            if hasattr(self, key):
                setattr(env, key, getattr(self, key))
    else:  # Create methods
        env._set_credentials(uid, password)
    # Setup uid and user
    if isinstance(user, int_types):
        user = 'admin' if uid == SUPERUSER_ID else None
    elif isinstance(user, Record):
        user = user.login
    env.uid = uid
    env.user = env._get('res.users', False).browse(uid)
    if user:
        assert isinstance(user, basestring), repr(user)
        # Seed the cached login to avoid an extra read round-trip.
        env.user.__dict__['login'] = user
        env.user._cached_keys.add('login')
    return env
@property
def odoo_env(self):
    """Return a server Environment.
    Supported since Odoo 8.
    """
    assert self.client.version_info >= 8.0, 'Not supported'
    # Requires an in-process server (client._server is the odoo package).
    return self.client._server.api.Environment(self.cr, self.uid,
                                               self.context)
@property
def cr(self):
    """Return a cursor on the database."""
    # Created lazily and memoized on the instance; the registry API for
    # obtaining a cursor changed in Odoo 8.
    return self.__dict__.get('cr') or _memoize(
        self, 'cr', self.registry.db.cursor()
        if self.client.version_info < 8.0 else self.registry.cursor())
@property
def registry(self):
    """Return the environment's registry."""
    # Only meaningful for an in-process server connection.
    return self.client._server._get_pool(self.db_name)
def __call__(self, user=None, password=None, context=None):
    """Return an environment based on ``self`` with modified parameters."""
    if user is not None:
        # Switching user resets the context to empty.
        (uid, password), context = self._auth(user, password), {}
    elif context is not None:
        (uid, user) = (self.uid, self.user)
    else:
        # Nothing to change.
        return self
    # One Env per (uid, context) pair, cached per database/server.
    env_key = json.dumps((uid, context), sort_keys=True)
    env = self._cache_get(env_key)
    if env is None:
        env = self._configure(uid, user, password, context)
        self._cache_set(env_key, env)
    return env
def sudo(self, user=SUPERUSER_ID):
    """Attach to the provided user, or SUPERUSER."""
    return self(user=user)
def ref(self, xml_id):
    """Return the record for the given ``xml_id`` external ID.

    Returns ``None`` when the external ID is unknown.
    """
    # Split on the first dot only: the name part of an external ID may
    # itself contain dots (same rule as Odoo's ir.model.data lookup);
    # split('.') would raise ValueError on such IDs.
    (module, name) = xml_id.split('.', 1)
    data = self['ir.model.data'].read(
        [('module', '=', module), ('name', '=', name)], 'model res_id')
    if data:
        assert len(data) == 1
        return self[data[0]['model']].browse(data[0]['res_id'])
@property
def lang(self):
    """Return the current language code."""
    # None when the context does not carry a 'lang' key.
    return self.context.get('lang')
def refresh(self):
    # Drop every cached entry for this (database, server) pair except
    # the authentication cache, then reset the model registries.
    db_key = (self.db_name, self.client._server)
    for key in list(self._cache):
        if key[1:] == db_key and key[0] != 'auth':
            del self._cache[key]
    self._model_names = self._cache_set('model_names', set())
    self._models = {}
def _cache_get(self, key, func=None):
    """Fetch a cached value for this (db, server); build it with
    ``func()`` and store it when absent (returns None if no func)."""
    missing = object()  # sentinel: cached values may legitimately be falsy
    value = self._cache.get((key, self.db_name, self.client._server),
                            missing)
    if value is not missing:
        return value
    if func is not None:
        return self._cache_set(key, func())
def _cache_set(self, key, value, db_name=None):
    # Store under (key, database, server); return value for chaining.
    self._cache[key, db_name or self.db_name, self.client._server] = value
    return value
def execute(self, obj, method, *params, **kwargs):
"""Wrapper around ``object.execute_kw`` RPC method.
Argument `method` is the name of an ``osv.osv`` method or
a method available on this `obj`.
Method `params` are allowed. If needed, keyword
arguments are collected in `kwargs`.
"""
assert self.uid, 'Not connected'
assert isinstance(obj, basestring)
assert isinstance(method, basestring) and method != 'browse'
ordered = single_id = False
if method == 'read':
assert params, 'Missing parameter'
if not isinstance(params[0], list):
single_id = True
ids = [params[0]] if params[0] else False
elif params[0] and issearchdomain(params[0]):
# Combine search+read
search_params = searchargs(params[:1], kwargs)
ordered = len(search_params) > 3 and search_params[3]
kw = ({'context': self.context},) if self.context else ()
ids = self._execute_kw(obj, 'search', search_params, *kw)
else:
ordered = kwargs.pop('order', False) and params[0]
ids = set(params[0]) - {False}
if not ids and ordered:
return [False] * len(ordered)
ids = sorted(ids)
if not ids:
return ids
params = (ids,) + params[1:]
elif method == 'search':
# Accept keyword arguments for the search method
params = searchargs(params, kwargs)
elif method == 'search_count':
params = searchargs(params)
kw = ((dict(kwargs, context=self.context),)
if self.context | |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import os.path
import copy
import spack.util.environment
class Cp2k(MakefilePackage, CudaPackage):
    """CP2K is a quantum chemistry and solid state physics software package
    that can perform atomistic simulations of solid state, liquid, molecular,
    periodic, material, crystal, and biological systems
    """
    homepage = 'https://www.cp2k.org'
    url = 'https://github.com/cp2k/cp2k/releases/download/v3.0.0/cp2k-3.0.tar.bz2'
    git = 'https://github.com/cp2k/cp2k.git'
    list_url = 'https://github.com/cp2k/cp2k/releases'
    maintainers = ['dev-zero']
    # Release checksums for the source tarballs above.
    version('7.1', sha256='ccd711a09a426145440e666310dd01cc5772ab103493c4ae6a3470898cd0addb')
    version('6.1', sha256='af803558e0a6b9e9d9ce8a3ab955ba32bacd179922455424e061c82c9fefa34b')
    version('5.1', sha256='e23613b593354fa82e0b8410e17d94c607a0b8c6d9b5d843528403ab09904412')
    version('4.1', sha256='4a3e4a101d8a35ebd80a9e9ecb02697fb8256364f1eccdbe4e5a85d31fe21343')
    version('3.0', sha256='1acfacef643141045b7cbade7006f9b7538476d861eeecd9658c9e468dc61151')
    version('master', branch='master', submodules="True")
    # Build-time feature toggles.
    variant('mpi', default=True, description='Enable MPI support')
    variant('openmp', default=False, description='Enable OpenMP support')
    variant('smm', default='libxsmm', values=('libxsmm', 'libsmm', 'blas'),
            description='Library for small matrix multiplications')
    variant('plumed', default=False, description='Enable PLUMED support')
    variant('libxc', default=True,
            description='Support additional functionals via libxc')
    variant('pexsi', default=False,
            description=('Enable the alternative PEXSI method'
                         'for density matrix evaluation'))
    variant('elpa', default=False,
            description='Enable optimised diagonalisation routines from ELPA')
    variant('sirius', default=False,
            description=('Enable planewave electronic structure'
                         ' calculations via SIRIUS'))
    variant('cosma', default=False, description='Use COSMA for p?gemm')
    # override cuda_arch from CudaPackage since we only support one arch
    # at a time and only specific ones for which we have parameter files
    # for optimal kernels
    variant('cuda_arch',
            description='CUDA architecture',
            default='none',
            values=('none', '35', '37', '60', '70'),
            multi=False)
    variant('cuda_arch_35_k20x', default=False,
            description=('CP2K (resp. DBCSR) has specific parameter sets for'
                         ' different GPU models. Enable this when building'
                         ' with cuda_arch=35 for a K20x instead of a K40'))
    variant('cuda_fft', default=False,
            description=('Use CUDA also for FFTs in the PW part of CP2K'))
    variant('cuda_blas', default=False,
            description=('Use CUBLAS for general matrix operations in DBCSR'))
    # Supported values for the HFX angular-momentum cutoff (lmax variant).
    HFX_LMAX_RANGE = range(4, 8)
    variant('lmax',
            description='Maximum supported angular momentum (HFX and others)',
            default='5',
            values=list(HFX_LMAX_RANGE),
            multi=False)
    depends_on('python', type='build')
    depends_on('blas')
    depends_on('lapack')
    depends_on('fftw-api@3')
    # require libxsmm-1.11+ since 1.10 can leak file descriptors in Fortran
    depends_on('libxsmm@1.11:~header-only', when='smm=libxsmm')
    # use pkg-config (support added in libxsmm-1.10) to link to libxsmm
    depends_on('pkgconfig', type='build', when='smm=libxsmm')
    # ... and in CP2K 7.0+ for linking to libint2
    depends_on('pkgconfig', type='build', when='@7.0:')
    # libint & libxc are always statically linked
    depends_on('libint@1.1.4:1.2', when='@3.0:6.9', type='build')
    for lmax in HFX_LMAX_RANGE:
        # libint2 can be linked dynamically again
        depends_on('libint@2.6.0:+fortran tune=cp2k-lmax-{0}'.format(lmax),
                   when='@7.0: lmax={0}'.format(lmax))
    depends_on('libxc@2.2.2:', when='+libxc@:5.5999', type='build')
    depends_on('libxc@4.0.3:', when='+libxc@6.0:6.9', type='build')
    depends_on('libxc@4.0.3:', when='+libxc@7.0:')
    depends_on('mpi@2:', when='+mpi')
    depends_on('scalapack', when='+mpi')
    depends_on('cosma+scalapack', when='+cosma')
    depends_on('cosma+cuda+scalapack', when='+cosma+cuda')
    # ELPA: version range and threading flavour depend on the CP2K release.
    depends_on('elpa@2011.12:2016.13+openmp', when='+openmp+elpa@:5.999')
    depends_on('elpa@2011.12:2017.11+openmp', when='+openmp+elpa@6.0:')
    depends_on('elpa@2018.05:+openmp', when='+openmp+elpa@7.0:')
    depends_on('elpa@2011.12:2016.13~openmp', when='~openmp+elpa@:5.999')
    depends_on('elpa@2011.12:2017.11~openmp', when='~openmp+elpa@6.0:')
    depends_on('elpa@2018.05:~openmp', when='~openmp+elpa@7.0:')
    depends_on('plumed+shared+mpi', when='+plumed+mpi')
    depends_on('plumed+shared~mpi', when='+plumed~mpi')
    # while we link statically against PEXSI, its own deps may be linked in
    # dynamically, therefore can't set this as pure build-type dependency.
    depends_on('pexsi+fortran@0.9.0:0.9.999', when='+pexsi@:4.999')
    depends_on('pexsi+fortran@0.10.0:', when='+pexsi@5.0:')
    # only OpenMP should be consistenly used, all other common things
    # like ELPA, SCALAPACK are independent and Spack will ensure that
    # a consistent/compat. combination is pulled in to the dependency graph.
    depends_on('sirius+fortran+vdwxc+shared+openmp', when='+sirius+openmp')
    depends_on('sirius+fortran+vdwxc+shared~openmp', when='+sirius~openmp')
    # the bundled libcusmm uses numpy in the parameter prediction (v7+)
    # which is written using Python 3
    depends_on('py-numpy', when='@7:+cuda', type='build')
    depends_on('python@3.6:', when='@7:+cuda', type='build')
    # PEXSI, ELPA, COSMA and SIRIUS depend on MPI
    conflicts('~mpi', '+pexsi')
    conflicts('~mpi', '+elpa')
    conflicts('~mpi', '+sirius')
    conflicts('~mpi', '+cosma')
    conflicts('+sirius', '@:6.999')  # sirius support was introduced in 7+
    conflicts('+cosma', '@:7.999')  # COSMA support was introduced in 8+
    conflicts('~cuda', '+cuda_fft')
    conflicts('~cuda', '+cuda_blas')
    # Apparently cp2k@4.1 needs an "experimental" version of libwannier.a
    # which is only available contacting the developer directly. See INSTALL
    # in the stage of cp2k@4.1
    depends_on('wannier90', when='@3.0+mpi', type='build')
    # CP2K needs compiler specific compilation flags, e.g. optflags
    conflicts('%clang')
    conflicts('%nag')
@property
def makefile_architecture(self):
    """Architecture part of the arch-file name, e.g. ``linux-gcc``."""
    return '{0}-{1}'.format(self.spec.architecture, self.spec.compiler.name)
@property
def makefile_version(self):
    """Flavour part of the arch-file name: 'p'/'s' (parallel/serial)
    followed by 'smp'/'opt' (with/without OpenMP)."""
    prefix = 'p' if '+mpi' in self.spec else 's'
    suffix = 'smp' if '+openmp' in self.spec else 'opt'
    return prefix + suffix
@property
def makefile(self):
    """Relative path of the arch makefile, e.g. ``arch/linux-gcc.psmp``."""
    basename = self.makefile_architecture + '.' + self.makefile_version
    return os.path.join('arch', basename)
@property
def archive_files(self):
    # Keep the generated arch file together with the build logs.
    return [os.path.join(self.stage.source_path, self.makefile)]
def consistency_check(self, spec):
    """
    Consistency checks.
    Due to issue #1712 we can not put them into depends_on/conflicts.
    """
    # All checks below only concern OpenMP builds.
    if '+openmp' not in spec:
        return
    if '^openblas' in spec and '^openblas threads=openmp' not in spec:
        raise InstallError(
            '^openblas threads=openmp required for cp2k+openmp'
            ' with openblas')
    if '^fftw' in spec and '^fftw +openmp' not in spec:
        raise InstallError(
            '^fftw +openmp required for cp2k+openmp'
            ' with fftw')
    # MKL doesn't need to be checked since they are
    # OMP thread-safe when using mkl_sequential
    # BUT: we should check the version of MKL IF it is used for FFTW
    # since there we need at least v14 of MKL to be safe!
def edit(self, spec, prefix):
self.consistency_check(spec)
pkgconf = which('pkg-config')
if '^fftw' in spec:
fftw = spec['fftw:openmp' if '+openmp' in spec else 'fftw']
fftw_header_dir = fftw.headers.directories[0]
elif '^intel-mkl' in spec:
fftw = spec['intel-mkl']
fftw_header_dir = fftw.headers.directories[0] + '/fftw'
elif '^intel-parallel-studio+mkl' in spec:
fftw = spec['intel-parallel-studio']
fftw_header_dir = fftw.headers.directories[0] + '/fftw'
optimization_flags = {
'gcc': [
'-O2',
'-funroll-loops',
'-ftree-vectorize',
],
'intel': ['-O2', '-pc64', '-unroll', ],
'pgi': ['-fast'],
'cray': ['-O2'],
'xl': ['-O3'],
}
dflags = ['-DNDEBUG']
cppflags = [
'-D__LIBINT',
'-D__FFTW3',
'-I{0}'.format(fftw_header_dir),
]
if '@:6.9' in spec:
cppflags += [
'-D__LIBINT_MAX_AM=6',
'-D__LIBDERIV_MAX_AM1=5',
]
if '^mpi@3:' in spec:
cppflags.append('-D__MPI_VERSION=3')
elif '^mpi@2:' in spec:
cppflags.append('-D__MPI_VERSION=2')
cflags = optimization_flags[self.spec.compiler.name][:]
cxxflags = optimization_flags[self.spec.compiler.name][:]
fcflags = optimization_flags[self.spec.compiler.name][:]
nvflags = ['-O3']
ldflags = []
libs = []
gpuver = ''
if '%intel' in spec:
cflags.append('-fp-model precise')
cxxflags.append('-fp-model precise')
fcflags += [
'-fp-model precise',
'-heap-arrays 64',
'-g',
'-traceback',
]
elif '%gcc' in spec:
fcflags += [
'-ffree-form',
'-ffree-line-length-none',
'-ggdb', # make sure we get proper Fortran backtraces
]
elif '%pgi' in spec:
fcflags += ['-Mfreeform', '-Mextend']
elif '%cray' in spec:
fcflags += ['-emf', '-ffree', '-hflex_mp=strict']
elif '%xl' in spec:
fcflags += ['-qpreprocess', '-qstrict', '-q64']
ldflags += ['-Wl,--allow-multiple-definition']
if '+openmp' in spec:
cflags.append(self.compiler.openmp_flag)
cxxflags.append(self.compiler.openmp_flag)
fcflags.append(self.compiler.openmp_flag)
ldflags.append(self.compiler.openmp_flag)
nvflags.append('-Xcompiler="{0}"'.format(
self.compiler.openmp_flag))
elif '%cray' in spec: # Cray enables OpenMP by default
cflags += ['-hnoomp']
cxxflags += ['-hnoomp']
fcflags += ['-hnoomp']
ldflags += ['-hnoomp']
if '@7:' in spec: # recent versions of CP2K use C++14 CUDA code
cxxflags.append(self.compiler.cxx14_flag)
nvflags.append(self.compiler.cxx14_flag)
ldflags.append(fftw.libs.search_flags)
if 'superlu-dist@4.3' in spec:
ldflags.insert(0, '-Wl,--allow-multiple-definition')
if '@:6.9' in spec:
# libint-1.x.y has to be linked statically to work around
# inconsistencies in its Fortran interface definition
# (short-int vs int) which otherwise causes segfaults at runtime
# due to wrong offsets into the shared library symbols.
libs.extend([
os.path.join(spec['libint'].libs.directories[0], 'libderiv.a'),
os.path.join(spec['libint'].libs.directories[0], 'libint.a'),
])
else:
fcflags += pkgconf('--cflags', 'libint2', output=str).split()
libs += pkgconf('--libs', 'libint2', output=str).split()
if '+plumed' in self.spec:
dflags.extend(['-D__PLUMED2'])
cppflags.extend(['-D__PLUMED2'])
libs.extend([
os.path.join(self.spec['plumed'].prefix.lib,
'libplumed.{0}'.format(dso_suffix))
])
cc = spack_cc if '~mpi' in spec else spec['mpi'].mpicc
cxx = spack_cxx if '~mpi' in spec else spec['mpi'].mpicxx
fc = spack_fc if '~mpi' in spec else spec['mpi'].mpifc
# Intel
if '%intel' in spec:
cppflags.extend([
'-D__INTEL',
'-D__HAS_ISO_C_BINDING',
'-D__USE_CP2K_TRACE',
])
fcflags.extend([
'-diag-disable 8290,8291,10010,10212,11060',
'-free',
'-fpp'
])
# FFTW, LAPACK, BLAS
lapack = spec['lapack'].libs
blas = spec['blas'].libs
ldflags.append((lapack + blas).search_flags)
libs.extend([str(x) for x in (fftw.libs, lapack, blas)])
if '^intel-mkl' in spec or '^intel-parallel-studio+mkl' in spec:
cppflags += ['-D__MKL']
elif '^accelerate' in spec:
cppflags += ['-D__ACCELERATE']
if '+cosma' in spec:
# add before ScaLAPACK to override the p?gemm symbols
cosma = spec['cosma'].libs
ldflags.append(cosma.search_flags)
libs.extend(cosma)
# MPI
if '+mpi' in spec:
cppflags.extend([
'-D__parallel',
'-D__SCALAPACK'
])
scalapack = spec['scalapack'].libs
ldflags.append(scalapack.search_flags)
libs.extend(scalapack)
libs.extend(spec['mpi:cxx'].libs)
libs.extend(self.compiler.stdcxx_libs)
if 'wannier90' in spec:
cppflags.append('-D__WANNIER90')
wannier = os.path.join(
spec['wannier90'].libs.directories[0], 'libwannier.a'
)
libs.append(wannier)
if '+libxc' in spec:
cppflags += ['-D__LIBXC']
if '@:6.9' in spec:
libxc = spec['libxc:fortran,static']
cppflags += [libxc.headers.cpp_flags]
ldflags.append(libxc.libs.search_flags)
libs.append(str(libxc.libs))
else:
fcflags += pkgconf('--cflags', 'libxcf03', output=str).split()
libs += pkgconf('--libs', 'libxcf03', output=str).split()
if '+pexsi' in spec:
cppflags.append('-D__LIBPEXSI')
fcflags.append('-I' + os.path.join(
spec['pexsi'].prefix, 'fortran'))
libs.extend([
os.path.join(spec['pexsi'].libs.directories[0],
'libpexsi.a'),
os.path.join(spec['superlu-dist'].libs.directories[0],
'libsuperlu_dist.a'),
os.path.join(
spec['parmetis'].libs.directories[0],
'libparmetis.{0}'.format(dso_suffix)
),
os.path.join(
spec['metis'].libs.directories[0],
'libmetis.{0}'.format(dso_suffix)
),
])
if '+elpa' in spec:
elpa = spec['elpa']
elpa_suffix = '_openmp' if '+openmp' in elpa else ''
elpa_incdir = elpa.headers.directories[0]
fcflags += ['-I{0}'.format(os.path.join(elpa_incdir, 'modules'))]
libs.append(os.path.join(elpa.libs.directories[0],
('libelpa{elpa_suffix}.{dso_suffix}'
.format(elpa_suffix=elpa_suffix,
dso_suffix=dso_suffix))))
if spec.satisfies('@:4.999'):
if elpa.satisfies('@:2014.5.999'):
cppflags.append('-D__ELPA')
elif elpa.satisfies('@2014.6:2015.10.999'):
cppflags.append('-D__ELPA2')
else:
cppflags.append('-D__ELPA3')
else:
cppflags.append('-D__ELPA={0}{1:02d}'
.format(elpa.version[0],
int(elpa.version[1])))
fcflags += ['-I{0}'.format(os.path.join(elpa_incdir, 'elpa'))]
if spec.satisfies('+sirius'):
sirius = spec['sirius']
cppflags.append('-D__SIRIUS')
fcflags += ['-I{0}'.format(os.path.join(sirius.prefix, 'fortran'))]
libs += list(sirius.libs)
if spec.satisfies('+cuda'):
cppflags += ['-D__ACC']
libs += ['-lcudart', '-lnvrtc', '-lcuda']
if spec.satisfies('+cuda_blas'):
cppflags += ['-D__DBCSR_ACC=2']
libs += ['-lcublas']
else:
cppflags += ['-D__DBCSR_ACC']
if spec.satisfies('+cuda_fft'):
cppflags += ['-D__PW_CUDA']
libs += ['-lcufft', '-lcublas']
cuda_arch = spec.variants['cuda_arch'].value
if cuda_arch:
gpuver = {
'35': 'K40',
'37': 'K80',
'60': 'P100',
'70': 'V100',
}[cuda_arch]
if (cuda_arch == '35'
and | |
# <gh_stars>0
# -*- coding: utf-8 -*-
"""
This project analyzes the control approach for Optima pumps, based on the competitor's algorithm
It uses the data from the prototype tests, fits curves with two different approaches, and plots them on a graph
"""
import Database as db
import math
import numpy as np
import matplotlib.pyplot as plt
import time
# Step sizes used by the analysis loops (see findBestEEI).
FLOW_INCREMENT_STEP = 0.1  # flow step when sweeping candidate Q break points
EEI_CALCULATION_STEP = 0.1  # presumably the EEI integration step — used by helpers outside this view, TODO confirm
GRAPH_STEP = 0.5  # only break points falling on this grid get their curves plotted
# Selector for which fitted parameter set (db.G1_* vs db.G2_*) is used.
GROUP_1 = 1
GROUP_2 = 2
# Control-approach flag passed to findBestEEI (True -> double power control branch).
DOUBLE_POWER_CONTROL = True
DIRECT_LINE_CONTROL = False
# Pump operating points as tuples:
# (max flow, value at max flow — assumed head, TODO confirm, model label, speed, parameter group)
# Group 1
MAX_FLOW_POINT_3_12 = (11.5, 1.77, "3/12", 4900, GROUP_1)
MAX_FLOW_POINT_4_10 = (10, 1.4, "4/10", 4500, GROUP_1)
MAX_FLOW_POINT_3_10 = (10, 1.31, "3/10", 4500, GROUP_1)
MAX_FLOW_POINT_2_10 = (10, 1.31, "2/10", 4500, GROUP_1)
MAX_FLOW_POINT_3_7 = (8.7, 0.95, "3/7", 3800, GROUP_1)
# Group 2
MAX_FLOW_POINT_5_8 = (17.0, 1.1, "5/8", 4500, GROUP_2)
MAX_FLOW_POINT_4_8 = (17.0, 1.11, "4/8", 4600, GROUP_2)
MAX_FLOW_POINT_4_4 = (12.38, 0.60, "4/4", 3600, GROUP_2)
def main():
    """Run the EEI analysis for one selected pump model.

    Each findBestEEI call sweeps the three flow set points given as the
    last arguments. The remaining models are kept commented out so they
    can be re-enabled one at a time.
    """
    #findBestEEI(MAX_FLOW_POINT_3_12, DOUBLE_POWER_CONTROL, 4.2, 6, 7.5)
    #findBestEEI(MAX_FLOW_POINT_4_10, DOUBLE_POWER_CONTROL, 2.5, 4, 6)
    #findBestEEI(MAX_FLOW_POINT_3_10, DOUBLE_POWER_CONTROL, 2.5, 4, 6)
    #findBestEEI(MAX_FLOW_POINT_2_10, DOUBLE_POWER_CONTROL, 2.5, 4, 6)
    #findBestEEI(MAX_FLOW_POINT_3_7, DOUBLE_POWER_CONTROL, 2.2, 4, 5.8)
    findBestEEI(MAX_FLOW_POINT_5_8, DOUBLE_POWER_CONTROL, 7, 7.8, 10.5)
    #findBestEEI(MAX_FLOW_POINT_4_8, DOUBLE_POWER_CONTROL, 5.2, 7.8, 10.5)
    #findBestEEI(MAX_FLOW_POINT_4_4, DOUBLE_POWER_CONTROL, 4, 7, 8.1)
def calculateHeadFromFlowAndInputPower(flow, inputPower, groupNumber):
    """Return the head predicted by the fitted Q/Pelk -> H surface.

    flow        -- flow rate Q (float)
    inputPower  -- electrical input power Pelk (float)
    groupNumber -- 1 or 2, selects the fitted coefficient set; any other
                   value yields 0.
    """
    q = flow
    p = inputPower
    # Terms of the bivariate cubic fit, ordered exactly like the
    # coefficient arrays stored in the database module.
    terms = [q**3, q**2, q, p**3,
             p**2, p, (q**3)*p, (q**2)*p,
             q*p, (q**3)*(p**2), (q**2)*(p**2), q*(p**2),
             (q**3)*(p**3), (q**2)*(p**3), q*(p**3), 1]
    if groupNumber == 1:
        coefficients = db.G1_Q_Pelk_H_curve_parameters
    elif groupNumber == 2:
        coefficients = db.G2_Q_Pelk_H_curve_parameters
    else:
        return 0
    return sum(terms[i] * coefficients[i] for i in range(16))
def calculateInputPowerFromFlowAndHead(flow, head, groupNumber):
    """Return the input power predicted by the fitted Q/H -> Pelk surface.

    flow        -- flow rate Q (float)
    head        -- head H (float)
    groupNumber -- 1 or 2, selects the fitted coefficient set; any other
                   value yields 0.
    """
    q = flow
    h = head
    # Terms of the bivariate cubic fit, ordered exactly like the
    # coefficient arrays stored in the database module.
    terms = [q**3, q**2, q, h**3,
             h**2, h, (q**3)*h, (q**2)*h,
             q*h, (q**3)*(h**2), (q**2)*(h**2), q*(h**2),
             (q**3)*(h**3), (q**2)*(h**3), q*(h**3), 1]
    if groupNumber == 1:
        coefficients = db.G1_Q_H_Pelk_curve_parameters
    elif groupNumber == 2:
        coefficients = db.G2_Q_H_Pelk_curve_parameters
    else:
        return 0
    return sum(terms[i] * coefficients[i] for i in range(16))
def calculateHeadFromFlowAndSpeed(flow, speed, groupNumber):
    """Return the head predicted by the fitted Q/speed -> H surface.

    flow        -- flow rate Q (float)
    speed       -- pump speed (float)
    groupNumber -- 1 or 2, selects the fitted coefficient set; any other
                   value yields 0.
    """
    q = flow
    s = speed
    # Terms of the bivariate cubic fit, ordered exactly like the
    # coefficient arrays stored in the database module.
    terms = [q**3, q**2, q, s**3,
             s**2, s, (q**3)*s, (q**2)*s,
             q*s, (q**3)*(s**2), (q**2)*(s**2), q*(s**2),
             (q**3)*(s**3), (q**2)*(s**3), q*(s**3), 1]
    if groupNumber == 1:
        coefficients = db.G1_Q_Speed_H_curve_parameters
    elif groupNumber == 2:
        coefficients = db.G2_Q_Speed_H_curve_parameters
    else:
        return 0
    return sum(terms[i] * coefficients[i] for i in range(16))
def calculateTorqueFromFlowAndHead(flow, head, groupNumber):
    """Return the torque at the operating point (flow, head).

    First derives input power and speed from their fitted Q/H surfaces,
    then evaluates the Pelk/speed -> torque fit with them.
    """
    input_power = calculateInputPowerFromFlowAndHead(flow, head, groupNumber)
    speed = calculateSpeedFromFlowAndHead(flow, head, groupNumber)
    p = input_power
    s = speed
    # Terms of the bivariate cubic fit, ordered exactly like the
    # coefficient array stored in the database module.
    terms = [p**3, p**2, p, s**3,
             s**2, s, (p**3)*s, (p**2)*s,
             p*s, (p**3)*(s**2), (p**2)*(s**2), p*(s**2),
             (p**3)*(s**3), (p**2)*(s**3), p*(s**3), 1]
    return sum(terms[i] * db.Pelk_Speed_Torque_curve_parameters[i] for i in range(16))
def calculateTorqueFromSpeedAndInputPower(rpm, Input_Power):
    """Return the torque predicted by the fitted Pelk/speed -> torque surface.

    rpm         -- pump speed (float)
    Input_Power -- electrical input power Pelk (float)
    """
    p = Input_Power
    s = rpm
    # Terms of the bivariate cubic fit, ordered exactly like the
    # coefficient array stored in the database module.
    terms = [p**3, p**2, p, s**3,
             s**2, s, (p**3)*s, (p**2)*s,
             p*s, (p**3)*(s**2), (p**2)*(s**2), p*(s**2),
             (p**3)*(s**3), (p**2)*(s**3), p*(s**3), 1]
    return sum(terms[i] * db.Pelk_Speed_Torque_curve_parameters[i] for i in range(16))
def calculateSpeedFromFlowAndHead(flow, head, groupNumber):
    """Return the speed predicted by the fitted Q/H -> speed surface.

    flow        -- flow rate Q (float)
    head        -- head H (float)
    groupNumber -- 1 or 2, selects the fitted coefficient set; any other
                   value yields 0.
    """
    q = flow
    h = head
    # Terms of the bivariate cubic fit, ordered exactly like the
    # coefficient arrays stored in the database module.
    terms = [q**3, q**2, q, h**3,
             h**2, h, (q**3)*h, (q**2)*h,
             q*h, (q**3)*(h**2), (q**2)*(h**2), q*(h**2),
             (q**3)*(h**3), (q**2)*(h**3), q*(h**3), 1]
    if groupNumber == 1:
        coefficients = db.G1_Q_H_speed_curve_parameters
    elif groupNumber == 2:
        coefficients = db.G2_Q_H_speed_curve_parameters
    else:
        return 0
    return sum(terms[i] * coefficients[i] for i in range(16))
# EFFECTS : Consumes a max flow point tuple (size of 5), control approach (Boolean), three flow points (float)
# and plots the graph or graphs to console based on the control approach.
def findBestEEI(maxFlowPoint, doublePowerControl, firstQ, secondQ, thirdQ):
if doublePowerControl:
"""
************************************
Double Power Control
************************************
"""
speed = maxFlowPoint[3]
groupNumber = maxFlowPoint[4]
Qh = []
Qh, maxPelk, maxPowerOutputPoint, EEI = createFlowHeadArrayWithDoublePowerControl(maxFlowPoint,
firstQ,
secondQ,
thirdQ,
groupNumber,
speed)
firstPointOfLine = (0.0, calculateHeadFromFlowAndSpeed(0.0, speed, groupNumber), groupNumber)
Q_H_Array, maxPower2 = createFlowHeadArray(firstPointOfLine, speed, maxFlowPoint, groupNumber)
Alarko_x = []
Alarko_y = []
PhidList = []
PelkList = []
SpeedList = []
TorqueList = []
for e in Qh:
Alarko_x.append(e[0])
Alarko_y.append(e[1])
PhidList.append(e[0] * e[1]* 2.72)
PelkList.append(calculateInputPowerFromFlowAndHead(e[0], e[1], groupNumber))
SpeedList.append(calculateSpeedFromFlowAndHead(e[0], e[1], groupNumber))
TorqueList.append(calculateTorqueFromFlowAndHead(e[0], e[1], groupNumber))
if (maxFlowPoint[2] == "3/12"):
Competitor_Excel = db.Competitor_3_12()
if (maxFlowPoint[2] == "4/10" or "3/10" or "2/10"):
Competitor_Excel = db.Competitor_2_10()
if (maxFlowPoint[2] == "3/7"):
Competitor_Excel = db.Competitor_3_7()
if (maxFlowPoint[2] == "4/8"):
Competitor_Excel = db.Competitor_4_8()
if (maxFlowPoint[2] == "5/8"):
Competitor_Excel = db.Competitor_5_8()
if (maxFlowPoint[2] == "4/4"):
Competitor_Excel = db.Competitor_4_4()
Competitor_Flow_List = []
Competitor_Head_List = []
Competitor_Power_List = []
for e in Competitor_Excel:
Competitor_Flow_List.append(e[0])
Competitor_Head_List.append(e[1])
Competitor_Power_List.append(e[2])
plt.figure(figsize = (15,5))
""" SUBPLOT 1 """
plt.subplot(1,2,1)
label = maxFlowPoint[2]
plt.title('Flow - Head of '+label)
plt.xlabel("Flow")
plt.ylabel("Head")
plt.grid(b=True, which='major', color='#666666', linestyle='-')
plt.minorticks_on()
plt.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)
plt.xlim(0, maxFlowPoint[0] + 0.5)
plt.ylim(0, Competitor_Head_List[-1] + 2.5)
plt.plot(Alarko_x, Alarko_y, 'r')
plt.plot([firstQ, firstQ], [0, 15], 'r--')
plt.plot([secondQ, secondQ], [0, 15], 'r--')
plt.plot([thirdQ, thirdQ], [0, 15], 'r--')
plt.plot([0, maxPowerOutputPoint[0]],[maxPowerOutputPoint[1] / 2, maxPowerOutputPoint[1]], 'go-')
QHtext = "Q= " + '%.2f'%maxPowerOutputPoint[0] + "\n" + "H= " + '%.2f'%maxPowerOutputPoint[1] + "\n" + "Pelk= " + '%.2f'%maxPelk + "\nEEI= " + '%.3f'%EEI
plt.text(maxPowerOutputPoint[0] - 0.5, maxPowerOutputPoint[1] - 3.5 , QHtext, fontsize = 12, weight = 'bold')
""" Competitor"""
plt.text(7, 8, "Competitor" + "\n" + "EEI: 0.194", fontsize = 12, weight = 'bold', color = 'blue')
plt.plot(Competitor_Flow_List, Competitor_Head_List, 'b--')
""" SUBPLOT 2 """
plt.subplot(1,2,2)
plt.title('Flow - Input Power of ' + label)
plt.xlabel("Flow")
plt.ylabel("Input")
plt.grid(b=True, which='major', color='#666666', linestyle='-')
plt.minorticks_on()
plt.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)
plt.plot(Alarko_x, PelkList, 'r')
plt.plot(Alarko_x, PhidList, 'orange')
plt.plot([firstQ, firstQ], [0, 320], 'r--')
plt.plot([secondQ, secondQ], [0, 320], 'r--')
plt.plot([thirdQ, thirdQ], [0, 320], 'r--')
plt.plot(maxPowerOutputPoint[0], calculateInputPowerFromFlowAndHead(maxPowerOutputPoint[0], maxPowerOutputPoint[1], groupNumber), 'go')
plt.plot(maxPowerOutputPoint[0], maxPowerOutputPoint[0] * maxPowerOutputPoint[1] * 2.72, 'go')
plt.xlim(0, maxFlowPoint[0] + 0.5)
plt.ylim(0, maxPelk + 30)
""" Competitor"""
plt.plot(Competitor_Flow_List, Competitor_Power_List, 'b--')
plt.show()
fig, ax1 = plt.subplots()
ax1.set_title('Flow - Speed of ' + label)
plt.grid(b=True, which='major', color='#666666', linestyle='-')
plt.minorticks_on()
plt.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)
# plt.plot(Alarko_x, PelkList, 'r')
# plt.plot(Alarko_x, PhidList, 'orange')
# plt.plot(Alarko_x, SpeedList, 'orange')
ax1.plot([firstQ, firstQ], [maxFlowPoint[3] - 1000, maxFlowPoint[3] + 200], 'r--')
ax1.plot([secondQ, secondQ], [maxFlowPoint[3] - 1000, maxFlowPoint[3] + 200], 'r--')
ax1.plot([thirdQ, thirdQ], [maxFlowPoint[3] - 1000, maxFlowPoint[3] + 200], 'r--')
# plt.plot(maxPowerOutputPoint[0], calculateInputPowerFromFlowAndHead(maxPowerOutputPoint[0], maxPowerOutputPoint[1], groupNumber), 'go')
# plt.plot(maxPowerOutputPoint[0], maxPowerOutputPoint[0] * maxPowerOutputPoint[1] * 2.72, 'go')
color = 'tab:red'
# ax1.set_xlabel('time (s)')
# ax1.set_ylabel('exp', color=color)
ax1.plot(Alarko_x, SpeedList, color=color)
ax1.tick_params(axis='y', labelcolor=color)
ax1.set_xlabel("Flow")
ax1.set_ylabel("Speed", color=color)
plt.xlim(0, maxFlowPoint[0] + 0.5)
plt.ylim(maxFlowPoint[3] - 1000, maxFlowPoint[3] + 200)
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
ax2.set_ylabel('Torque', color=color) # we already handled the x-label with ax1
ax2.plot(Alarko_x, TorqueList, color=color)
ax2.tick_params(axis='y', labelcolor=color)
plt.show()
print(maxPowerOutputPoint)
print("Speed and Torque")
for i in range(4):
print(round(calculateSpeedFromFlowAndHead(maxPowerOutputPoint[0]*(i+1) / 4, maxPowerOutputPoint[1] * (5 + i) / 8 , groupNumber)), " ", end="")
print(round(calculateTorqueFromFlowAndHead(maxPowerOutputPoint[0]*(i+1) / 4, maxPowerOutputPoint[1] * (5 + i) / 8 , groupNumber), 2))
else:
"""
************************************
Direct Line Control
************************************
"""
speed = maxFlowPoint[3]
groupNumber = maxFlowPoint[4]
EEI = 0.0
firstPointOfLine = (0.0, calculateHeadFromFlowAndSpeed(0.0, speed, groupNumber), groupNumber)
Qh = []
Q_H_Array, maxPower = createFlowHeadArray(firstPointOfLine, speed, maxFlowPoint, groupNumber)
QBreakPointList = []
EEIList = []
for Q_break_point in np.arange(firstPointOfLine[0], maxFlowPoint[0], FLOW_INCREMENT_STEP):
(Qh, CTI_at_best_EEI, EEI) = findEEI([Q_break_point, calculateHeadFromFlowAndSpeed(Q_break_point, speed, groupNumber)],
maxFlowPoint,
speed,
groupNumber)
if Q_break_point % GRAPH_STEP == 0:
x = []
y = []
PelkList = []
QBreakPointList.append(Q_break_point)
EEIList.append(EEI)
for e in Qh:
x.append(e[0])
y.append(e[1])
PelkList.append(calculateInputPowerFromFlowAndHead(e[0], e[1], groupNumber))
def plotGraphs():
plt.figure(figsize=(15,5))
""" SUBPLOT 1"""
plt.subplot(1,2,1)
label = maxFlowPoint[2]
plt.title('Flow - Head of '+label)
plt.xlabel("Flow")
plt.ylabel("Head")
| |
<gh_stars>1-10
#!/usr/bin/python
#
# Copyright (C) 2010-2021 <NAME>, Maerki Informatik
# License: Apache License v2
#
# Siehe http://www.maerki.com/hans/orux
#
# History:
# 2010-06-22, <NAME>, Implementiert
# 2010-06-23, <NAME>, Koordinaten der Karte Massstab 1:50000 angepasst.
# 2011-01-17, <NAME>, Neu koennen Karten in Unterordner gruppiert werden.
# 2011-02-16, <NAME>, Swisstopo hat die Server gewechselt: Neue Url angepasst.
# 2013-09-06, <NAME>, Swisstopo hat die Server gewechselt: Neue Url angepasst.
# 2018-04-24, <NAME>, Swisstopo hat die Server gewechselt: Neue Logik angepasst.
# 2019-06-03, <NAME>, Angepasst an Python 3.7.2.
# 2021-03-28, <NAME>, Massive cleanup.
"""
http://map.geo.admin.ch
http://gpso.de/navigation/utm.html
UTM- Koordinatensystem, WGS84- Kartendatum
http://de.wikipedia.org/wiki/Kartendatum
Geodaetisches Datum
https://www.swisstopo.admin.ch/de/wissen-fakten/geodaesie-vermessung/bezugsysteme/kartenprojektionen.html
Schweizerische Kartenprojektionen
https://www.swisstopo.admin.ch/de/karten-daten-online/calculation-services/navref.html
https://www.swisstopo.admin.ch/content/swisstopo-internet/de/online/calculation-services/_jcr_content/contentPar/tabs/items/dokumente_und_publik/tabPar/downloadlist/downloadItems/8_1467103085694.download/refsys_d.pdf
Umrechnung von Schweizer Landeskoordinaten in ellipsoidische WGS84-Koordinaten
http://de.wikipedia.org/wiki/WGS_84
World Geodetic System 1984 (WGS 84)
"""
import math
import time
import shutil
import pathlib
import pickle
import sqlite3
from dataclasses import dataclass
from multiprocessing import Pool
import requests
import PIL.Image
import rasterio
import rasterio.plot
from oruxmap.utils import projection
from oruxmap.utils.projection import CH1903, BoundsCH1903, create_boundsCH1903_extrema
from oruxmap.utils.context import Context
from oruxmap.utils.orux_xml_otrk2 import OruxXmlOtrk2
from oruxmap.utils.download_zip_and_extract import DownloadZipAndExtractTiff
from oruxmap.layers_switzerland import LIST_LAYERS, LayerParams
from oruxmap.utils.img_png import extract_tile
# Directory layout: all generated artifacts land below <repo>/target.
DIRECTORY_ORUX_SWISSTOPO = pathlib.Path(__file__).absolute().parent
DIRECTORY_RESOURCES = DIRECTORY_ORUX_SWISSTOPO / "resources"
DIRECTORY_BASE = DIRECTORY_ORUX_SWISSTOPO.parent
DIRECTORY_TARGET = DIRECTORY_BASE / "target"
DIRECTORY_CACHE_TIF = DIRECTORY_TARGET / "cache_tif"  # downloaded source tiffs
DIRECTORY_CACHE_PNG = DIRECTORY_TARGET / "cache_png"  # pickled png tiles per tiff
DIRECTORY_LOGS = DIRECTORY_TARGET / "logs"  # debug csv reports
DIRECTORY_MAPS = DIRECTORY_TARGET / "maps"  # final OruxMaps output
DIRECTORY_TARGET.mkdir(exist_ok=True)
DIRECTORY_CACHE_TIF.mkdir(exist_ok=True)
DIRECTORY_CACHE_PNG.mkdir(exist_ok=True)
DIRECTORY_LOGS.mkdir(exist_ok=True)
DIRECTORY_MAPS.mkdir(exist_ok=True)
assert DIRECTORY_MAPS.exists()
# The swisstopo tiffs are huge; disable Pillow's decompression-bomb size limit.
PIL.Image.MAX_IMAGE_PIXELS = None
def directory_png_cache(layer_param: LayerParams, context: Context):
    """Return the png cache directory for one layer, versioned by the context."""
    versioned_name = context.append_version("png")
    return DIRECTORY_CACHE_PNG / versioned_name / layer_param.name
class DurationLogger:
    """Context manager that prints how long the wrapped step took."""

    def __init__(self, step: str):
        self.step = step
        self.start_s = time.perf_counter()

    def __enter__(self):
        return self

    def __exit__(self, _type, value, tb):
        elapsed_s = time.perf_counter() - self.start_s
        print(f"{self.step} took {elapsed_s:0.0f}s")
class OruxMap:
    """
    Builder for one OruxMaps map: creates the map directory, the
    OruxMapsImages.db sqlite tile store and the .otrk2.xml calibration
    file. Use as a context manager; leaving the context finalizes the
    database and optionally zips the map directory.
    """

    def __init__(self, map_name, context):
        assert isinstance(context, Context)
        self.map_name = context.append_version(map_name)
        self.context = context
        self.directory_map = DIRECTORY_MAPS / self.map_name
        print("===== ", self.map_name)
        # Remove zip file
        filename_zip = self.directory_map.with_suffix(".zip")
        if filename_zip.exists():
            filename_zip.unlink()
        # Create empty directory
        for filename in self.directory_map.glob("*.*"):
            filename.unlink()
        self.directory_map.mkdir(parents=True, exist_ok=True)
        self.filename_sqlite = self.directory_map / "OruxMapsImages.db"
        if self.filename_sqlite.exists():
            self.filename_sqlite.unlink()
        # isolation_level=None: autocommit; journaling is disabled because the
        # database is written once and never modified concurrently.
        self.db = sqlite3.connect(self.filename_sqlite, isolation_level=None)
        self.db.execute("pragma journal_mode=OFF")
        self.db.execute(
            """CREATE TABLE tiles (x int, y int, z int, image blob, PRIMARY KEY (x,y,z))"""
        )
        self.db.execute("""CREATE TABLE "android_metadata" (locale TEXT)""")
        self.db.execute("""INSERT INTO "android_metadata" VALUES ("de_CH");""")
        self.xml_otrk2 = OruxXmlOtrk2(
            filename=self.directory_map / f"{self.map_name}.otrk2.xml",
            map_name=self.map_name,
        )

    def __enter__(self):
        return self

    def __exit__(self, _type, value, tb):
        # Finalize: close calibration xml, compact the sqlite db, zip the map.
        self.xml_otrk2.close()
        self.db.commit()
        if not self.context.skip_sqlite_vacuum:
            with DurationLogger("sqlite.execute('VACUUM')") as duration:
                before_bytes = self.filename_sqlite.stat().st_size
                self.db.execute("VACUUM")
                after_bytes = self.filename_sqlite.stat().st_size
                print(
                    f"Vaccum by {100.0*(before_bytes-after_bytes)/before_bytes:0.0f}%"
                )
        self.db.close()
        if not self.context.skip_map_zip:
            with DurationLogger("zip") as duration:
                # NOTE: the returned path is unused; the zip lands next to the map directory.
                filename_zip = shutil.make_archive(
                    base_name=str(self.directory_map),
                    root_dir=str(self.directory_map.parent),
                    base_dir=self.directory_map.name,
                    format="zip",
                )
        print("----- Ready")
        print(
            f'The map now is ready in "{self.directory_map.relative_to(DIRECTORY_BASE)}".'
        )
        print(
            "This directory must be copied 'by Hand' onto your android into 'oruxmaps/mapfiles'."
        )

    def create_layers(self, iMasstabMin: int = 25, iMasstabMax: int = 500):
        """Build every layer whose scale lies within [iMasstabMin, iMasstabMax]."""
        with DurationLogger(f"Layer {self.map_name}") as duration:
            # NOTE: start_s is unused; DurationLogger already measures the time.
            start_s = time.perf_counter()
            for layer_param in LIST_LAYERS:
                if iMasstabMin <= layer_param.scale <= iMasstabMax:
                    self._create_layer(layer_param=layer_param)

    def _create_layer(self, layer_param):
        # Pipeline for one scale: tiff bounds -> png tiles -> sqlite/xml output.
        map_scale = MapScale(self, layer_param)
        map_scale.create_boundsCH1903_pickle()
        map_scale.create_png_pickle()
        map_scale.create_map()
@dataclass
class DebugPng:
    """One png tile extracted from a tiff, used for the debug csv report."""

    tiff_filename: str
    x_tile: int
    y_tile: int
    x_tif_pixel: int
    y_tif_pixel: int

    @staticmethod
    def csv_header():
        """Column names, in the same order as the `csv` property."""
        return "tiff_filename,x_tile,y_tile,x_tif_pixel,y_tif_pixel"

    @property
    def csv(self):
        """This tile as a single csv row (see `csv_header` for the columns)."""
        values = (
            self.tiff_filename,
            self.x_tile,
            self.y_tile,
            self.x_tif_pixel,
            self.y_tif_pixel,
        )
        return ",".join(str(value) for value in values)
class DebugLogger:
    """Writes csv debug reports for one ``MapScale`` into DIRECTORY_LOGS."""

    def __init__(self, map_scale):
        self.map_scale = map_scale

    def report(self, list_tiff_attrs, boundsCH1903_extrema):
        """Write `<scale>_tiff.csv` listing each tiff's bounds plus the extrema."""
        assert isinstance(list_tiff_attrs, list)
        assert isinstance(boundsCH1903_extrema, BoundsCH1903)

        def fopen(extension):
            # Open a log file named after this layer, e.g. debug_log_<name>_tiff.csv.
            filename_base = f"debug_log_{self.map_scale.layer_param.name}"
            filename = DIRECTORY_LOGS / (filename_base + extension)
            return filename.open("w")

        with fopen("_tiff.csv") as f:
            f.write(f"filename,{BoundsCH1903.csv_header('boundsCH1903')}\n")
            for tiff_attrs in list_tiff_attrs:
                assert isinstance(tiff_attrs, TiffImageAttributes)
                f.write(f"{tiff_attrs.filename.name},{tiff_attrs.boundsCH1903.csv}\n")
            # Final row: the bounding box covering all tiffs of this layer.
            f.write(f"all,{boundsCH1903_extrema.csv}\n")
        # No access to list 'debug_pngs'
        # with fopen("_png.csv") as f:
        #     f.write(
        #         f"{DebugPng.csv_header()},{BoundsCH1903.csv_header('boundsCH1903')}\n"
        #     )
        #     for tiff_filename, tiff_boundsCH1903 in list_filename_boundsCH1903:
        #         for debug_png in tiff_image.debug_pngs:
        #             f.write(f"{debug_png.csv},{tiff_image.boundsCH1903.csv}\n")
@dataclass
class CacheTiffBoundsCH1903:
    """Pickle-backed cache of ``TiffImageAttributes`` for one layer/context."""

    layer_param: LayerParams
    context: Context

    @property
    def filename(self):
        """Path of the pickle file inside the layer's png cache directory."""
        base = directory_png_cache(layer_param=self.layer_param, context=self.context)
        return base / "list_filename_boundsCH1903.pickle"

    def _check(self, list_attrs):
        # Fail early if anything other than TiffImageAttributes is cached.
        for attrs in list_attrs:
            assert isinstance(attrs, TiffImageAttributes)

    def dump(self, list_attrs: list):
        """Validate `list_attrs` and pickle it, creating parent directories."""
        self.filename.parent.mkdir(parents=True, exist_ok=True)
        self._check(list_attrs=list_attrs)
        with self.filename.open("wb") as f:
            pickle.dump(list_attrs, f)

    def load(self) -> list:
        """Unpickle, validate and return the cached attribute list."""
        with self.filename.open("rb") as f:
            list_attrs = pickle.load(f)
        self._check(list_attrs=list_attrs)
        return list_attrs
class MapScale:
    """
    This object represents one scale. For example 1:25'000, 1:50'000.

    Pipeline (each step caches its result and is a no-op when the cache
    already exists):
      1. create_boundsCH1903_pickle - download tiffs, pickle their bounds.
      2. create_png_pickle          - render png tiles for each tiff.
      3. create_map                 - write the otrk2 layer entry and sqlite tiles.
    """
    def __init__(self, orux_maps, layer_param):
        self.orux_maps = orux_maps
        self.layer_param = layer_param
        self.debug_logger = DebugLogger(self)
        self.directory_resources = DIRECTORY_RESOURCES / self.layer_param.name
        assert self.directory_resources.exists()
    def create_boundsCH1903_pickle(self):
        """Download this layer's tiffs and pickle their georeference data."""
        c = CacheTiffBoundsCH1903(
            layer_param=self.layer_param, context=self.orux_maps.context
        )
        # Cached from a previous run: nothing to do.
        if c.filename.exists():
            return
        def iter_download_tiffs(filename_url_tiffs):
            # Yield each tiff listed in url_tiffs.txt, downloading it on demand.
            assert filename_url_tiffs.exists()
            directory_cache = DIRECTORY_CACHE_TIF / self.layer_param.name
            directory_cache.mkdir(exist_ok=True)
            with filename_url_tiffs.open("r") as f:
                for url in sorted(f.readlines()):
                    url = url.strip()
                    name = url.split("/")[-1]
                    filename = directory_cache / name
                    # Optional filter to restrict a run to selected tiffs.
                    if self.orux_maps.context.only_tiffs is not None:
                        if filename.name not in self.orux_maps.context.only_tiffs:
                            continue
                    if not filename.exists():
                        print(f"Downloading {filename.relative_to(DIRECTORY_BASE)}")
                        r = requests.get(url)
                        filename.write_bytes(r.content)
                    yield filename
        def iter_filename_tiff():
            if self.layer_param.tiff_filename:
                # For big scales, the image has to be extracted from a zip file
                tiff_filename = (
                    DIRECTORY_CACHE_TIF
                    / self.layer_param.name
                    / self.layer_param.tiff_filename
                )
                d = DownloadZipAndExtractTiff(
                    url=self.layer_param.tiff_url, tiff_filename=tiff_filename
                )
                d.download()
                yield tiff_filename
                return
            filename_url_tiffs = self.directory_resources / "url_tiffs.txt"
            yield from iter_download_tiffs(filename_url_tiffs)
        tiff_image_attributes = []
        for filename in iter_filename_tiff():
            tiff_attributes = TiffImageAttributes.create(
                layer_param=self.layer_param,
                filename=filename,
            )
            tiff_image_attributes.append(tiff_attributes)
        if len(tiff_image_attributes) == 0:
            raise Exception(
                f"No valid tiff for this scale {self.layer_param.scale} found"
            )
        for tiff in tiff_image_attributes:
            self.layer_param.verify_m_per_pixel(tiff.m_per_pixel)
        c.dump(list_attrs=tiff_image_attributes)
    def create_png_pickle(self):
        """
        This takes all tiffs for one scale and creates for each tiff a pickle file with all pngs
        """
        c = CacheTiffBoundsCH1903(
            layer_param=self.layer_param, context=self.orux_maps.context
        )
        assert c.filename.exists()
        list_tiff_attrs = c.load()
        # One argument-dict per tiff, so the work can be farmed out to a Pool.
        arguments = [
            dict(
                context=self.orux_maps.context,
                tiff_attrs=tiff_attrs,
                i=i,
                total=len(list_tiff_attrs),
            )
            for i, tiff_attrs in enumerate(list_tiff_attrs)
        ]
        if self.orux_maps.context.multiprocessing:
            with Pool(8) as p:
                p.map(multiprocess_create_tiles2, arguments, chunksize=1)
            return
        for args in arguments:
            multiprocess_create_tiles(**args)
    def create_map(self):
        """Write the layer calibration entry and copy all tiles into sqlite."""
        if not self.orux_maps.context.skip_tiff_read:
            c = CacheTiffBoundsCH1903(
                layer_param=self.layer_param, context=self.orux_maps.context
            )
            assert c.filename.exists()
            list_tiff_attrs = c.load()
            # Bounding box over all (tile-aligned) tiffs of this layer.
            boundsCH1903_extrema = create_boundsCH1903_extrema()
            for tiff_attrs in list_tiff_attrs:
                # Align the tiff and shrink it to complete tiles
                # lat_m = tiff_images.boundsCH1903_floor.a.lat % layer_param.m_per_tile
                # lon_m = tiff_images.boundsCH1903_floor.a.lon % layer_param.m_per_tile
                boundsCH1903_extrema.extend(tiff_attrs.boundsCH1903_floor)
            print(
                f"{self.layer_param.scale}: {len(list_tiff_attrs)}tifs {boundsCH1903_extrema.lon_m/1000.0:0.3f}x{boundsCH1903_extrema.lat_m/1000.0:0.3f}km"
            )
            width_pixel = int(boundsCH1903_extrema.lon_m / self.layer_param.m_per_pixel)
            height_pixel = int(
                boundsCH1903_extrema.lat_m / self.layer_param.m_per_pixel
            )
            # assert width_pixel % self.layer_param.pixel_per_tile == 0
            # assert height_pixel % self.layer_param.pixel_per_tile == 0
            boundsWGS84 = boundsCH1903_extrema.to_WGS84()
            self.orux_maps.xml_otrk2.write_layer(
                calib=boundsWGS84,
                TILE_SIZE=self.layer_param.pixel_per_tile,
                map_name=self.orux_maps.map_name,
                id=self.layer_param.orux_layer,
                xMax=width_pixel // self.layer_param.pixel_per_tile,
                yMax=height_pixel // self.layer_param.pixel_per_tile,
                height=height_pixel,
                width=width_pixel,
                minLat=boundsWGS84.southEast.lat_m,
                maxLat=boundsWGS84.northWest.lat_m,
                minLon=boundsWGS84.northWest.lon_m,
                maxLon=boundsWGS84.southEast.lon_m,
            )
            for tiff_attrs in list_tiff_attrs:
                tiff_image_converter = TiffImageConverter(
                    context=self.orux_maps.context,
                    tiff_attrs=tiff_attrs,
                )
                tiff_image_converter.append_sqlite(
                    db=self.orux_maps.db, boundsCH1903_extrema=boundsCH1903_extrema
                )
            self.debug_logger.report(
                list_tiff_attrs=list_tiff_attrs, boundsCH1903_extrema=boundsCH1903_extrema
            )
def multiprocess_create_tiles2(argument):
    # Pool.map passes one positional argument; unpack the dict into the
    # real worker's keyword parameters.
    multiprocess_create_tiles(**argument)
def multiprocess_create_tiles(context, tiff_attrs, i, total):
    """Worker: create the tile cache for one tiff (may run in a Pool subprocess)."""
    assert isinstance(context, Context)
    assert isinstance(tiff_attrs, TiffImageAttributes)
    assert isinstance(i, int)
    assert isinstance(total, int)
    # i/total only feed the progress label, e.g. "path/to.tif 3(12)".
    label = f"{tiff_attrs.filename.relative_to(DIRECTORY_BASE)} {i+1}({total})"
    tiff_image_converter = TiffImageConverter(context=context, tiff_attrs=tiff_attrs)
    tiff_image_converter.create_tiles(label=label)
@dataclass
class PngCache:
    """One rendered tile: its position in the tile grid plus the raw png bytes."""
    # Tile column / row indices within the layer's tile grid.
    x_tile: int
    y_tile: int
    # The encoded png image data.
    raw_png: bytes
@dataclass
class TiffCache:
    """All png tiles rendered from one tiff plus the tiff's north-west corner.

    NOTE(review): the hand-written ``__init__`` below suppresses the
    dataclass-generated one; the field declarations still drive repr/eq.
    ``list_png`` always starts empty and is filled via ``append``.
    """
    list_png: list
    orux_layer: int
    nw: CH1903
    def __init__(self, nw: CH1903, orux_layer: int):
        assert isinstance(nw, CH1903)
        assert isinstance(orux_layer, int)
        self.nw = nw
        self.orux_layer = orux_layer
        self.list_png = []
    def append(self, png_cache: PngCache):
        """Add one rendered tile to this tiff's collection."""
        assert isinstance(png_cache, PngCache)
        self.list_png.append(png_cache)
@dataclass
class TiffImageAttributes:
    """Georeference attributes of one source tiff (read once, then pickled)."""

    filename: pathlib.Path
    m_per_pixel: float
    layer_param: LayerParams
    boundsCH1903: BoundsCH1903
    boundsCH1903_floor: BoundsCH1903

    @staticmethod
    def create(filename: pathlib.Path, layer_param: LayerParams):
        """
        Open `filename` with rasterio and derive its CH1903 bounds.

        Returns a fully populated ``TiffImageAttributes``. Asserts that the
        pixels are square/north-up and that the resolution matches
        `layer_param`; prints (but tolerates) tile-size and crop mismatches.
        """
        assert isinstance(filename, pathlib.Path)
        assert isinstance(layer_param, LayerParams)
        with rasterio.open(filename, "r") as dataset:
            pixel_lon = dataset.width
            pixel_lat = dataset.height
            calculated_pixel_per_tile = math.gcd(pixel_lon, pixel_lat)
            if layer_param.pixel_per_tile != calculated_pixel_per_tile:
                print(
                    f"{filename.relative_to(DIRECTORY_BASE)}: pixel_per_tile: expected {layer_param.pixel_per_tile}, calculated {calculated_pixel_per_tile}"
                )
            # GDAL-style geotransform: t[0]/t[3] are the origin (north-west)
            # coordinates, t[1]/t[5] the pixel sizes in lon/lat direction.
            t = dataset.get_transform()
            m_per_pixel = t[1]
            # Square, north-up pixels: lat resolution is the negated lon resolution.
            assert t[1] == -t[5]
            northwest = CH1903(lon_m=t[0], lat_m=t[3])
            southeast = CH1903(
                lon_m=northwest.lon_m + pixel_lon * m_per_pixel,
                lat_m=northwest.lat_m - pixel_lat * m_per_pixel,
            )
            boundsCH1903 = BoundsCH1903(nw=northwest, se=southeast)
            # Shrink to whole tiles so every tiff starts on a tile boundary.
            boundsCH1903_floor = boundsCH1903.floor(
                floor_m=layer_param.m_per_tile,
            )
            if not boundsCH1903.equals(boundsCH1903_floor):
                print(f"{filename.relative_to(DIRECTORY_BASE)}: cropped")
            layer_param.verify_m_per_pixel(m_per_pixel)
            projection.assertSwissgridIsNorthWest(boundsCH1903)
            return TiffImageAttributes(
                filename=filename,
                m_per_pixel=m_per_pixel,
                layer_param=layer_param,
                boundsCH1903=boundsCH1903,
                boundsCH1903_floor=boundsCH1903_floor,
            )
class TiffImageConverter:
def __init__(self, context, tiff_attrs):
assert isinstance(context, Context)
assert isinstance(tiff_attrs, TiffImageAttributes)
self.context = context
self.tiff_attrs = tiff_attrs
self.layer_param = tiff_attrs.layer_param
self.filename = tiff_attrs.filename
self.boundsCH1903 = tiff_attrs.boundsCH1903
self.boundsCH1903_floor = tiff_attrs.boundsCH1903_floor
self.debug_pngs = []
def _load_image(self):
with rasterio.open(self.filename, "r") as dataset:
if len(dataset.indexes) == 3:
# https://rasterio.readthedocs.io/en/latest/topics/image_processing.html
# rasterio: (bands, rows, columns)
# PIL: rows, columns, bands)
data = dataset.read()
img_arr = rasterio.plot.reshape_as_image(data)
img = PIL.Image.fromarray(img_arr, mode="RGB")
del data
del img_arr
assert dataset.width == img.width
assert dataset.height == img.height
else:
img = PIL.Image.open(self.filename, mode="r")
img = img.convert("RGB")
return img
@property
def _filename_pickle_png_cache(self):
return (
directory_png_cache(layer_param=self.layer_param, context=self.context)
/ self.filename.with_suffix(".pickle").name
)
def create_tiles(self, label):
if self._filename_pickle_png_cache.exists():
# The tile have already been created
return
self._filename_pickle_png_cache.parent.mkdir(exist_ok=True, parents=True)
if self.context.save_diskspace:
self.filename.unlink()
self._create_tiles2(label)
def _create_tiles2(self, label):
lon_offset_m = round(
self.boundsCH1903_floor.nw.lon_m - self.boundsCH1903.nw.lon_m
)
lat_offset_m = round(
self.boundsCH1903_floor.nw.lat_m - self.boundsCH1903.nw.lat_m
)
assert lon_offset_m >= 0
assert lat_offset_m <= 0
x_first_tile_pixel = round(lon_offset_m // self.layer_param.m_per_pixel)
y_first_tile_pixel = round(-lat_offset_m // self.layer_param.m_per_pixel)
assert 0 <= x_first_tile_pixel < self.layer_param.pixel_per_tile
assert 0 <= y_first_tile_pixel < self.layer_param.pixel_per_tile
if self.context.skip_tiff_read:
return
tiff_cache = TiffCache(
orux_layer=self.layer_param.orux_layer, nw=self.boundsCH1903_floor.nw
)
with self._load_image() as img:
#
# Die Tiles fuer die Karte zusammenkopieren
#
width = img.width - x_first_tile_pixel
height = img.height - y_first_tile_pixel
x_count = width // | |
-------
collected : `bytes`
Raises
------
ValueError
If `n` is given as a negative integer.
EofError
Connection lost before `n` bytes were received.
CancelledError
If the reader task is cancelled not by receiving eof.
"""
if n < 1:
if n < 0:
raise ValueError(f'.read_exactly called with negative `n`: {n!r}.')
else:
return b''
chunks = self._chunks
if chunks:
chunk = chunks[0]
offset = self._offset
else:
if self._at_eof:
raise EOFError(b'')
chunk = yield from self._wait_for_data()
offset = 0
chunk_size = len(chunk)
if offset == 0:
if chunk_size > n:
self._offset = n
return chunk[:n]
# chunk same size as the requested?
elif chunk_size == n:
del chunks[0]
# offset is already 0, nice!
return chunk
else:
n -= len(chunk)
collected = [chunk]
del chunks[0]
else:
end = offset+n
if chunk_size > end:
self._offset = end
return chunk[offset:end]
# chunk_size + offset end when the requested's end is.
elif chunk_size == end:
del chunks[0]
self._offset = 0
return chunk[offset:]
else:
n -= (chunk_size - offset)
collected = [memoryview(chunk)[offset:]]
del chunks[0]
while True:
if chunks:
chunk = chunks[0]
else:
if self._at_eof:
self._offset = 0
raise EOFError(b''.join(collected))
chunk = yield from self._wait_for_data()
chunk_size = len(chunk)
n -= chunk_size
if n > 0:
collected.append(chunk)
del chunks[0]
continue
if n == 0:
collected.append(chunk)
del chunks[0]
self._offset = 0
return b''.join(collected)
offset = self._offset = chunk_size+n
collected.append(memoryview(chunk)[:offset])
return b''.join(collected)
def _read_until(self, boundary):
"""
Payload reader task, what reads until `boundary` is hit.
This method is a generator.
Parameters
----------
boundary : `bytes`
The amount of bytes to read.
Returns
-------
collected : `bytes` or `bytearray`
Raises
------
EofError
Connection lost before `n` bytes were received.
CancelledError
If the reader task is cancelled not by receiving eof.
"""
# This method is mainly used for multipart reading, and we can forget optimizations usually.
boundary_length = len(boundary)
chunks = self._chunks
if chunks:
chunk = chunks[0]
offset = self._offset
else:
if self._at_eof:
raise EOFError(b'')
chunk = yield from self._wait_for_data()
offset = 0
# Optimal case is when we instantly hit boundary.
if len(chunk) > boundary_length:
index = chunk.find(boundary, offset)
if index != -1:
# Barrier found
data = chunk[offset:index]
offset += boundary
if offset == len(chunk):
del chunks[0]
offset = 0
self._offset = offset
return data
offset = len(chunk) - boundary_length
# Second case, we create a bytearray and push the data to it.
data = bytearray(chunk)
while True:
if chunks:
chunk = chunks[0]
else:
if self._at_eof:
raise EOFError(b'')
chunk = yield from self._wait_for_data()
data.extend(chunk)
index = chunk.find(boundary, offset)
if index != -1:
# Barrier found
offset = len(chunk)-len(data)+index+boundary_length
if offset == len(chunk):
del chunks[0]
offset = 0
self._offset = offset
del data[index:]
return data
offset = len(data) - boundary_length
del chunks[0]
def _read_multipart(self, boundary, is_first):
    """
    Payload reader task, which reads a single multipart field: the boundary separator (for the first
    field only), the field's headers and the field's content.
    This method is a generator.
    Parameters
    ----------
    boundary : `bytes`
        The multipart boundary, without the leading `b'--'` (it is prepended here where needed).
    is_first : `bool`
        Whether this is the first field of the payload, so the preamble up to and including the first
        boundary still has to be consumed.
    Returns
    -------
    is_more : `bool`
        Whether the payload contains more multipart field.
    headers : `None` or ``imultidict`` of (`str`, `str`)
        Received response headers.
    chunk : `None`, `bytes` or `bytearray`
        The field content.
    Raises
    ------
    EOFError
        Connection lost before enough data was received.
    PayloadError
        Invalid data received.
    CancelledError
        If the reader task is cancelled not by receiving eof.
    """
    if is_first:
        # Drop everything before (and including) the first boundary. Subsequent calls start right at
        # the next field's headers, because the tail of this method already consumed the separator.
        yield from self._read_until(b'--' + boundary)
        # A boundary is followed either by b'\r\n' (a field follows) or by b'--' (payload terminator).
        try:
            maybe_end_1 = yield from self._read_exactly(2)
        except EOFError:
            # End of payload? Ok i guess.
            return False, None, None
        else:
            if maybe_end_1 == b'\r\n':
                pass
            elif maybe_end_1 == b'--':
                # End of payload? The terminator should still be followed by b'\r\n'.
                try:
                    maybe_end_2 = yield from self._read_exactly(2)
                except EOFError:
                    return False, None, None
                else:
                    if maybe_end_2 == b'\r\n':
                        return False, None, None
                    else:
                        raise PayloadError(f'Multipart boundary not ended with b\'--\'+b\'\r\n\', got '
                            f'b\'--\'+{maybe_end_2!r}')
            else:
                raise PayloadError(f'Multipart boundary not ended either with b\'--\' or b\'\r\n\', got '
                    f'{maybe_end_1!r}')
    # Each field starts with its own headers.
    chunk, offset = yield from self._read_http_helper()
    headers = yield from self._read_http_headers(chunk, offset)
    length = headers.get(CONTENT_LENGTH, None)
    if length is None:
        # No content length given: the content lasts until the next boundary.
        part = yield from self._read_until(b'\r\n--'+boundary)
    else:
        # Content length known: read exactly that much, then expect the separator right after it.
        # (+4 covers the leading b'\r\n--'.)
        length = int(length)
        part = yield from self._read_exactly(length)
        try:
            maybe_boundary = yield from self._read_exactly(len(boundary)+4)
        except EOFError:
            return False, None, part
        if maybe_boundary != b'\r\n--'+boundary:
            raise PayloadError(f'Multipart payload not ended with boundary, expected: b\'\r\n\' + b\'--\' + '
                f'{boundary!r}, got {maybe_boundary!r}.')
    # Same trailer logic as above: b'\r\n' means more fields follow, b'--' + b'\r\n' ends the payload.
    try:
        maybe_end_1 = yield from self._read_exactly(2)
    except EOFError:
        return False, headers, part
    if maybe_end_1 == b'\r\n':
        return True, headers, part
    if maybe_end_1 == b'--':
        try:
            maybe_end_2 = yield from self._read_exactly(2)
        except EOFError:
            return False, headers, part
        if maybe_end_2 == b'\r\n':
            return False, headers, part
        raise PayloadError(f'Multipart boundary not ended with b\'--\'+b\'\r\n\', got '
            f'b\'--\'+{maybe_end_2!r}')
    raise PayloadError(f'Multipart boundary not ended either with b\'--\' or b\'\r\n\', got '
        f'{maybe_end_1!r}')
async def read_multipart(self, headers):
    """
    Reads the multipart fields of the payload from the protocol, one field at a time.
    This method is an asynchronous generator.
    Parameters
    ----------
    headers : ``imultidict``
        The response's or the request's headers.
    Yields
    ------
    headers : `imultidict`
        The multipart's headers.
    data : `bytes` or `bytes-like`
        The multipart's data.
    Raises
    ------
    EOFError
        Connection lost before enough data was received.
    PayloadError
        Invalid data received.
    CancelledError
        If the reader task is cancelled not by receiving eof.
    ContentEncodingError
        - `'content_encoding'` was given as `'br'` meanwhile brotli or brotlipy are not installed.
        - `'content_encoding'` is not an from the expected values.
    StopAsyncIteration
        The payload contains no more fields.
    """
    mime = MimeType(headers[CONTENT_TYPE])
    boundary = mime.parameters['boundary']
    is_first = True
    is_more = True
    while is_more:
        is_more, field_headers, field_data = await self.set_payload_reader(
            self._read_multipart(boundary, is_first))
        is_first = False
        if field_data is None:
            continue
        # Undo any content-transfer-encoding applied to the field.
        try:
            transfer_encoding = field_headers[CONTENT_TRANSFER_ENCODING]
        except KeyError:
            pass
        else:
            transfer_encoding = transfer_encoding.lower()
            if transfer_encoding == 'base64':
                field_data = base64.b64decode(field_data)
            elif transfer_encoding == 'quoted-printable':
                field_data = binascii.a2b_qp(field_data)
            elif transfer_encoding in ('binary', '8bit', '7bit'):
                pass
            else:
                raise PayloadError(f'Unknown transfer encoding: {transfer_encoding!r}')
        # Undo any content-encoding (compression) applied to the field.
        try:
            content_encoding = field_headers[CONTENT_ENCODING]
        except KeyError:
            pass
        else:
            content_encoding = content_encoding.lower()
            decompressor = self._decompressor_for(content_encoding)
            if (decompressor is not None):
                try:
                    field_data = decompressor.decompress(field_data)
                except COMPRESSION_ERRORS:
                    raise PayloadError('Cannot decompress data.') from None
        yield field_headers, field_data
def _read_http_helper(self):
"""
Payload reader task helper. Returns if any chunk is already received, or waits for a new one.
This method is a generator.
Returns
-------
chunk : `bytes`
A received chunk of data.
offset : `int`
The offset, till the chunk was already used up.
Raises
------
EOFError
Connection lost before a chunk was received.
CancelledError
If the reader task is cancelled not by receiving eof.
"""
chunks = self._chunks
if chunks:
chunk = chunks[0]
offset = self._offset
else:
if self._at_eof:
raise EOFError(b'')
chunk = yield from self._wait_for_data()
offset = 0
return chunk, offset
def _read_http_response(self):
    """
    Payload reader task, which reads an http response's status line and headers.
    This method is a generator.
    Returns
    -------
    response_message : ``RawResponseMessage``
    Raises
    ------
    ConnectionError
        Connection lost before enough data was received.
    PayloadError
        Invalid data received.
    CancelledError
        If the reader task is cancelled not by receiving eof.
    """
    try:
        try:
            chunk, offset = yield from self._read_http_helper()
        except EOFError as exception:
            # An "empty" eof before any payload arrived means the connection dropped without sending
            # a response at all; surface that as a connection error instead.
            exception_parameters = exception.args
            if (exception_parameters is None) or (not exception_parameters) or (not exception_parameters[0]):
                raise ConnectionError(CONNECTION_ERROR_EOF_NO_HTTP_HEADER)
            raise
        matched = HTTP_STATUS_RP.match(chunk, offset)
        if matched is None:
            # stupid fallback: the status line did not fit inside of the chunk, so collect it
            # line-wise instead.
            line = yield from self._read_until_CRLF()
            matched = HTTP_STATUS_LINE_RP.fullmatch(line)
            if matched is None:
                raise PayloadError(f'Invalid status line: {line!r}.')
            chunk, offset = yield from self._read_http_helper()
        else:
            offset = matched.end()
        major, minor, status, reason = matched.groups()
        headers = yield from self._read_http_headers(chunk, offset)
        return RawResponseMessage(HttpVersion(int(major), int(minor)), int(status), reason, headers)
    except EOFError as exception:
        raise PayloadError(PAYLOAD_ERROR_EOF_AT_HTTP_HEADER) from exception
def _read_http_request(self):
"""
Payload reader task, which reads an http request's status line and headers.
This method is a generator.
Returns
-------
request_message : ``RawRequestMessage``
# ---- extraction artifact: boundary between unrelated file fragments ----
this method will get the packet, takes what does need to be
taken and let the remaining go, so it returns two values.
first value which belongs to this field and the second is
the remaining which does need to be dissected with
other "field classes".
@param pkt: holds the whole packet
@param s: holds only the remaining data which is not dissected yet.
"""
ss = -1
flags = None
seq = pkt.underlayer.fields["seq"]
push = False
flags_bits = list(int2bin(pkt.underlayer.fields["flags"]))
if flags_bits[11] == '1':
flags = 'A'
if flags_bits[12] == '1':
flags = flags + 'P'
if 'P' in flags:
push = True
else:
push = False
if not is_created_stream_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"], pkt.underlayer.fields["dport"]):
seqn = pkt.underlayer.fields["seq"]
stream = Stream(s, push, seqn)
create_stream_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"], stream)
elif is_created_stream_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"]):
seqn = pkt.underlayer.fields["seq"]
stream = Stream(s, push, seqn)
build_stream(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"], stream)
if not dissector.Dissector.preprocess_done:
return "", ""
if len(sessions) > 0:
if is_stream_end(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"], stream):
ss = get_stream(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"], stream)
if not ss == -1:
s = ss
else:
return "", ""
self.myresult = ""
resultlist = []
if s.upper().startswith("SSH"):
return "", s
for c in s:
ustruct = struct.unpack(self.fmt, c)
byte = str(hex(ustruct[0]))[2:]
if len(byte) == 1:
byte = "0" + byte
self.myresult = self.myresult + byte
if not s.startswith("SSH") and len(self.myresult) > 12:
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"]):
pakl = str(int(self.myresult[:8], 16))
padl = str(int(self.myresult[8:10], 16))
payloadl = int(pakl) - int(padl) - 1
opcode = self.get_code_msg(int(self.myresult[10:12], 16))
payload = self.myresult[12:12 + payloadl * 2]
padding = self.myresult[12 + payloadl * 2:12 + payloadl * 2\
+ int(padl) * 2]
resultlist.append(("packet_length", pakl))
resultlist.append(("padding_length", padl))
resultlist.append(("opcode", opcode))
if is_encrypted_session(pkt.underlayer.underlayer.fields["src"],
pkt.underlayer.underlayer.fields["dst"],
pkt.underlayer.fields["sport"],
pkt.underlayer.fields["dport"]):
if is_created_session(pkt.underlayer.underlayer.fields["src"],
pkt.underlayer.underlayer.fields["dst"],
pkt.underlayer.fields["sport"],
pkt.underlayer.fields["dport"]):
encrypted_payload = base64.standard_b64encode(\
self.get_ascii(self.myresult[:\
get_mac_length(pkt.underlayer.underlayer.fields["src"],
pkt.underlayer.underlayer.fields["dst"],
pkt.underlayer.fields["sport"],
pkt.underlayer.fields["dport"]) * 2]))
else:
encrypted_payload = base64.standard_b64encode(\
self.myresult[:])
resultlist.append(("encrypted_payload", encrypted_payload))
if is_created_session(pkt.underlayer.underlayer.fields["src"],
pkt.underlayer.underlayer.fields["dst"],
pkt.underlayer.fields["sport"],
pkt.underlayer.fields["dport"]):
mac = base64.standard_b64encode(\
self.get_ascii(self.myresult[\
get_mac_length(pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"]) * 2:]))
resultlist.append(("mac", mac))
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"]) and\
opcode.startswith("SSH_MSG_KEXDH_INIT"):
try:
e_length = int(self.myresult[12:20], 16)
e = base64.standard_b64encode(\
self.get_ascii(self.myresult[20:20 + e_length * 2]))
resultlist.append(("e_length", str(e_length)))
resultlist.append(("e", e))
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"]) and\
opcode.startswith("SSH_MSG_KEXDH_REPLY"):
try:
server_public_host_key_and_certificates_K_S_length =\
int(self.myresult[12:20], 16)
server_public_host_key_and_certificates_K_S =\
self.myresult[20:20 +\
server_public_host_key_and_certificates_K_S_length * 2]
f_length = int(self.myresult[20 + \
server_public_host_key_and_certificates_K_S_length\
* 2:20 + server_public_host_key_and_certificates_K_S_length\
* 2 + 8], 16)
f = base64.standard_b64encode(\
self.get_ascii(self.myresult[20 +\
server_public_host_key_and_certificates_K_S_length\
* 2 + 8:20 + server_public_host_key_and_certificates_K_S_length\
* 2 + 8 + f_length * 2]))
signature_of_h_length = int(self.myresult[20 +\
server_public_host_key_and_certificates_K_S_length\
* 2 + 8 + f_length * 2:20 +\
server_public_host_key_and_certificates_K_S_length\
* 2 + 8 + f_length * 2 + 8], 16)
signature_of_h = self.myresult[20 +\
server_public_host_key_and_certificates_K_S_length\
* 2 + 8 + f_length * 2 + 8:20 +\
server_public_host_key_and_certificates_K_S_length\
* 2 + 8 + f_length * 2 + 8 +\
signature_of_h_length * 2]
resultlist.append(\
("server_public_host_key_and_certificates_K_S_length",\
str(server_public_host_key_and_certificates_K_S_length)))
resultlist.append(\
("server_public_host_key_and_certificates_K_S",\
base64.standard_b64encode(\
self.get_ascii(server_public_host_key_and_certificates_K_S))))
resultlist.append(("f_length", str(f_length)))
resultlist.append(("f", f))
resultlist.append(("signature_of_h_length",
str(signature_of_h_length)))
resultlist.append(("signature_of_h",
base64.standard_b64encode(\
self.get_ascii(signature_of_h))))
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"])\
and opcode.startswith("SSH_MSG_SERVICE_REQUEST"):
try:
service_name_length = int(self.myresult[12:20], 16)
service_name = self.myresult[20:20 \
+ service_name_length * 2]
resultlist.append(("service_name_length",
str(service_name_length)))
resultlist.append(("service_name",
base64.standard_b64encode(self.get_ascii(service_name))))
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"])\
and opcode.startswith("SSH_MSG_SERVICE_ACCEPT"):
try:
service_name_length = int(self.myresult[12:20], 16)
service_name = self.myresult[20:20 +\
service_name_length * 2]
resultlist.append(("service_name_length",
str(service_name_length)))
resultlist.append(("service_name",
self.get_ascii(service_name)))
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"])\
and opcode.startswith("SSH_MSG_NEWKEYS"):
try:
set_as_encrypted(pkt.underlayer.underlayer.fields["src"],
pkt.underlayer.underlayer.fields["dst"],
pkt.underlayer.fields["sport"],
pkt.underlayer.fields["dport"])
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"])\
and opcode.startswith("SSH_MSG_DISCONNECT"):
try:
reason_code = self.get_discnct_msg(int(\
self.myresult[12:20], 16)) * 2
description_length = int(\
self.myresult[20:28], 16)
description = self.myresult[28:28 +\
description_length * 2]
language_tag_length = int(\
self.myresult[28 + description_length * 2:28 +\
description_length * 2 + 8], 16)
language_tag = self.myresult[28 + description_length\
* 2 + 8:28 + description_length * 2 + 8 +\
language_tag_length * 2]
resultlist.append(("reason_code", reason_code))
resultlist.append(("description_length",
str(description_length)))
resultlist.append(("description",
self.get_ascii(description)))
resultlist.append(("language_tag_length",
str(language_tag_length)))
resultlist.append(("language_tag",
self.get_ascii(language_tag)))
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"])\
and opcode.startswith("SSH_MSG_IGNORE"):
try:
data_length = int(self.myresult[12:20], 16)
data = self.myresult[20:20 + data_length * 2]
resultlist.append(("data_length", str(data_length)))
resultlist.append(\
("data", base64.standard_b64encode(self.get_ascii(data))))
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"])\
and opcode.startswith("SSH_MSG_USERAUTH_PK_OK"):
try:
public_key_algorithm_name_from_the_request_length =\
int(self.myresult[12:20], 16)
public_key_algorithm_name_from_the_request =\
self.myresult[20:20 +\
public_key_algorithm_name_from_the_request_length * 2]
public_key_blob_from_the_request_length = int(\
self.myresult[20 + \
public_key_algorithm_name_from_the_request_length * 2:20\
+ public_key_algorithm_name_from_the_request_length * 2\
+ 8], 16)
public_key_blob_from_the_request = self.myresult[20 +\
public_key_algorithm_name_from_the_request_length * 2 +\
8:20 + public_key_algorithm_name_from_the_request_length\
* 2 + 8 + public_key_blob_from_the_request_length * 2]
resultlist.append((\
"public_key_algorithm_name_from_the_request_length",
str(public_key_algorithm_name_from_the_request_length)))
resultlist.append(\
("public_key_algorithm_name_from_the_request",\
self.get_ascii(\
public_key_algorithm_name_from_the_request)))
resultlist.append(\
("public_key_blob_from_the_request_length",
str(public_key_blob_from_the_request_length)))
resultlist.append(("public_key_blob_from_the_request",
self.get_ascii(public_key_blob_from_the_request)))
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"])\
and opcode.startswith("SSH_MSG_DEBUG"):
try:
always_display_boolean = int(self.myresult[12:14], 16)
description_length = int(self.myresult[14:22], 16)
description = self.myresult[22:22 +\
description_length * 2]
language_tag_length = int(self.myresult[22 +\
description_length * 2:22 + description_length\
* 2 + 8], 16)
language_tag = self.myresult[22 + description_length\
* 2 + 8:22 + description_length * 2 + 8 +\
language_tag_length * 2]
resultlist.append(("always_display_boolean",
always_display_boolean))
resultlist.append(("description_length",
str(description_length)))
resultlist.append(("description",
self.get_ascii(description)))
resultlist.append(("language_tag_length",
str(language_tag_length)))
resultlist.append(("language_tag",
self.get_ascii(language_tag)))
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],
pkt.underlayer.fields["dport"])\
and opcode.startswith("SSH_MSG_UNIMPLEMENTED"):
try:
seqn = int(self.myresult[12:20], 16)
resultlist.append(\
("packet sequence number of rejected message", seqn))
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"])\
and opcode.startswith("SSH_MSG_CHANNEL_DATA"):
try:
recipient_channel = int(self.myresult[12:20], 16)
data_length = int(self.myresult[20:28], 16)
data = self.myresult[28:28 + data_length * 2]
resultlist.append(("recipient_channel", recipient_channel))
resultlist.append(("data_length", str(data_length)))
resultlist.append(\
("data", base64.standard_b64encode(self.get_ascii(data))))
self.found = True
except Exception:
self.found = False
if not is_encrypted_session(\
pkt.underlayer.underlayer.fields["src"],\
pkt.underlayer.underlayer.fields["dst"],\
pkt.underlayer.fields["sport"],\
pkt.underlayer.fields["dport"])\
and opcode.startswith("SSH_MSG_USERAUTH_REQUEST"):
try:
user_name_length = int(self.myresult[12:20], 16)
user_name = self.myresult[20:20 + user_name_length * 2]
service_name_length = int(self.myresult[20 +\
user_name_length * 2:20 + user_name_length * 2 + 8], 16)
service_name = self.myresult[20 + user_name_length *\
2 + 8:20 + user_name_length * 2 + 8 +\
service_name_length * 2]
method_name_length = int(self.myresult[20 +\
user_name_length * 2 + 8 + service_name_length *\
2:20 + user_name_length * 2 + 8 + service_name_length\
* 2 + 8], 16)
method_name = self.myresult[20 + user_name_length *\
2 + 8 + service_name_length * 2 + 8:20 +\
user_name_length * 2 + 8 + service_name_length *\
2 + 8 + method_name_length * 2]
resultlist.append(("user_name_length",
str(user_name_length)))
resultlist.append(("user_name",
self.get_ascii(user_name)))
resultlist.append(("service_name_length",
str(service_name_length)))
resultlist.append(("service_name",
self.get_ascii(service_name)))
resultlist.append(("method_name_length",
str(method_name_length)))
resultlist.append(("method_name",
self.get_ascii(method_name)))
if method_name.startswith("publickey"):
boolean = int(self.myresult[20 + user_name_length *\
2 + 8 + service_name_length * 2 + 8 +\
method_name_length * 2:20 + user_name_length *\
2 + 8 + service_name_length * 2 + 8 +\
method_name_length * 2 + 8], 16)
public_key_algorithm_name_length =\
int(self.myresult[20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 + method_name_length\
* 2 + 8:20 + user_name_length * 2 + 8 +\
service_name_length * 2 + 8 + method_name_length\
* 2 + 8 + 8], 16)
public_key_algorithm_name = self.myresult[20 +\
user_name_length * 2 + 8 + service_name_length *\
2 + 8 + method_name_length * 2 + 8 + 8:20 +\
user_name_length * 2 + 8 + service_name_length *\
2 + 8 + method_name_length * 2 + 8 + 8 +\
public_key_algorithm_name_length * 2]
resultlist.append(("boolean", boolean))
resultlist.append(("public_key_algorithm_name_length",
str(public_key_algorithm_name_length)))
resultlist.append(("public_key_algorithm_name",
self.get_ascii(public_key_algorithm_name)))
if boolean == 0:
public_key_blob_length =\
int(self.myresult[20 + user_name_length * 2 +\
8 + service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8 +\
public_key_algorithm_name_length * 2:20 +\
user_name_length * 2 + 8 +\
service_name_length * 2 + 8 +\
method_name_length * 2 + 8 + 8 +\
public_key_algorithm_name_length * 2 + 8],
16)
public_key_blob = self.myresult[20 +\
user_name_length * 2 + 8 + service_name_length *\
2 + 8 + method_name_length * 2 + 8 + 8 +\
public_key_algorithm_name_length * 2 + 8:20 +\
user_name_length * 2 + 8 +\
service_name_length * 2 + 8 | |
to_gregorian(islam_year, 12, 10)
if y == year:
self[date(y, m, d)] = "Feast of the Sacrifice"
# Last Monday of August.
c = Calendar(firstweekday=MONDAY)
monthcal = c.monthdatescalendar(year, 8) # all dates in August in full weeks.
for i in range(1, len(monthcal) + 1):
if monthcal[-i][0].month == 8: # checks if the Monday is in August.
self[monthcal[i][0]] = "National Heroes' Day"
break
self[date(year, 11, 30)] = "Bonifacio Day"
self[date(year, 12, 25)] = "Christmas Day"
self[date(year, 12, 30)] = "Rizal Day"
class PH(Philippines):
    """Alias of ``Philippines``, named after its ISO 3166-1 alpha-2 country code."""
    pass
class Pakistan(HolidayBase):
    """Implements public holidays in Pakistan."""
    def __init__(self, **kwargs):
        # ISO 3166-1 alpha-2 country code.
        self.country = "PK"
        HolidayBase.__init__(self, **kwargs)
    def _populate(self, year):
        """Fills ``self`` with the holidays of the given gregorian `year`."""
        self[date(year, 2, 5)] = "Kashmir Solidarity Day"
        self[date(year, 3, 23)] = "Pakistan Day"
        self[date(year, 5, 1)] = "Labor Day"
        self[date(year, 8, 14)] = "Independence Day"
        self[date(year, 11, 9)] = "Iqbal Day"
        # December 25 doubles as the birthday of the founder of Pakistan.
        self[date(year, 12, 25)] = "Christmas Day"
        # Feast of the Sacrifice: 10th - 12th days of the 12th islamic month.
        for offset in (-1, 0, 1):
            islam_year = from_gregorian(year + offset, 8, 22)[0]
            for islam_day in (10, 11, 12):
                y, m, d = to_gregorian(islam_year, 12, islam_day)
                if y == year:
                    self[date(y, m, d)] = "Feast of the Sacrifice"
        # Eid al-Fitr: 1st - 3rd days of the 10th islamic month.
        for offset in (-1, 0, 1):
            islam_year = from_gregorian(year + offset, 6, 15)[0]
            for islam_day in (1, 2, 3):
                y, m, d = to_gregorian(islam_year, 10, islam_day)
                if y == year:
                    self[date(y, m, d)] = "Eid al-Fitr"
        # Mawlid: 12th day of the 3rd islamic month.
        for offset in (-1, 0, 1):
            islam_year = from_gregorian(year + offset, 11, 20)[0]
            y, m, d = to_gregorian(islam_year, 3, 12)
            if y == year:
                self[date(y, m, d)] = "Mawlid"
        # Day of Ashura: 10th and 11th days of the 1st islamic month.
        for offset in (-1, 0, 1):
            islam_year = from_gregorian(year + offset, 10, 1)[0]
            for islam_day in (10, 11):
                y, m, d = to_gregorian(islam_year, 1, islam_day)
                if y == year:
                    self[date(y, m, d)] = "Day of Ashura"
        # 27th day of the month of Rajab, the 7th month in the islamic calendar.
        for offset in (-1, 0, 1):
            islam_year = from_gregorian(year + offset, 4, 13)[0]
            y, m, d = to_gregorian(islam_year, 7, 27)
            if y == year:
                self[date(y, m, d)] = "<NAME>"
        self[date(year, 9, 6)] = "Defence Day"
        self[date(year, 9, 11)] = "Death Anniversary of Quaid-e-Azam"
class PK(Pakistan):
    """Alias of ``Pakistan``, named after its ISO 3166-1 alpha-2 country code."""
    pass
class Bangladesh(HolidayBase):
    """Implements public holidays in Bangladesh."""
    def __init__(self, **kwargs):
        # ISO 3166-1 alpha-2 country code.
        self.country = "BD"
        HolidayBase.__init__(self, **kwargs)
    def _populate(self, year):
        """Fills ``self`` with the holidays of the given gregorian `year`."""
        # Every holiday handled here falls on a fixed gregorian date.
        for month, day, name in (
            (2, 21, "Language Martyrs' Day"),
            (3, 17, "Mujib's birthday"),
            (3, 26, "Independence Day"),
            (4, 14, "Bengali New Year's Day"),
            (4, 15, "Bengali New Year's Day"),
            (5, 1, "Labor Day"),
            (8, 15, "National Mourning Day"),
            (12, 16, "Victory Day"),
        ):
            self[date(year, month, day)] = name
class BD(Bangladesh):
    """Alias of ``Bangladesh``, named after its ISO 3166-1 alpha-2 country code."""
    pass
class Egypt(HolidayBase):
    """Implements public holidays in Egypt."""
    def __init__(self, **kwargs):
        # ISO 3166-1 alpha-2 country code.
        self.country = "EG"
        HolidayBase.__init__(self, **kwargs)
    def _populate(self, year):
        """Fills ``self`` with the holidays of the given gregorian `year`."""
        self[date(year, 1, 1)] = "New Year's Day"
        self[date(year, 1, 7)] = "Coptic Christmas"
        # Commemorates the 2011 revolution, so it exists only from 2011 on.
        if year >= 2011:
            self[date(year, 1, 25)] = "Revolution Day 2011"
        # Sinai Liberation Day exists only from 1982 on.
        if year >= 1982:
            self[date(year, 4, 25)] = "Sinai Liberation Day"
        self[date(year, 5, 1)] = "Labor Day"
        self[date(year, 7, 23)] = "Revolution Day"
        self[date(year, 10, 6)] = "Armed Forces Day"
        # The Monday following the orthodox Easter.
        for offset in (-1, 0, 1):
            ds = easter(year + offset, method=EASTER_ORTHODOX) + timedelta(days=1)
            if ds.year == year:
                self[ds] = "<NAME>"
        # Islamic New Year: 1st day of the 1st month of the following islamic year.
        for offset in (-1, 0, 1):
            islam_year = from_gregorian(year + offset, 9, 11)[0]
            y, m, d = to_gregorian(islam_year + 1, 1, 1)
            if y == year:
                self[date(y, m, d)] = "Islamic New Year"
        # 12th day of the 3rd islamic month.
        for offset in (-1, 0, 1):
            islam_year = from_gregorian(year + offset, 11, 20)[0]
            y, m, d = to_gregorian(islam_year, 3, 12)
            if y == year:
                self[date(y, m, d)] = "Birth of Prophet"
        # Eid al-Fitr: 1st - 3rd days of the 10th islamic month.
        for offset in (-1, 0, 1):
            islam_year = from_gregorian(year + offset, 6, 15)[0]
            for islam_day in (1, 2, 3):
                y, m, d = to_gregorian(islam_year, 10, islam_day)
                if y == year:
                    self[date(y, m, d)] = "Eid al-Fitr"
        # Feast of the Sacrifice: 10th - 13th days of the 12th islamic month.
        for offset in (-1, 0, 1):
            islam_year = from_gregorian(year + offset, 8, 22)[0]
            for islam_day in (10, 11, 12, 13):
                y, m, d = to_gregorian(islam_year, 12, islam_day)
                if y == year:
                    self[date(y, m, d)] = "Feast of the Sacrifice"
class EG(Egypt):
    """Alias of ``Egypt``, named after its ISO 3166-1 alpha-2 country code."""
    pass
class China(HolidayBase):
    """Implements public holidays in China."""
    def __init__(self, **kwargs):
        # ISO 3166-1 alpha-2 country code.
        self.country = "CN"
        HolidayBase.__init__(self, **kwargs)
    def _populate(self, year):
        """Fills ``self`` with the holidays of the given gregorian `year`."""
        self[date(year, 1, 1)] = "New Year's Day"
        # Chinese New Year observes 3 days of holidays: 1st - 3rd days of the 1st lunar month.
        for offset in (-1, 0, 1):
            for lunar_day in (1, 2, 3):
                ds = Converter.Lunar2Solar(Lunar(year + offset, 1, lunar_day)).to_date()
                if ds.year == year:
                    self[ds] = "Chinese New Year"
        self[date(year, 4, 4)] = "Tomb-Sweeping Day"
        self[date(year, 4, 5)] = "Tomb-Sweeping Day"
        self[date(year, 5, 1)] = "Labor Day"
        # 5th day of the 5th lunar month.
        for offset in (-1, 0, 1):
            ds = Converter.Lunar2Solar(Lunar(year + offset, 5, 5)).to_date()
            if ds.year == year:
                self[ds] = "Dragon Boat Festival"
        # 15th day of the 8th lunar month.
        for offset in (-1, 0, 1):
            ds = Converter.Lunar2Solar(Lunar(year + offset, 8, 15)).to_date()
            if ds.year == year:
                self[ds] = "Mid-Autumn Festival"
        # National Day observes 3 days of holidays.
        for day in (1, 2, 3):
            self[date(year, 10, day)] = "National Day"
class CN(China):
    """Alias of ``China``, named after its ISO 3166-1 alpha-2 country code."""
    pass
class Russia(HolidayBase):
    """Implements public holidays in Russia."""
    def __init__(self, **kwargs):
        # ISO 3166-1 alpha-2 country code.
        self.country = "RU"
        HolidayBase.__init__(self, **kwargs)
    def _populate(self, year):
        """Fills ``self`` with the holidays of the given gregorian `year`."""
        # The New Year's holidays cover the first six days of January.
        for day in range(1, 7):
            self[date(year, 1, day)] = "New Year's Day"
        for month, day, name in (
            (1, 7, "Orthodox Christmas Day"),
            (12, 25, "Christmas Day"),
            (2, 23, "Defender of the Fatherland Day"),
            (3, 8, "International Women's Day"),
            (8, 22, "National Flag Day"),
            (5, 1, "Spring and Labour Day"),
            (5, 9, "Victory Day"),
            (6, 12, "Russia Day"),
            (11, 4, "Unity Day"),
        ):
            self[date(year, month, day)] = name
class RU(Russia):
    """Alias of ``Russia``, named after its ISO 3166-1 alpha-2 country code."""
    pass
class Belarus(HolidayBase):
    """Implements public holidays in Belarus."""
    def __init__(self, **kwargs):
        # ISO 3166-1 alpha-2 country code.
        self.country = "BY"
        HolidayBase.__init__(self, **kwargs)
    def _populate(self, year):
        """Fills ``self`` with the holidays of the given gregorian `year`."""
        self[date(year, 1, 1)] = "New Year's Day"
        self[date(year, 1, 7)] = "Orthodox Christmas Day"
        self[date(year, 3, 8)] = "International Women's Day"
        # The 9th day after the orthodox Easter.
        commemoration_day = easter(year, EASTER_ORTHODOX) + timedelta(days=9)
        self[commemoration_day] = "Commemoration Day"
        self[date(year, 5, 1)] = "Spring and Labour Day"
        self[date(year, 5, 9)] = "Victory Day"
        self[date(year, 7, 3)] = "Independence Day"
        self[date(year, 11, 7)] = "October Revolution Day"
        self[date(year, 12, 25)] = "Christmas Day"
class BY(Belarus):
    """Alias of ``Belarus``, named after its ISO 3166-1 alpha-2 country code."""
    pass
class UnitedArabEmirates(HolidayBase):
"""Implements public holidays in United Arab Emirates."""
def __init__(self, **kwargs):
self.country = "AE"
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
self[date(year, 1, 1)] = "New Year's Day"
for offset in range(-1, 2, 1):
islam_year = from_gregorian(year + offset, 6, 15)[0]
y1, m1, d1 = to_gregorian(islam_year, 9, 29)
y2, m2, d2 = to_gregorian(
islam_year, 9, 30
)
| |
# <gh_stars>0  (scraper artifact; commented out so the module can be parsed)
import sys
import random, pdb, math, pickle, glob, time, os, re
import torch
import numpy as np
from natsort import natsorted
class DataLoader:
def __init__(self, fname, opt, dataset='simulator', single_shard=False, all_batches=False, randomize=True):
if opt.debug:
single_shard = True
self.opt = opt
self.random = random.Random()
self.random.seed(12345) # use this so that the same batches will always be picked
if dataset == 'i80' or dataset == 'us101':
data_dir = f'traffic-data/state-action-cost/data_{dataset}_v0'
else:
data_dir = dataset
if single_shard:
# quick load for debugging
data_files = [f'{next(os.walk(data_dir))[1][0]}.txt/']
else:
data_files = next(os.walk(data_dir))[1]
self.images = []
self.actions = []
self.costs = []
self.states = []
self.ids = []
self.ego_car_images = []
# if opt.training_method == 'finetune_sim':
# costs_path = data_dir + ".costs"
# costs = torch.load(costs_path)
# self.costs = costs
# print("costs: ",len(costs), len(costs[0]), costs[0][0].keys())
# states_path = data_dir + ".states"
# states = torch.load(states_path)
# self.states = states
# print("states: ",len(states), states[0].shape)
# actions_path = data_dir + ".actions"
# actions = torch.load(actions_path)
# self.actions = actions
# print("actions: ",len(actions), actions[0].shape)
# others_path = data_dir + ".others"
# others = torch.load(others_path)
# road_completed = others["road_completed"]
# collided = others["collided"]
# offscreen = others["offscreen"]
# self.car_sizes = others["car_sizes"]
# path = data_dir.split("/")
# model_path = path[-1]
# path = path[:-1]
# path += ["videos_simulator", model_path]
# images_path = "/".join(path)
# episode_files = natsorted(os.listdir(images_path))
# # print(episode_files[:5])
# episode_images = []
# episode_ego_images = []
# for episode in episode_files:
# images = natsorted(os.listdir(images_path + "/" + episode + "/all/"))
# if '.ipynb_checkpoints' in images:
# images.remove('.ipynb_checkpoints')
# for image in images:
# image = Image.open(images_path + "/" + episode + "/all/" + image)
# # print(np.array(image).shape)
# image = np.array(image)
# episode_images.append(image)
# self.images.append(episode_images)
# ego_images = natsorted(os.listdir(images_path + "/" + episode + "/ego/"))
# if '.ipynb_checkpoints' in ego_images:
# ego_images.remove('.ipynb_checkpoints')
# for ego_image in ego_images:
# ego_image = Image.open(images_path + "/" + episode + "/all/" + ego_image)
# # print(np.array(image).shape)
# ego_image = np.array(ego_image)
# episode_ego_images.append(ego_image)
# self.ego_car_images.append(episode_ego_images)
# self.finetune_indx = []
# for episode_i in range(len(self.images)):
# if not road_completed[episode_i]:
# self.states[episode_i] = self.states[episode_i][-opt.npred + -opt.ncond:]
# self.actions[episode_i] = self.actions[episode_i][-opt.npred + -opt.ncond:]
# self.costs[episode_i] = self.costs[episode_i][-opt.npred + -opt.ncond:]
# self.images[episode_i] = self.images[episode_i][-opt.npred + -opt.ncond:]
# self.ego_car_images[episode_i] = self.ego_car_images[episode_i][-opt.npred + -opt.ncond:]
# self.finetune_indx += episode_i
# combined_data_path = f'{data_dir}/all_data.pth'
# torch.save({
# 'images': self.images,
# 'actions': self.actions,
# 'costs': self.costs,
# 'states': self.states,
# 'ids': self.ids,
# 'ego_car': self.ego_car_images,
# }, combined_data_path)
# self.n_episodes = len(self.images)
# print(f'Number of episodes: {self.n_episodes}')
# splits_path = f'traffic-data/state-action-cost/data_{dataset}_v0' + '/splits.pth'
# car_sizes_path = f'traffic-data/state-action-cost/data_{dataset}_v0' + '/car_sizes.pth'
# else:
for df in data_files:
combined_data_path = f'{data_dir}/{df}/all_data.pth'
if os.path.isfile(combined_data_path):
print(f'[loading data shard: {combined_data_path}]')
data = torch.load(combined_data_path)
self.images += data.get('images')
self.actions += data.get('actions')
self.costs += data.get('costs')
self.states += data.get('states')
self.ids += data.get('ids')
self.ego_car_images += data.get('ego_car')
# print(len(data.get('images')))
data_images = data.get('images')
# print(type(data_images),len(data_images),type(data_images[0]),len(data_images[0]),data_images[0].shape)
else:
print(data_dir)
images = []
actions = []
costs = []
states = []
ids = glob.glob(f'{data_dir}/{df}/car*.pkl')
ids.sort()
ego_car_images = []
for f in ids:
print(f'[loading {f}]')
fd = pickle.load(open(f, 'rb'))
Ta = fd['actions'].size(0)
Tp = fd['pixel_proximity_cost'].size(0)
Tl = fd['lane_cost'].size(0)
# assert Ta == Tp == Tl # TODO Check why there are more costs than actions
# if not(Ta == Tp == Tl): pdb.set_trace()
images.append(fd['images'])
actions.append(fd['actions'])
costs.append(torch.cat((
fd.get('pixel_proximity_cost')[:Ta].view(-1, 1),
fd.get('lane_cost')[:Ta].view(-1, 1),
), 1),)
states.append(fd['states'])
ego_car_images.append(fd['ego_car'])
print(f'Saving {combined_data_path} to disk')
torch.save({
'images': images,
'actions': actions,
'costs': costs,
'states': states,
'ids': ids,
'ego_car': ego_car_images,
}, combined_data_path)
self.images += images
self.actions += actions
self.costs += costs
self.states += states
self.ids += ids
self.ego_car_images += ego_car_images
self.n_episodes = len(self.images)
print(f'Number of episodes: {self.n_episodes}')
f'traffic-data/state-action-cost/data_{dataset}_v0'
splits_path = data_dir + '/splits.pth'
car_sizes_path = data_dir + '/car_sizes.pth'
print(f'[loading car sizes: {car_sizes_path}]')
self.car_sizes = torch.load(car_sizes_path)
if os.path.exists(splits_path):
print(f'[loading data splits: {splits_path}]')
self.splits = torch.load(splits_path)
self.train_indx = self.splits.get('train_indx')
self.valid_indx = self.splits.get('valid_indx')
self.test_indx = self.splits.get('test_indx')
else:
print('[generating data splits]')
rgn = np.random.RandomState(0)
perm = rgn.permutation(self.n_episodes)
n_train = int(math.floor(self.n_episodes * 0.8))
n_valid = int(math.floor(self.n_episodes * 0.1))
self.train_indx = perm[0 : n_train]
self.valid_indx = perm[n_train : n_train + n_valid]
self.test_indx = perm[n_train + n_valid :]
torch.save(dict(
train_indx=self.train_indx,
valid_indx=self.valid_indx,
test_indx=self.test_indx,
), splits_path)
stats_path = data_dir + '/data_stats.pth'
if os.path.isfile(stats_path):
print(f'[loading data stats: {stats_path}]')
stats = torch.load(stats_path)
self.a_mean = stats.get('a_mean')
self.a_std = stats.get('a_std')
self.s_mean = stats.get('s_mean')
self.s_std = stats.get('s_std')
else:
print('[computing action stats]')
all_actions = []
for i in self.train_indx:
all_actions.append(self.actions[i])
all_actions = torch.cat(all_actions, 0)
self.a_mean = torch.mean(all_actions, 0)
self.a_std = torch.std(all_actions, 0)
print('[computing state stats]')
all_states = []
for i in self.train_indx:
all_states.append(self.states[i][:, 0])
all_states = torch.cat(all_states, 0)
self.s_mean = torch.mean(all_states, 0)
self.s_std = torch.std(all_states, 0)
torch.save({'a_mean': self.a_mean,
'a_std': self.a_std,
's_mean': self.s_mean,
's_std': self.s_std}, stats_path)
self.what = None
if all_batches:
self.total_instances = {}
self.episode_index = 0
self.timestep = 0
self.episode_dict = {}
if opt.training_method == 'finetune_train':
self.finetune_dict = {}
self.t_finetune_dict = None
def get_total_instances(self, split, what):
    """Count how many (ncond + npred)-frame windows the *split* contains.

    Args:
        split: 'train' | 'valid' | 'test'; any other value is treated as
            'finetune_train' (episodes come from ``self.finetune_dict``).
        what: 'eval' truncates the episode list to ``opt.eval_nepisodes``
            (when positive) and selects ``opt.eval_nframes_overlap`` as the
            window stride; any other value selects
            ``opt.finetune_nframes_overlap``.

    Returns:
        int: the total window count (also cached in
        ``self.total_instances[split]``; *what* is remembered in
        ``self.what`` for later use by ``get_batch_fm``).
    """
    if split == 'train':
        indx = self.train_indx
    elif split == 'valid':
        indx = self.valid_indx
    elif split == 'test':
        indx = self.test_indx
    else:  # split == 'finetune_train':
        indx = list(self.finetune_dict.keys())
    # elif split == 'finetune_sim':
    #     indx = self.finetune_indx
    self.total_instances[split] = 0
    # if what == 'finetune' and self.opt.finetune_nepisodes > 0:
    #     indx = indx[:self.opt.finetune_nepisodes]
    if what == 'eval' and self.opt.eval_nepisodes > 0:
        indx = indx[:self.opt.eval_nepisodes]
    self.what = what
    # Window stride differs between evaluation and finetuning.
    overlap = self.opt.eval_nframes_overlap if what == 'eval' else self.opt.finetune_nframes_overlap
    print("len of indx in get total instances:", len(indx))
    for index in indx:
        if split == 'finetune_train':
            # Finetuning counts the remaining per-episode window indices.
            self.total_instances[split] += len(self.finetune_dict[index])
        # elif split == 'finetune_sim':
        #     self.total_instances[split] += len(self.finetune_indx[index])
        else:
            # Number of stride-`overlap` windows of length ncond+npred that
            # fit in the episode; episode length is the min of the image and
            # state streams, which can disagree.
            self.total_instances[split] += (min(self.images[index].size(0), self.states[index].size(0)) - (self.opt.ncond + self.opt.npred))//overlap + 1
    return self.total_instances[split]
def set_indices_dict(self, split):
if split == 'train':
indx = self.train_indx
elif split == 'valid':
indx = self.valid_indx
elif split == 'test':
indx = self.test_indx
print(len(indx))
if split not in self.episode_dict:
self.episode_dict[split] = {}
if self.opt.finetune_nepisodes>0:
indx = indx[:self.opt.finetune_nepisodes]
for index in indx:
episode_length = min(self.images[index].size(0), self.states[index].size(0))
self.episode_dict[split][index] = np.arange(episode_length - (self.opt.npred + self.opt.ncond))
# get batch to use for forward modeling
# a sequence of ncond given states, a sequence of npred actions,
# and a sequence of npred states to be predicted
def get_batch_fm(self, split, npred=-1, cuda=True, return_episode_index = False, all_batches = False, randomize = True, what='train'):
# Choose the correct device
flag = 0
device = torch.device('cuda') if cuda else torch.device('cpu')
if split == 'train':
indx = self.train_indx
elif split == 'valid':
indx = self.valid_indx
elif split == 'test':
indx = self.test_indx
elif split == 'finetune_train':
# print(self.t_finetune_dict)
if not self.t_finetune_dict:
self.t_finetune_dict = self.finetune_dict.copy()
indx = list(self.t_finetune_dict.keys())
elif split == 'finetune_sim':
sim = self.finetune_indx
# print(len(indx))
if self.what and self.what == 'eval' and self.opt.eval_nepisodes > 0:
indx = indx[:self.opt.eval_nepisodes]
if all_batches:
if not randomize and self.episode_index is None:
self.episode_index = 0
self.timestep = 0
if npred == -1:
npred = self.opt.npred
# print("train ind ex, len(self.train_indx)")
# print(indx, len(indx), self.total_instances)
images, states, actions, costs, ids, sizes, ego_cars = [], [], [], [], [], [], []
nb = 0
if all_batches:
episodes_indices = np.zeros((self.opt.batch_size,2)).astype(int)
T = self.opt.ncond + npred
while nb < self.opt.batch_size:
if randomize:
# print("indx: ",indx)
if all_batches:
# if split == 'finetune_train':
if not self.t_finetune_dict:
break
else:
s = self.random.choice(list(self.t_finetune_dict.keys()))
# elif split == 'finetune_sim':
# s = self.random.choice(indx)
else:
s = self.random.choice(indx)
else:
# s = self.random.choice(indx)
if self.episode_index is None:
break
s = indx[self.episode_index]
# min is important since sometimes numbers do not align causing issues in stack operation below
episode_length = min(self.images[s].size(0), self.states[s].size(0))
if episode_length >= T:
if randomize:
# s = self.random.choice(indx)
if all_batches:
# print("only s:",s)
# print("s and len of dict[s]",s,len(self.t_finetune_dict[s]))
t = self.random.choice(self.t_finetune_dict[s])
index, = np.where(self.t_finetune_dict[s]==t)
self.t_finetune_dict[s] = np.delete(self.t_finetune_dict[s],index)
# print("s,t,len(dict[s])",s,t,len(self.t_finetune_dict[s]))
if len(self.t_finetune_dict[s])==0:
# print(f"Done with episode no: {s}")
del self.t_finetune_dict[s]
if len(list(self.t_finetune_dict)) == 0:
self.t_finetune_dict = None
# print("after deletion:",self.t_finetune_dict)
# print(len(images), self.total_instances[split], len(self.episode_dict[split]))
else:
t = self.random.randint(0, episode_length - T)
else:
# s = self.episode_index
t = self.timestep
# print(s,t,len(self.images),episode_length, len(self.images[s]), len(self.states[s]), self.images[s][t : t + T].shape, self.states[s][t : t + T, 0].shape)
images.append(self.images[s][t : t + T].to(device))
actions.append(self.actions[s][t : t + T].to(device))
states.append(self.states[s][t : t + T, 0].to(device)) # discard 6 neighbouring cars
costs.append(self.costs[s][t : t + T].to(device))
# if split == 'finetune_sim':
# ids.append(self.ids[self.test_indx[s]])
# else:
ids.append(self.ids[s])
ego_cars.append(self.ego_car_images[s].to(device))
# if split == 'finetune_sim':
# splits = self.ids[self.test_indx[s]].split('/')
# else:
| |
method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'nk', 'refresh']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_designs_nk_permission_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_designs_nk_permission_get`")
# verify the required parameter 'nk' is set
if ('nk' not in params) or (params['nk'] is None):
raise ValueError("Missing the required parameter `nk` when calling `portals_id_designs_nk_permission_get`")
collection_formats = {}
resource_path = '/Portals/{id}/designs/{nk}/permission'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'nk' in params:
path_params['nk'] = params['nk']
query_params = {}
if 'refresh' in params:
query_params['refresh'] = params['refresh']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DesignPermissionSet',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_designs_nk_permission_post(self, id, nk, **kwargs):
    """
    Creates a new instance in permission of this model.
    Synchronous by default; pass a `callback` function to make the
    request asynchronous, in which case the request thread is returned.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.portals_id_designs_nk_permission_post(id, nk, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: Portal id (required)
    :param str nk: Foreign key for designs. (required)
    :param DesignPermissionSet data:
    :return: DesignPermissionSet
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # With a callback the delegate returns the request thread; without one
    # it returns the deserialized data directly.
    if kwargs.get('callback'):
        return self.portals_id_designs_nk_permission_post_with_http_info(id, nk, **kwargs)
    response = self.portals_id_designs_nk_permission_post_with_http_info(id, nk, **kwargs)
    return response
def portals_id_designs_nk_permission_post_with_http_info(self, id, nk, **kwargs):
    """
    Creates a new instance in permission of this model.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.portals_id_designs_nk_permission_post_with_http_info(id, nk, callback=callback_function)
    :param callback function: The callback function
    for asynchronous request. (optional)
    :param str id: Portal id (required)
    :param str nk: Foreign key for designs. (required)
    :param DesignPermissionSet data:
    :return: DesignPermissionSet
    If the method is called asynchronously,
    returns the request thread.
    """
    all_params = ['id', 'nk', 'data']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    # locals() snapshots self/id/nk/kwargs/all_params as the working param
    # dict; kwargs entries are validated against all_params and merged in.
    # NOTE: do not introduce new locals above this line.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method portals_id_designs_nk_permission_post" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `portals_id_designs_nk_permission_post`")
    # verify the required parameter 'nk' is set
    if ('nk' not in params) or (params['nk'] is None):
        raise ValueError("Missing the required parameter `nk` when calling `portals_id_designs_nk_permission_post`")
    collection_formats = {}
    resource_path = '/Portals/{id}/designs/{nk}/permission'.replace('{format}', 'json')
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']
    if 'nk' in params:
        path_params['nk'] = params['nk']
    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'data' in params:
        body_params = params['data']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
    # Authentication setting
    auth_settings = ['access_token']
    return self.api_client.call_api(resource_path, 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='DesignPermissionSet',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def portals_id_designs_nk_permission_put(self, id, nk, **kwargs):
    """
    Update permission of this model.
    Synchronous by default; pass a `callback` function to make the
    request asynchronous, in which case the request thread is returned.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.portals_id_designs_nk_permission_put(id, nk, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: Portal id (required)
    :param str nk: Foreign key for designs. (required)
    :param DesignPermissionSet data:
    :return: DesignPermissionSet
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # With a callback the delegate returns the request thread; without one
    # it returns the deserialized data directly.
    if kwargs.get('callback'):
        return self.portals_id_designs_nk_permission_put_with_http_info(id, nk, **kwargs)
    response = self.portals_id_designs_nk_permission_put_with_http_info(id, nk, **kwargs)
    return response
def portals_id_designs_nk_permission_put_with_http_info(self, id, nk, **kwargs):
    """
    Update permission of this model.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.portals_id_designs_nk_permission_put_with_http_info(id, nk, callback=callback_function)
    :param callback function: The callback function
    for asynchronous request. (optional)
    :param str id: Portal id (required)
    :param str nk: Foreign key for designs. (required)
    :param DesignPermissionSet data:
    :return: DesignPermissionSet
    If the method is called asynchronously,
    returns the request thread.
    """
    all_params = ['id', 'nk', 'data']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    # locals() snapshots self/id/nk/kwargs/all_params as the working param
    # dict; kwargs entries are validated against all_params and merged in.
    # NOTE: do not introduce new locals above this line.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method portals_id_designs_nk_permission_put" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `portals_id_designs_nk_permission_put`")
    # verify the required parameter 'nk' is set
    if ('nk' not in params) or (params['nk'] is None):
        raise ValueError("Missing the required parameter `nk` when calling `portals_id_designs_nk_permission_put`")
    collection_formats = {}
    resource_path = '/Portals/{id}/designs/{nk}/permission'.replace('{format}', 'json')
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']
    if 'nk' in params:
        path_params['nk'] = params['nk']
    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'data' in params:
        body_params = params['data']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
    # Authentication setting
    auth_settings = ['access_token']
    return self.api_client.call_api(resource_path, 'PUT',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='DesignPermissionSet',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def portals_id_designs_nk_portal_get(self, id, nk, **kwargs):
    """
    Fetches belongsTo relation portal.
    Synchronous by default; pass a `callback` function to make the
    request asynchronous, in which case the request thread is returned.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.portals_id_designs_nk_portal_get(id, nk, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: Portal id (required)
    :param str nk: Foreign key for designs. (required)
    :param bool refresh:
    :return: Portal
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # With a callback the delegate returns the request thread; without one
    # it returns the deserialized data directly.
    if kwargs.get('callback'):
        return self.portals_id_designs_nk_portal_get_with_http_info(id, nk, **kwargs)
    response = self.portals_id_designs_nk_portal_get_with_http_info(id, nk, **kwargs)
    return response
def portals_id_designs_nk_portal_get_with_http_info(self, id, nk, **kwargs):
"""
Fetches belongsTo relation portal.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_portal_get_with_http_info(id, nk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param bool refresh:
:return: Portal
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'nk', 'refresh']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_designs_nk_portal_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_designs_nk_portal_get`")
# verify the required parameter 'nk' is set
if ('nk' not in params) or (params['nk'] is None):
raise ValueError("Missing the required parameter `nk` when calling `portals_id_designs_nk_portal_get`")
collection_formats = {}
resource_path = '/Portals/{id}/designs/{nk}/portal'.replace('{format}', 'json')
path_params = {}
if 'id' | |
kg/m3
Args:
value (float): value for IDD Field `Moisture Content 4`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `moisture_content_4` or None if not set
"""
return self["Moisture Content 4"]
@moisture_content_4.setter
def moisture_content_4(self, value=None):
    """Corresponds to IDD field `Moisture Content 4` (units: kg/m3)."""
    self["Moisture Content 4"] = value
@property
def liquid_transport_coefficient_4(self):
    """Return IDD field `Liquid Transport Coefficient 4` (m2/s), or None if not set."""
    key = "Liquid Transport Coefficient 4"
    return self[key]

@liquid_transport_coefficient_4.setter
def liquid_transport_coefficient_4(self, value=None):
    """Assign *value* to IDD field `Liquid Transport Coefficient 4` (m2/s)."""
    key = "Liquid Transport Coefficient 4"
    self[key] = value
@property
def moisture_content_5(self):
    """Return IDD field `Moisture Content 5` (kg/m3), or None if not set."""
    key = "Moisture Content 5"
    return self[key]

@moisture_content_5.setter
def moisture_content_5(self, value=None):
    """Assign *value* to IDD field `Moisture Content 5` (kg/m3)."""
    key = "Moisture Content 5"
    self[key] = value
@property
def liquid_transport_coefficient_5(self):
    """Return IDD field `Liquid Transport Coefficient 5` (m2/s), or None if not set."""
    key = "Liquid Transport Coefficient 5"
    return self[key]

@liquid_transport_coefficient_5.setter
def liquid_transport_coefficient_5(self, value=None):
    """Assign *value* to IDD field `Liquid Transport Coefficient 5` (m2/s)."""
    key = "Liquid Transport Coefficient 5"
    self[key] = value
@property
def moisture_content_6(self):
    """Return IDD field `Moisture Content 6` (kg/m3), or None if not set."""
    key = "Moisture Content 6"
    return self[key]

@moisture_content_6.setter
def moisture_content_6(self, value=None):
    """Assign *value* to IDD field `Moisture Content 6` (kg/m3)."""
    key = "Moisture Content 6"
    self[key] = value
@property
def liquid_transport_coefficient_6(self):
    """Return IDD field `Liquid Transport Coefficient 6` (m2/s), or None if not set."""
    key = "Liquid Transport Coefficient 6"
    return self[key]

@liquid_transport_coefficient_6.setter
def liquid_transport_coefficient_6(self, value=None):
    """Assign *value* to IDD field `Liquid Transport Coefficient 6` (m2/s)."""
    key = "Liquid Transport Coefficient 6"
    self[key] = value
@property
def moisture_content_7(self):
    """Return IDD field `Moisture Content 7` (kg/m3), or None if not set."""
    key = "Moisture Content 7"
    return self[key]

@moisture_content_7.setter
def moisture_content_7(self, value=None):
    """Assign *value* to IDD field `Moisture Content 7` (kg/m3)."""
    key = "Moisture Content 7"
    self[key] = value
@property
def liquid_transport_coefficient_7(self):
    """Return IDD field `Liquid Transport Coefficient 7` (m2/s), or None if not set."""
    key = "Liquid Transport Coefficient 7"
    return self[key]

@liquid_transport_coefficient_7.setter
def liquid_transport_coefficient_7(self, value=None):
    """Assign *value* to IDD field `Liquid Transport Coefficient 7` (m2/s)."""
    key = "Liquid Transport Coefficient 7"
    self[key] = value
@property
def moisture_content_8(self):
    """Return IDD field `Moisture Content 8` (kg/m3), or None if not set."""
    key = "Moisture Content 8"
    return self[key]

@moisture_content_8.setter
def moisture_content_8(self, value=None):
    """Assign *value* to IDD field `Moisture Content 8` (kg/m3)."""
    key = "Moisture Content 8"
    self[key] = value
@property
def liquid_transport_coefficient_8(self):
    """Return IDD field `Liquid Transport Coefficient 8` (m2/s), or None if not set."""
    key = "Liquid Transport Coefficient 8"
    return self[key]

@liquid_transport_coefficient_8.setter
def liquid_transport_coefficient_8(self, value=None):
    """Assign *value* to IDD field `Liquid Transport Coefficient 8` (m2/s)."""
    key = "Liquid Transport Coefficient 8"
    self[key] = value
@property
def moisture_content_9(self):
    """Return IDD field `Moisture Content 9` (kg/m3), or None if not set."""
    key = "Moisture Content 9"
    return self[key]

@moisture_content_9.setter
def moisture_content_9(self, value=None):
    """Assign *value* to IDD field `Moisture Content 9` (kg/m3)."""
    key = "Moisture Content 9"
    self[key] = value
@property
def liquid_transport_coefficient_9(self):
    """Return IDD field `Liquid Transport Coefficient 9` (m2/s), or None if not set."""
    key = "Liquid Transport Coefficient 9"
    return self[key]

@liquid_transport_coefficient_9.setter
def liquid_transport_coefficient_9(self, value=None):
    """Assign *value* to IDD field `Liquid Transport Coefficient 9` (m2/s)."""
    key = "Liquid Transport Coefficient 9"
    self[key] = value
@property
def moisture_content_10(self):
    """Return IDD field `Moisture Content 10` (kg/m3), or None if not set."""
    key = "Moisture Content 10"
    return self[key]

@moisture_content_10.setter
def moisture_content_10(self, value=None):
    """Assign *value* to IDD field `Moisture Content 10` (kg/m3)."""
    key = "Moisture Content 10"
    self[key] = value
@property
def liquid_transport_coefficient_10(self):
    """Return IDD field `Liquid Transport Coefficient 10` (m2/s), or None if not set."""
    key = "Liquid Transport Coefficient 10"
    return self[key]

@liquid_transport_coefficient_10.setter
def liquid_transport_coefficient_10(self, value=None):
    """Assign *value* to IDD field `Liquid Transport Coefficient 10` (m2/s)."""
    key = "Liquid Transport Coefficient 10"
    self[key] = value
@property
def moisture_content_11(self):
    """Return IDD field `Moisture Content 11` (kg/m3), or None if not set."""
    key = "Moisture Content 11"
    return self[key]

@moisture_content_11.setter
def moisture_content_11(self, value=None):
    """Assign *value* to IDD field `Moisture Content 11` (kg/m3)."""
    key = "Moisture Content 11"
    self[key] = value
@property
def liquid_transport_coefficient_11(self):
    """Return the value of IDD field `Liquid Transport Coefficient 11`.

    Units: m2/s

    Returns:
        float: the stored value, or None if the field is not set.
    """
    return self["Liquid Transport Coefficient 11"]

@liquid_transport_coefficient_11.setter
def liquid_transport_coefficient_11(self, value=None):
    """Set IDD field `Liquid Transport Coefficient 11`.

    Args:
        value (float): the new field value. Units: m2/s.

    Raises:
        ValueError: if `value` is not a valid value.
    """
    self["Liquid Transport Coefficient 11"] = value
@property
def moisture_content_12(self):
    """Return the value of IDD field `Moisture Content 12`.

    Units: kg/m3

    Returns:
        float: the stored value, or None if the field is not set.
    """
    return self["Moisture Content 12"]

@moisture_content_12.setter
def moisture_content_12(self, value=None):
    """Set IDD field `Moisture Content 12`.

    Args:
        value (float): the new field value. Units: kg/m3.

    Raises:
        ValueError: if `value` is not a valid value.
    """
    self["Moisture Content 12"] = value
@property
def liquid_transport_coefficient_12(self):
    """Return the value of IDD field `Liquid Transport Coefficient 12`.

    Units: m2/s

    Returns:
        float: the stored value, or None if the field is not set.
    """
    return self["Liquid Transport Coefficient 12"]

@liquid_transport_coefficient_12.setter
def liquid_transport_coefficient_12(self, value=None):
    """Set IDD field `Liquid Transport Coefficient 12`.

    Args:
        value (float): the new field value. Units: m2/s.

    Raises:
        ValueError: if `value` is not a valid value.
    """
    self["Liquid Transport Coefficient 12"] = value
@property
def moisture_content_13(self):
    """Return the value of IDD field `Moisture Content 13`.

    Units: kg/m3

    Returns:
        float: the stored value, or None if the field is not set.
    """
    return self["Moisture Content 13"]

@moisture_content_13.setter
def moisture_content_13(self, value=None):
    """Set IDD field `Moisture Content 13`.

    Args:
        value (float): the new field value. Units: kg/m3.

    Raises:
        ValueError: if `value` is not a valid value.
    """
    self["Moisture Content 13"] = value
@property
def liquid_transport_coefficient_13(self):
    """Return the value of IDD field `Liquid Transport Coefficient 13`.

    Units: m2/s

    Returns:
        float: the stored value, or None if the field is not set.
    """
    return self["Liquid Transport Coefficient 13"]

@liquid_transport_coefficient_13.setter
def liquid_transport_coefficient_13(self, value=None):
    """Set IDD field `Liquid Transport Coefficient 13`.

    Args:
        value (float): the new field value. Units: m2/s.

    Raises:
        ValueError: if `value` is not a valid value.
    """
    self["Liquid Transport Coefficient 13"] = value
@property
def moisture_content_14(self):
"""field `Moisture Content 14`
| Units: kg/m3
Args:
value (float): value for IDD Field `Moisture Content | |
a difference?
"""
after_b = """\
Americans live in the most severe weather-prone country on Earth. Each year, Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000 tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly weather impacts every American. Communities can now rely on the National Weather Service’s StormReady program to help them guard against the ravages of Mother Nature.
Some 90% of all presidentially declared disasters are weather related, leading to around 500 deaths per year and nearly $14 billion in damage. StormReady, a program started in 1999 in Tulsa, OK, helps arm America's communities with the communication and safety skills needed to save lives and property– before and during the event. StormReady helps community leaders and emergency managers strengthen local safety programs.
StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?
"""
self.run_test(
before_b=before_b,
after_b=after_b,
before_sel=("1.261", "1.261"),
after_sel=("1.272", "1.272"),
command_name="forward-word",
)
#@+node:ekr.20201130090918.67: *3* forward-word-extend-selection
def test_forward_word_extend_selection(self):
    """Test case for forward-word-extend-selection.

    The anchor stays at 1.395 while the selection head extends to 3.4;
    the body text itself is unchanged (before_b == after_b).
    """
    before_b = """\
Americans live in the most severe weather-prone country on Earth. Each year, Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000 tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly weather impacts every American. Communities can now rely on the National Weather Service’s StormReady program to help them guard against the ravages of Mother Nature.
Some 90% of all presidentially declared disasters are weather related, leading to around 500 deaths per year and nearly $14 billion in damage. StormReady, a program started in 1999 in Tulsa, OK, helps arm America's communities with the communication and safety skills needed to save lives and property– before and during the event. StormReady helps community leaders and emergency managers strengthen local safety programs.
StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?
"""
    # The command only moves the selection; the text is identical.
    after_b = """\
Americans live in the most severe weather-prone country on Earth. Each year, Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000 tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly weather impacts every American. Communities can now rely on the National Weather Service’s StormReady program to help them guard against the ravages of Mother Nature.
Some 90% of all presidentially declared disasters are weather related, leading to around 500 deaths per year and nearly $14 billion in damage. StormReady, a program started in 1999 in Tulsa, OK, helps arm America's communities with the communication and safety skills needed to save lives and property– before and during the event. StormReady helps community leaders and emergency managers strengthen local safety programs.
StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?
"""
    self.run_test(
        before_b=before_b,
        after_b=after_b,
        before_sel=("1.395", "1.395"),
        after_sel=("1.395", "3.4"),
        command_name="forward-word-extend-selection",
    )
#@+node:ekr.20201130090918.68: *3* indent-relative
def test_indent_relative(self):
    """Test case for indent-relative.

    The cursor starts at 5.0 on "line c" and ends at 5.8, i.e. the line
    gains leading whitespace up to column 8.
    """
    # NOTE(review): leading whitespace inside the fixture strings appears to
    # have been lost in this copy (before_b and after_b read identically);
    # confirm the expected indentation against the upstream test suite.
    before_b = """\
first line
line 1
line a
line b
line c
last line
"""
    after_b = """\
first line
line 1
line a
line b
line c
last line
"""
    self.run_test(
        before_b=before_b,
        after_b=after_b,
        before_sel=("5.0", "5.0"),
        after_sel=("5.8", "5.8"),
        command_name="indent-relative",
    )
#@+node:ekr.20201130090918.69: *3* indent-rigidly
def test_indent_rigidly(self):
    """Test case for indent-rigidly.

    Lines 2-5 are selected; after the command the selection head moves
    from 5.0 to 5.1, consistent with the selected lines being indented.
    """
    # NOTE(review): leading whitespace inside the fixture strings appears to
    # have been lost in this copy (before_b and after_b read identically);
    # confirm the expected indentation against the upstream test suite.
    before_b = """\
first line
line 1
line a
line b
line c
last line
"""
    after_b = """\
first line
line 1
line a
line b
line c
last line
"""
    self.run_test(
        before_b=before_b,
        after_b=after_b,
        before_sel=("2.0", "5.0"),
        after_sel=("2.0", "5.1"),
        command_name="indent-rigidly",
    )
#@+node:ekr.20201130090918.70: *3* indent-to-comment-column
def test_indent_to_comment_column(self):
    """Test case for indent-to-comment-column.

    With editCommands.ccolumn set to 4, the cursor on line 2 moves from
    column 0 to column 4.
    """
    # NOTE(review): leading whitespace inside the fixture strings appears to
    # have been lost in this copy; confirm `after_b`'s indentation upstream.
    before_b = """\
first line
line b
last line
"""
    after_b = """\
first line
line b
last line
"""
    self.c.editCommands.ccolumn = 4  # Set the comment column
    self.run_test(
        before_b=before_b,
        after_b=after_b,
        before_sel=("2.0", "2.0"),
        after_sel=("2.4", "2.4"),
        command_name="indent-to-comment-column",
    )
#@+node:ekr.20201130090918.71: *3* insert-newline
def test_insert_newline(self):
    """Test case for insert-newline.

    A newline is inserted at 1.8, splitting "first line" into
    "first li" / "ne"; the cursor ends at the start of the new line (2.0).
    """
    before_b = """\
first line
line 1
line a
line b
line c
last line
"""
    after_b = """\
first li
ne
line 1
line a
line b
line c
last line
"""
    self.run_test(
        before_b=before_b,
        after_b=after_b,
        before_sel=("1.8", "1.8"),
        after_sel=("2.0", "2.0"),
        command_name="insert-newline",
    )
#@+node:ekr.20201130090918.72: *3* insert-parentheses
def test_insert_parentheses(self):
    """Test case for insert-parentheses.

    "()" is inserted at 1.5, turning "first line" into "first() line";
    the cursor lands between the parentheses (1.6).
    """
    before_b = """\
first line
line 1
line a
line b
line c
last line
"""
    after_b = """\
first() line
line 1
line a
line b
line c
last line
"""
    self.run_test(
        before_b=before_b,
        after_b=after_b,
        before_sel=("1.5", "1.5"),
        after_sel=("1.6", "1.6"),
        command_name="insert-parentheses",
    )
#@+node:ekr.20201130090918.76: *3* kill-line end-body-text
def test_kill_line_end_body_text(self):
    """Test case for kill-line at the end of the body text.

    The cursor starts past the last line (4.1); the trailing newline is
    killed, leaving the body ending at "line 3" with the cursor at 3.6.
    """
    before_b = """\
line 1
line 2
line 3
"""
    # No trailing newline after "line 3" in the expected result.
    after_b = """\
line 1
line 2
line 3"""
    self.run_test(
        before_b=before_b,
        after_b=after_b,
        before_sel=("4.1", "4.1"),
        after_sel=("3.6", "3.6"),
        command_name="kill-line",
    )
#@+node:ekr.20201130090918.77: *3* kill-line end-line-text
def test_kill_line_end_line_text(self):
    """Test case for kill-line at the end of a line's text.

    With the cursor at 3.5 the text of line 3 is killed; the cursor ends
    at the start of the now-shortened line (3.0).
    """
    before_b = """\
line 1
line 2
line 3
"""
    after_b = """\
line 1
line 2
"""
    self.run_test(
        before_b=before_b,
        after_b=after_b,
        before_sel=("3.5", "3.5"),
        after_sel=("3.0", "3.0"),
        command_name="kill-line",
    )
#@+node:ekr.20201130090918.79: *3* kill-line start-blank-line
def test_kill_line_start_blank_line(self):
    """Test case for kill-line at the start of a blank line.

    The cursor starts and ends at 3.0.
    """
    # NOTE(review): the selections imply a blank line 3 between "line 2"
    # and "line 4", which appears to have been lost in this copy of the
    # fixture; confirm against the upstream test suite.
    before_b = """\
line 1
line 2
line 4
"""
    after_b = """\
line 1
line 2
line 4
"""
    self.run_test(
        before_b=before_b,
        after_b=after_b,
        before_sel=("3.0", "3.0"),
        after_sel=("3.0", "3.0"),
        command_name="kill-line",
    )
#@+node:ekr.20201130090918.78: *3* kill-line start-line
def test_kill_line_start_line(self):
    """Test case for kill-line at the start of a line.

    With the cursor at 3.0, "line 3" is killed entirely; the cursor
    stays at 3.0, now on what was "line 4".
    """
    before_b = """\
line 1
line 2
line 3
line 4
"""
    after_b = """\
line 1
line 2
line 4
"""
    self.run_test(
        before_b=before_b,
        after_b=after_b,
        before_sel=("3.0", "3.0"),
        after_sel=("3.0", "3.0"),
        command_name="kill-line",
    )
#@+node:ekr.20201130090918.73: *3* kill-paragraph
def test_kill_paragraph(self):
    """Test case for kill-paragraph.

    The cursor starts at 9.0, inside the second (StormReady program)
    paragraph; that paragraph is killed and the cursor ends at 8.0.
    """
    # NOTE(review): blank separator lines between the paragraphs may have
    # been lost in this copy of the fixture; confirm upstream.
    before_b = """\
Americans live in the most severe weather-prone country on Earth. Each year,
Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000
tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly
weather impacts every American. Communities can now rely on the National Weather
Service’s StormReady program to help them guard against the ravages of Mother
Nature.
Some 90% of all presidentially declared disasters are weather related, leading
to around 500 deaths per year and nearly $14 billion in damage. StormReady, a
program started in 1999 in Tulsa, OK, helps arm America's communities with the
communication and safety skills needed to save lives and property– before and
during the event. StormReady helps community leaders and emergency managers
strengthen local safety programs.
StormReady communities are better prepared to save lives from the onslaught of
severe weather through better planning, education, and awareness. No community
is storm proof, but StormReady can help communities save lives. Does StormReady
make a difference?
"""
    after_b = """\
Americans live in the most severe weather-prone country on Earth. Each year,
Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000
tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly
weather impacts every American. Communities can now rely on the National Weather
Service’s StormReady program to help them guard against the ravages of Mother
Nature.
StormReady communities are better prepared to save lives from the onslaught of
severe weather through better planning, education, and awareness. No community
is storm proof, but StormReady can help communities save lives. Does StormReady
make a difference?
"""
    self.run_test(
        before_b=before_b,
        after_b=after_b,
        before_sel=("9.0", "9.0"),
        after_sel=("8.0", "8.0"),
        command_name="kill-paragraph",
    )
#@+node:ekr.20201130090918.74: *3* kill-sentence
def test_kill_sentence(self):
    """Test case for kill-sentence.

    The cursor at 2.2 is inside the second sentence; that sentence is
    killed and the cursor ends at 1.27, right after the first sentence.
    """
    before_b = """\
This is the first sentence. This
is the second sentence. And
this is the last sentence.
"""
    after_b = """\
This is the first sentence. And
this is the last sentence.
"""
    self.run_test(
        before_b=before_b,
        after_b=after_b,
        before_sel=("2.2", "2.2"),
        after_sel=("1.27", "1.27"),
        command_name="kill-sentence",
    )
#@+node:ekr.20201130090918.82: *3* kill-to-end-of-line after last visible char
def test_kill_to_end_of_line_after_last_visible_char(self):
"""Test | |
= start_time_msecs
if self._get_tz_str_from_epoch('start_time_msecs', start_time_msecs, action_result) is None or self._get_tz_str_from_epoch(
'end_time_msecs', end_time_msecs, action_result) is None:
self.debug_print("Error occurred in tz_str conversion from epoch for 'start_time_msecs' and 'end_time_msecs'. Error: {}".format(
action_result.get_message()))
else:
self.save_progress('Getting offenses data from {0} to {1}'.format(self._get_tz_str_from_epoch(
'start_time_msecs', start_time_msecs, action_result),
self._get_tz_str_from_epoch('end_time_msecs', end_time_msecs, action_result)))
# Backdate the offense ingestion start time by offense_ingest_start_time configuration parameter
# for default ingestion algorithm
if self._is_on_poll and self._offense_ingest_start_time > 0:
self.save_progress('Original start time is: {}'.format(start_time_msecs))
start_time_msecs = start_time_msecs - self._offense_ingest_start_time * 60 * 1000
self.save_progress('Back-dating the offense start time by {} minutes'.format(self._offense_ingest_start_time))
self.save_progress('The modified start_time is: {}'.format(start_time_msecs))
# 5. Create the param dictionary for the range
filter_string += '(({2} >= {0} and {2} <= {1}) or ({3} >= {0} and {3} <= {1}))'.format(
start_time_msecs, end_time_msecs, 'start_time', 'last_updated_time')
# get the list of offenses that we are supposed to query for
offense_ids = str(param.get(phantom.APP_JSON_CONTAINER_ID, param.get(QRADAR_JSON_OFFENSE_ID, None)))
if offense_ids != 'None':
offense_ids = [x.strip() for x in offense_ids.split(",")]
offense_ids = list(filter(None, offense_ids))
offense_id_list = list()
for x in offense_ids:
try:
if len(x.strip()) > 0 and int(x.strip()) >= 0:
offense_id_list.append('id={}'.format(int(x.strip())))
except Exception:
self.debug_print("The provided offense: {} is not valid".format(x))
if len(offense_id_list) > 0:
# If the user is providing the offense IDs to be fetched, irrespective of the
# start_time and the end_time, we will be fetching those offenses
filter_string = ' ({0})'.format(' or '.join(offense_id_list))
else:
return action_result.set_status(phantom.APP_ERROR, "Please provide valid offense ID|s")
params['filter'] = filter_string
params['sort'] = "+last_updated_time"
self.save_progress('Filter for fetching offenses: {0}'.format(filter_string))
offenses = list()
start_index = 0
total_offenses = 0
# 5. Pagination logic for fetching the list of offenses
offenses_status_msg = ''
while True:
# Removing the runs concept as now, we are declaring the support
# for the QRadar instance starting from v7.3.1 and this instance
# does not seem to have the pagination issues with the API now.
# If the action is 'offense_details', fetch all the offenses, and count = None in that case
if count:
end_index = min(start_index + QRADAR_QUERY_HIGH_RANGE - 1, count - 1)
else:
end_index = start_index + QRADAR_QUERY_HIGH_RANGE - 1
if start_index > end_index:
break
headers['Range'] = 'items={0}-{1}'.format(start_index, end_index)
start_index += QRADAR_QUERY_HIGH_RANGE
if ingest_only_open:
offenses_status_msg = 'Fetching all open offenses as the asset configuration parameter for ingest only open is selected. '
params['filter'] = filter_string + ' and status=OPEN'
self.save_progress("Updated filter due to 'Ingest only open offenses' being True is: {0}".format(params['filter']))
response = self._call_api('siem/offenses', 'get', action_result, params=params, headers=headers)
if phantom.is_fail(action_result.get_status()):
self.debug_print("The 'call_api' for fetching offenses failed: ", action_result.get_status())
return action_result.get_status()
self.debug_print("Response Code", response.status_code)
if response.status_code != 200 and 'html' in response.headers.get('Content-Type', ''):
return self._process_html_response(response, action_result)
if not (200 <= response.status_code <= 399):
# Error condition
if 'json' in response.headers.get('Content-Type', ''):
status_message = self._get_json_error_message(response, action_result)
else:
status_message = '{0}. HTTP status_code: {1}, reason: {2}'.format(
QRADAR_ERR_LIST_OFFENSES_API_FAILED, response.status_code, response.reason)
return action_result.set_status(phantom.APP_ERROR, status_message)
try:
offenses += response.json()
except Exception as e:
error_msg = self._get_error_message_from_exception(e)
self.debug_print("Unable to parse response of 'call_api' for fetching offenses as a valid JSON", error_msg)
return action_result.set_status(phantom.APP_ERROR, "Unable to parse response "
"of 'call_api' for fetching offenses as a valid JSON")
total_offenses = len(offenses)
if len(response.json()) < QRADAR_QUERY_HIGH_RANGE:
self.save_progress(QRADAR_PROG_GOT_X_OFFENSES, total_offenses=total_offenses)
break
# Parse the output, which is an array of offenses
# Update the summary
if total_offenses == 0 and offense_ids != 'None':
return action_result.set_status(phantom.APP_ERROR, "Please provide valid offense ID|s")
action_result.update_summary({QRADAR_JSON_TOTAL_OFFENSES: len(offenses)})
for offense in offenses:
action_result.add_data(offense)
# 6. Update the last fetched offense time in the global variable
# self._new_last_ingest_time for the case of only scheduled or interval polling and not manual polling
# Sort the offenses on the basis of the start_time and last_updated_time both
# Note the recent start_time and recent last_updated_time
# Update the _new_last_ingest_time with the maximum of the two as next time we will fetch the offenses
# whose start_time or last_updated_time is greater than the _new_last_ingest_time
# This new_last_ingest_time variable will be used only in the On_Poll action to store it in the last_saved_ingest_time of the state file
if self._is_on_poll and not self._is_manual_poll and offenses:
offenses.sort(key=lambda x: x['start_time'])
recent_start_time = offenses[-1]['start_time']
offenses.sort(key=lambda x: x['last_updated_time'])
recent_last_updated_time = offenses[-1]['last_updated_time']
self._new_last_ingest_time = max(recent_start_time, recent_last_updated_time)
action_result.set_status(phantom.APP_SUCCESS, '{0}Total Offenses: {1}'.format(offenses_status_msg, len(offenses)))
return action_result.get_status()
def _handle_list_closing_reasons(self, param):
    """Fetch the offense closing reasons from QRadar.

    Optional boolean action parameters 'include_reserved' and
    'include_deleted' are forwarded to the API when truthy. Each closing
    reason is added to the action result; the total goes in the summary.

    :param param: action parameter dict
    :return: phantom status (APP_SUCCESS / APP_ERROR)
    """
    action_result = self.add_action_result(ActionResult(dict(param)))

    # Build query params only for the flags the caller enabled.
    params = dict()
    if param.get('include_reserved'):
        params['include_reserved'] = True
    if param.get('include_deleted'):
        params['include_deleted'] = True
    # The REST helper expects None rather than an empty dict.
    params = params or None

    closing_reasons_response = self._call_api(
        'siem/offense_closing_reasons', 'get', action_result, params=params, headers=None)
    if phantom.is_fail(action_result.get_status()):
        self.debug_print("call_api failed: ", action_result.get_status())
        return action_result.get_status()
    if not closing_reasons_response:
        return action_result.set_status(phantom.APP_ERROR, QRADAR_ERR_LIST_OFFENSE_CLOSING_REASONS)

    if closing_reasons_response.status_code != 200:
        # Hoist the Content-Type lookup; the branches below only inspect it.
        content_type = closing_reasons_response.headers.get('Content-Type', '')
        if 'html' in content_type:
            return self._process_html_response(closing_reasons_response, action_result)
        # Error condition
        if 'json' in content_type:
            status_message = self._get_json_error_message(closing_reasons_response, action_result)
        else:
            status_message = '{0}. HTTP status_code: {1}, reason: {2}'.format(
                QRADAR_ERR_LIST_OFFENSE_CLOSING_REASONS,
                closing_reasons_response.status_code,
                closing_reasons_response.text if closing_reasons_response.text else "Unknown error occurred."
            )
        return action_result.set_status(phantom.APP_ERROR, status_message)

    try:
        closing_reasons = closing_reasons_response.json()
    except Exception as e:
        error_msg = self._get_error_message_from_exception(e)
        self.debug_print(QRADAR_ERR_INVALID_JSON, error_msg)
        return action_result.set_status(phantom.APP_ERROR, QRADAR_ERR_INVALID_JSON)

    for closing_reason in closing_reasons:
        action_result.add_data(closing_reason)

    summary = action_result.update_summary({})
    summary['total_offense_closing_reasons'] = action_result.get_data_size()
    return action_result.set_status(phantom.APP_SUCCESS)
def _get_rule_info(self, param):
    """Retrieve the details of a single QRadar analytics rule by its ID.

    Validates the 'rule_id' parameter, fetches the rule, adds it to the
    action result, and records its id and name in the summary.

    :param param: action parameter dict containing 'rule_id'
    :return: phantom status (APP_SUCCESS / APP_ERROR)
    """
    action_result = self.add_action_result(ActionResult(dict(param)))

    rule_id = param.get('rule_id')
    ret_val, _ = self._validate_integer(action_result, rule_id, QRADAR_RULE_ID_KEY)
    if phantom.is_fail(ret_val):
        return action_result.get_status()

    response = self._call_api('analytics/rules/{}'.format(rule_id), 'get', action_result, params=None, headers=None)
    if phantom.is_fail(action_result.get_status()):
        self.debug_print("Call API for 'get_rule_info' failed: ", action_result.get_status())
        return action_result.get_status()
    if not response:
        return action_result.set_status(phantom.APP_ERROR, QRADAR_ERR_GET_RULE_INFO)

    if response.status_code != 200:
        content_type = response.headers.get('Content-Type', '')
        if 'html' in content_type:
            return self._process_html_response(response, action_result)
        if 'json' in content_type:
            status_message = self._get_json_error_message(response, action_result)
        else:
            body_text = response.text
            status_message = '{0}. HTTP status_code: {1}, reason: {2}'.format(
                QRADAR_ERR_GET_RULE_INFO,
                response.status_code,
                body_text if body_text else "Unknown error occurred.")
        return action_result.set_status(phantom.APP_ERROR, status_message)

    try:
        rule_info = response.json()
        action_result.add_data(rule_info)
    except Exception as e:
        error_msg = self._get_error_message_from_exception(e)
        self.debug_print(QRADAR_ERR_INVALID_JSON, error_msg)
        return action_result.set_status(phantom.APP_ERROR, QRADAR_ERR_INVALID_JSON)

    summary = action_result.update_summary({})
    summary['id'] = rule_info.get('id', None)
    summary['name'] = rule_info.get('name', None)
    return action_result.set_status(phantom.APP_SUCCESS)
def _list_rules(self, param):
    """List QRadar analytics rules, paging via the HTTP Range header.

    :param param: action parameter dict; the optional 'count' parameter
        limits how many rules are fetched (all rules when absent)
    :return: phantom status (APP_SUCCESS / APP_ERROR)
    """
    action_result = self.add_action_result(ActionResult(dict(param)))

    # 1. Validation of the input parameters
    count = param.get(QRADAR_JSON_COUNT)
    ret_val, count = self._validate_integer(action_result, count, QRADAR_COUNT_KEY)
    if phantom.is_fail(ret_val):
        return action_result.get_status()

    rules = list()
    headers = dict()
    start_index = 0
    total_rules = 0

    while True:
        # count is None means "fetch everything"; otherwise cap at count items.
        if count:
            end_index = min(start_index + QRADAR_QUERY_HIGH_RANGE - 1, count - 1)
        else:
            end_index = start_index + QRADAR_QUERY_HIGH_RANGE - 1
        if start_index > end_index:
            break

        # QRadar's REST API pages through the standard HTTP Range header.
        headers['Range'] = 'items={0}-{1}'.format(start_index, end_index)
        start_index += QRADAR_QUERY_HIGH_RANGE

        list_rules_response = self._call_api('analytics/rules', 'get', action_result, params=None, headers=headers)
        if phantom.is_fail(action_result.get_status()):
            self.debug_print("call_api for list rules failed: ", action_result.get_status())
            return action_result.get_status()
        if not list_rules_response:
            return action_result.set_status(phantom.APP_ERROR, QRADAR_ERR_LIST_RULES)
        if list_rules_response.status_code != 200:
            if 'html' in list_rules_response.headers.get('Content-Type', ''):
                return self._process_html_response(list_rules_response, action_result)
            if 'json' in list_rules_response.headers.get('Content-Type', ''):
                status_message = self._get_json_error_message(list_rules_response, action_result)
            else:
                list_rules_response_text = list_rules_response.text
                status_message = '{0}. HTTP status_code: {1}, reason: {2}'.format(
                    QRADAR_ERR_LIST_RULES,
                    list_rules_response.status_code,
                    list_rules_response_text if list_rules_response_text else "Unknown error occurred.")
            return action_result.set_status(phantom.APP_ERROR, status_message)

        try:
            page = list_rules_response.json()
        except Exception as e:
            error_msg = self._get_error_message_from_exception(e)
            self.debug_print(QRADAR_ERR_INVALID_JSON, error_msg)
            return action_result.set_status(phantom.APP_ERROR, QRADAR_ERR_INVALID_JSON)

        rules += page
        total_rules = len(rules)

        # Stop on a short (or empty) page: there are no more rules. Checking
        # the page size — not the accumulated total — mirrors the offense
        # pagination logic and avoids looping forever once the accumulated
        # total reaches a full page while later pages come back empty.
        if len(page) < QRADAR_QUERY_HIGH_RANGE:
            # NOTE(review): QRADAR_PROG_GOT_X_RULES presumably formats a
            # 'total_offenses' placeholder — confirm before renaming the kwarg.
            self.save_progress(QRADAR_PROG_GOT_X_RULES, total_offenses=total_rules)
            break

    for rule in rules:
        action_result.add_data(rule)

    summary = action_result.update_summary({})
    summary['total_rules'] = action_result.get_data_size()
    return action_result.set_status(phantom.APP_SUCCESS)
def _handle_ariel_query(self, ariel_query, action_result, obj_result_key=None, offense_id=None, count=None): # noqa: C901
if obj_result_key:
self.save_progress("Executing ariel query to get {0} {1}", obj_result_key,
'' if (not offense_id) else 'for offense: {offense_id}'.format(offense_id=offense_id))
else:
self.save_progress("Executing ariel query")
# First create a search
params = dict()
params['query_expression'] = ariel_query
response = self._call_api(QRADAR_ARIEL_SEARCH_ENDPOINT, 'post', action_result, params=params)
if response and response.text:
response_text = response.text
else:
response_text = "Unknown response returned."
if phantom.is_fail(action_result.get_status()):
self.debug_print("call_api for ariel query failed: ",
action_result.get_status())
return action_result.set_status(phantom.APP_ERROR,
"Error occurred while fetching events for the offense ID: {}. Response code: {}. Response text: {}"
.format(offense_id, None if response is None else response.status_code, response_text))
self.debug_print("Response Code", response.status_code)
self.debug_print("Response Text", response_text)
if response.status_code != 201:
# Error condition
action_result.set_status(phantom.APP_ERROR, QRADAR_ERR_ARIEL_QUERY_FAILED)
try:
resp_text = response.text
if response.json().get("description"):
resp_text = response.json().get("description", "Please provide valid input")
except Exception:
return action_result.set_status(phantom.APP_ERROR, 'Please provide valid input')
if "InOffense function: Error loading Offense" in resp_text:
action_result.append_to_message("Queried offense might not contain data on QRadar")
action_result.append_to_message("\nResponse from QRadar: {0}".format(resp_text))
return action_result.get_status()
try:
response_json = response.json()
except Exception:
return action_result.get_status(phantom.APP_ERROR, QRADAR_ERR_INVALID_JSON)
# Now get the search id
search_id = response_json.get('search_id')
if not | |
'Restall')
if others:
#print('here we go')
co2=co.apply(lambda x:x if x in top_val else 'Restall')
top_val.append('Restall')
else:
co2=co[co.apply(lambda x:True if x in top_val else False)]
#print(co2)
#top_val = [str(x) for x in top_val]
top_val.sort(reverse=False)
return (co2,top_val)
# In[3]:
###plot for X1 for values given in y1 , special vals will come from y1
###only top values of X1 will be plotted
def get_rows_cols(x, **kwargs):
    '''
    Return the subplot grid (columns, rows) needed to fit the requested plots.

    args:
        x : total number of graphs needed, e.g. to plot each value in a
            categorical column, the unique-value count will be x
        scols=<number> : how many graphs per row, default 3

    returns:
        (scols, srows) tuple; srows is the ceiling of x / scols, but never
        less than 1 (so x == 0 still yields one row).
    '''
    scols = kwargs.get('scols', 3)
    if x > scols:
        # Ceiling division without importing math: enough rows for all plots.
        srows = (x + scols - 1) // scols
    else:
        # Everything fits on a single row.
        srows = 1
    return (scols, srows)
# In[ ]:
def cat_num(df2, **kwargs):
    '''Plot a categorical column against a numerical column.

    Expects a dataframe whose first column is categorical and second column
    is numeric. Optionally a third categorical column can act as a hue.
    NaN in the x-axis column is converted to the string 'nan' and treated as
    a value, to give insight on missing data; NaN in the y-axis or hue
    column is ignored. Outputs a boxplot of the second column for each value
    of the first, plus a bar plot of the per-category sum (mean, median,
    min and max are already visible in the boxplot).

    extra keyword arguments:
        xcap=5 : cap the categories shown to the top 5 by count (default 5)
        others=True/False : add an extra category holding all values outside
            the top selection (consumed by top_vals via **kwargs)
        x_name='xvalue' : label to use for the x axis (default: first column name)
        y_name='yvalue' : label to use for the y axis (default: second column name)
        size_figure=(13,4) : figure size (default (13, 4)), tight layout
        hue_col='hue_colname' : optional column name to use as hue
    '''
    df = df2.copy(deep=True)
    # Resolve options with defaults; remaining kwargs (e.g. `others`) are
    # forwarded untouched to top_vals() below.
    xcap = kwargs.get('xcap', 5)
    x_name = kwargs.get('x_name', df.columns[0])
    y_name = kwargs.get('y_name', df.columns[1])
    size_figure = kwargs.get('size_figure', (13, 4))
    hue_col = kwargs.get('hue_col')
    df.rename(columns={df.columns[0]: x_name}, inplace=True)
    df.rename(columns={df.columns[1]: y_name}, inplace=True)
    # Treat NaN as the literal category 'nan' so missing data shows up.
    # NOTE(review): fillna() after astype('str') is a no-op (NaN has already
    # become the string 'nan'); kept for parity with the original intent.
    df[x_name] = df[[x_name]].astype('str').fillna('nan')
    # Cap the number of categories shown and get their plotting order.
    df[x_name], orderv = top_vals(df[x_name], cap=xcap, **kwargs)
    fig, axes = plt.subplots(1, 2, figsize=size_figure, tight_layout=True)
    plt.subplot(1, 2, 1)
    sns.boxplot(data=df, x=x_name, y=y_name, order=orderv, hue=hue_col)
    if hue_col is not None:
        # Keep the legend outside the axes so it does not cover the boxes.
        plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    plt.xticks(rotation=90)
    plt.title('box plot for ' + x_name + ' against ' + y_name)
    plt.subplot(1, 2, 2)
    # Per-category sum; other aggregates are already visible in the boxplot.
    sns.barplot(x=x_name, y=y_name, data=df, estimator=sum, order=orderv, hue=hue_col)
    plt.title('bar plot for ' + x_name + ' against ' + y_name)
    plt.xticks(rotation=90)
    plt.show()
    return True
def num_cat(df2, **kwargs):
    '''Plot a numerical column against a categorical column.

    Expects a dataframe whose first column is numerical and second column
    is categorical. Optionally a third categorical column can act as a hue.
    NaN in the categorical (y-axis) column is converted to the string 'nan'
    and treated as a value, to give insight on missing data; NaN in the
    numerical or hue column is ignored. Outputs a boxplot of the first
    column for each value of the second, plus a bar plot of the
    per-category sum (mean, median, min and max are already visible in the
    boxplot).

    extra keyword arguments:
        ycap=5 : cap the categories shown to the top 5 by count (default 5)
        others=True/False : add an extra category holding all values outside
            the top selection (consumed by top_vals via **kwargs)
        x_name='xvalue' : label for the numerical (first) column
        y_name='yvalue' : label for the categorical (second) column
        size_figure=(13,4) : figure size (default (13, 4)), tight layout
        hue_col='hue_colname' : optional column name to use as hue
    '''
    df = df2.copy(deep=True)
    # The axes are deliberately switched relative to cat_num: the second
    # (categorical) column is plotted on x, the first (numerical) on y.
    xcap = kwargs.get('ycap', 5)  # y-axis cap option drives the category cap
    x_name = kwargs.get('y_name', df.columns[1])  # switched
    y_name = kwargs.get('x_name', df.columns[0])  # switched
    size_figure = kwargs.get('size_figure', (13, 4))
    hue_col = kwargs.get('hue_col')
    df.rename(columns={df.columns[1]: x_name}, inplace=True)  # switched
    df.rename(columns={df.columns[0]: y_name}, inplace=True)  # switched
    # Treat NaN as the literal category 'nan' so missing data shows up.
    # NOTE(review): fillna() after astype('str') is a no-op (NaN has already
    # become the string 'nan'); kept for parity with the original intent.
    df[x_name] = df[[x_name]].astype('str').fillna('nan')
    # Cap the number of categories shown and get their plotting order.
    df[x_name], orderv = top_vals(df[x_name], cap=xcap, **kwargs)
    fig, axes = plt.subplots(1, 2, figsize=size_figure, tight_layout=True)
    plt.subplot(1, 2, 1)
    sns.boxplot(data=df, x=x_name, y=y_name, order=orderv, hue=hue_col)
    if hue_col is not None:
        # Keep the legend outside the axes so it does not cover the boxes.
        plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    plt.xticks(rotation=90)
    plt.title('box plot for ' + x_name + ' against ' + y_name)
    plt.subplot(1, 2, 2)
    # Per-category sum; other aggregates are already visible in the boxplot.
    sns.barplot(x=x_name, y=y_name, data=df, estimator=sum, order=orderv, hue=hue_col)
    plt.title('bar plot for ' + x_name + ' against ' + y_name)
    plt.xticks(rotation=90)
    plt.show()
    return True
# In[4]:
def cat_cat(df2,**kwargs):
'''Docstring for plotting categorical vs categorical
This method expects a dataframe as input with first column and second column as categorical column.
Additionally you can provide a third categorical column that can act like a hue.
NAN in x_axis and y_axis is converted as string 'nan' and treated as a value to give insight on NAN.
It will output a countplot of the first column for each value in the second column. Note that the x axis stays the same
across graphs; each graph simply restricts the counts to one value of y. A final combined output will also be given.
extra arguments
xcap=5 , will cap the categories shown to the top xcap by count for the x axis (1st column) , default 5
ycap=5 , will cap the categories shown to the top ycap by count for the y axis (2nd column) , default 5
x_name='xvalue' , the name that you want in x axis for the first column ,
sometimes the column name are different from the name you want to see in the graph.By default the first column name is taken
y_name='yvalue' , same as x_name , but for Y axis
size_figure=(13,4) , for playing around with the size. depending on size of the screen you may want to change it. default is 13,4
tight layout
hue_col='hue_colname' , if you require an additional layer to add hue to the plot
scols=<number> : parameter to control how many graphs in a row , by default it is 3
'''
df = df2.copy(deep=True)
xcap=kwargs['xcap'] if 'xcap' in kwargs else 5
ycap=kwargs['ycap'] if 'ycap' in kwargs else 5
others=kwargs['others'] if 'others' in kwargs else False
x_name = kwargs['x_name'] if 'x_name' in kwargs else df.columns[0]
y_name = kwargs['y_name'] if 'y_name' in kwargs else df.columns[1]
size_figure = kwargs['size_figure'] if 'size_figure' in kwargs else (13,4)
scols=kwargs['scols'] if 'scols' in kwargs else 3 ##used in get_rows_cols
hue_col = kwargs['hue_col'] if 'hue_col' in kwargs else None
df.rename(columns = {df.columns[0]:x_name}, inplace=True)
df.rename(columns = {df.columns[1]:y_name}, inplace=True)
#df[x_name]=df[[x_name]].astype('str').fillna('nan')
df[y_name]=df[[y_name]].astype('str').fillna('nan')
#df[x_name],orderx=top_vals(df[x_name],cap=xcap)
df[y_name],ordery=top_vals(df[y_name],cap=ycap,**kwargs)
#vals=df.loc[:,y_name].unique()
scols,srows=get_rows_cols(len(ordery)+1,**kwargs)
size_figure=(size_figure[0],srows*size_figure[1])
fig,axes=plt.subplots(srows,scols,figsize=size_figure,tight_layout=True)
for ctr,value in enumerate(ordery):
#value=str(value) ##not needed as NULL is already handled with nan replacement for topval
plt.subplot(srows,scols,ctr+1)
###capping the values with top 5 counts of X for the value specified in Y
##else the graph would look pretty bad
#xvalue=df.loc[df['y1'].astype(str)==value,'x1']
xfilter=df.loc[df[y_name].astype(str)==value,x_name]
#top_order=xvalue.value_counts().iloc[:cap].index
#print(xfilter)
#fi=xfilter
xvalue,orderx=top_vals(xfilter,cap=xcap,**kwargs)
#print(xvalue,orderx)
sns.countplot(xvalue,order=orderx,label=value)
#ab=xvalue
#od=orderx
#lb=value
plt.xlabel(x_name)
plt.xticks(rotation=90)
| |
# coding=utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
from calendar import timegm
from collections import Counter, OrderedDict
from hashlib import sha256
import typing
from datetime import datetime
from copy import deepcopy
import six.moves.urllib.request
import six.moves.urllib.parse
import six.moves.urllib.error
import re
from xml.etree import ElementTree as ET
from tg import config
from tg import tmpl_context as c, app_globals as g
from tg import request
from paste.deploy.converters import asbool, aslist
import formencode as fe
from webob import exc
import PIL
from ming import schema as S
from ming.utils import LazyProperty
from ming.orm import ThreadLocalORMSession
from ming.orm import session, state, MapperExtension
from ming.orm import FieldProperty, RelationProperty, ForeignIdProperty
from ming.orm.declarative import MappedClass
from allura.lib import helpers as h
from allura.lib import plugin
from allura.lib import exceptions
from allura.lib import security
from allura.lib import validators as v
from allura.lib.decorators import memoize
from allura.lib.security import has_access
from allura.lib.search import SearchIndexable
from allura.lib.utils import is_nofollow_url
from allura.model.types import MarkdownCache
from .session import main_orm_session
from .session import project_orm_session
from .neighborhood import Neighborhood
from .auth import ProjectRole, User
from .timeline import ActivityNode, ActivityObject
from .types import ACL, ACE
from .monq_model import MonQTask
from .filesystem import File
import six
from six.moves import map
if typing.TYPE_CHECKING:
from ming.odm.mapper import Query
log = logging.getLogger(__name__)
# max sitemap entries per tool type
SITEMAP_PER_TOOL_LIMIT = 10
DEFAULT_ICON_WIDTH = 48
class ProjectFile(File):
    """A file attached to a project (e.g. an icon or screenshot)."""
    class __mongometa__:
        session = main_orm_session
        indexes = [('project_id', 'category')]

    query: 'Query[ProjectFile]'

    # owning project's ObjectId
    project_id = FieldProperty(S.ObjectId)
    # what kind of file this is; visible values include 'icon',
    # 'icon_original', 'icon-<w>' and 'screenshot'
    category = FieldProperty(str)
    # user-supplied caption text
    caption = FieldProperty(str)
    # ordering key used by Project.get_screenshots()
    sort = FieldProperty(int)
class ProjectCategory(MappedClass):
    """A hierarchical category that a project can be filed under."""
    class __mongometa__:
        session = main_orm_session
        name = str('project_category')

    query: 'Query[ProjectCategory]'

    _id = FieldProperty(S.ObjectId)
    parent_id = FieldProperty(S.ObjectId, if_missing=None)
    name = FieldProperty(str)
    label = FieldProperty(str, if_missing='')
    description = FieldProperty(str, if_missing='')

    @property
    def parent_category(self):
        """The parent category document, or None for a top-level category."""
        return self.query.get(_id=self.parent_id)

    @property
    def subcategories(self):
        """Every category whose parent is this one."""
        return self.query.find({'parent_id': self._id}).all()
class TroveCategoryMapperExtension(MapperExtension):
    """Publish a site event whenever a TroveCategory is written to the DB."""

    def _fire(self, event_name, obj):
        # flush_immediately so listeners observe the change right away
        g.post_event(event_name, obj.trove_cat_id, flush_immediately=True)

    def after_insert(self, obj, state, sess):
        self._fire('trove_category_created', obj)

    def after_update(self, obj, state, sess):
        self._fire('trove_category_updated', obj)

    def after_delete(self, obj, state, sess):
        self._fire('trove_category_deleted', obj)
class TroveCategory(MappedClass):
    """A SourceForge-style trove category (license, topic, language, ...)."""
    class __mongometa__:
        session = main_orm_session
        name = str('trove_category')
        # fires trove_category_* events on insert/update/delete
        extensions = [TroveCategoryMapperExtension]
        indexes = ['trove_cat_id', 'trove_parent_id', 'shortname', 'fullpath']

    query: 'Query[TroveCategory]'

    _id = FieldProperty(S.ObjectId)
    # legacy integer ids; the tree is linked via these, not ObjectIds
    trove_cat_id = FieldProperty(int, if_missing=None)
    trove_parent_id = FieldProperty(int, if_missing=None)
    shortname = FieldProperty(str, if_missing='')
    fullname = FieldProperty(str, if_missing='')
    # ' :: '-separated path from the root, e.g. 'Topic :: Games'
    fullpath = FieldProperty(str, if_missing='')
    parent_only = FieldProperty(bool, if_missing=False)
    show_as_skill = FieldProperty(bool, if_missing=True)

    @property
    def parent_category(self):
        """Parent category via the integer trove id; None at the root."""
        return self.query.get(trove_cat_id=self.trove_parent_id)

    @property
    def subcategories(self):
        """Direct children, sorted case-insensitively by full name."""
        return sorted(self.query.find(dict(trove_parent_id=self.trove_cat_id)).all(),
                      key=lambda t: t.fullname.lower())

    @property
    def children(self):
        """All descendants, found by fullpath prefix match, sorted by path."""
        return sorted(self.query.find({'fullpath': re.compile('^' + re.escape(self.fullpath) + ' ::')}).all(),
                      key=lambda t: t.fullpath.lower())

    @property
    def type(self):
        """Shortname of this category's root ancestor (e.g. 'license').

        NOTE(review): assumes every chain terminates at trove_parent_id == 0;
        a broken parent link would raise AttributeError here.
        """
        trove = self
        while trove.trove_parent_id != 0:
            trove = trove.parent_category
        return trove.shortname

    @property
    def fullpath_within_type(self):
        'remove first section of full path, and use nicer separator'
        return ' » '.join(self.fullpath.split(' :: ')[1:])

    @classmethod
    def by_trove_cat_ids(cls, *ids):
        """Fetch all categories whose integer trove id is in *ids*."""
        return cls.query.find({'trove_cat_id': {'$in': ids}}).all()

    def __json__(self):
        # serialization used by the API layer
        return dict(
            id=self.trove_cat_id,
            shortname=self.shortname,
            fullname=self.fullname,
            fullpath=self.fullpath,
        )
class ProjectNameFieldProperty(FieldProperty):
    """
    Make project names be the username instead of u/whatever, when a user-project.
    Particularly nice if the username and user-project name don't match exactly.
    (This is a python "descriptor")
    """
    def __get__(self, instance, cls=None):
        owner = instance.user_project_of if instance else None
        if owner:
            return owner.username
        # not a user-project (or class-level access): fall back to stored name
        return super(ProjectNameFieldProperty, self).__get__(instance, cls)
class Project(SearchIndexable, MappedClass, ActivityNode, ActivityObject):
    '''
    Projects contain tools, subprojects, and their own metadata. They live
    in exactly one :class:`~allura.model.neighborhood.Neighborhood`
    '''

    # permissions available on every project; the neighborhood's '--init--'
    # project additionally exposes 'register' (see the permissions property)
    _perms_base = ['read', 'update', 'admin', 'create']
    _perms_init = _perms_base + ['register']

    class __mongometa__:
        session = main_orm_session
        name = str('project')
        indexes = [
            'name',
            'neighborhood_id',
            ('neighborhood_id', 'name'),
            'shortname',
            'parent_id',
            ('deleted', 'shortname', 'neighborhood_id'),
            ('neighborhood_id', 'is_nbhd_project', 'deleted')]
        unique_indexes = [('neighborhood_id', 'shortname')]

    query: 'Query[Project]'

    # document type marker for the search index
    type_s = 'Project'

    # Project schema
    _id = FieldProperty(S.ObjectId)
    # parent project for subprojects; None for root projects
    parent_id = FieldProperty(S.ObjectId, if_missing=None)
    neighborhood_id = ForeignIdProperty(Neighborhood)
    shortname = FieldProperty(str)
    # descriptor: resolves to the owning username for user-projects
    name: str = ProjectNameFieldProperty(str)
    show_download_button = FieldProperty(S.Deprecated)
    short_description = FieldProperty(str, if_missing='')
    summary = FieldProperty(str, if_missing='')
    description = FieldProperty(str, if_missing='')
    # cached markdown rendering of description (see description_html)
    description_cache = FieldProperty(MarkdownCache)
    homepage_title = FieldProperty(str, if_missing='')
    external_homepage = FieldProperty(str, if_missing='')
    video_url = FieldProperty(str, if_missing='')
    support_page = FieldProperty(str, if_missing='')
    support_page_url = FieldProperty(str, if_missing='')
    socialnetworks = FieldProperty([dict(socialnetwork=str, accounturl=str)])
    removal = FieldProperty(str, if_missing='')
    moved_to_url = FieldProperty(str, if_missing='')
    removal_changed_date = FieldProperty(datetime, if_missing=datetime.utcnow)
    database = FieldProperty(S.Deprecated)
    database_uri = FieldProperty(S.Deprecated)
    is_root = FieldProperty(bool)
    acl = FieldProperty(ACL(permissions=_perms_init))
    neighborhood_invitations = FieldProperty([S.ObjectId])
    neighborhood = RelationProperty(Neighborhood)
    app_configs = RelationProperty('AppConfig')
    category_id = FieldProperty(S.ObjectId, if_missing=None)
    # soft-delete flag; deleted projects stay in the collection
    deleted = FieldProperty(bool, if_missing=False)
    labels = FieldProperty([str])
    last_updated = FieldProperty(datetime, if_missing=None)
    tool_data = FieldProperty({str: {str: None}})  # entry point: prefs dict
    ordinal = FieldProperty(int, if_missing=0)
    database_configured = FieldProperty(bool, if_missing=True)
    # extra tool statuses allowed beyond 'production' (see allowed_tool_status)
    _extra_tool_status = FieldProperty([str])
    # trove_* fields hold lists of TroveCategory ObjectIds, one per trove type
    trove_root_database = FieldProperty([S.ObjectId])
    trove_developmentstatus = FieldProperty([S.ObjectId])
    trove_audience = FieldProperty([S.ObjectId])
    trove_license = FieldProperty([S.ObjectId])
    trove_os = FieldProperty([S.ObjectId])
    trove_language = FieldProperty([S.ObjectId])
    trove_topic = FieldProperty([S.ObjectId])
    trove_natlanguage = FieldProperty([S.ObjectId])
    trove_environment = FieldProperty([S.ObjectId])
    tracking_id = FieldProperty(str, if_missing='')
    # True only for the special project representing the neighborhood itself
    is_nbhd_project = FieldProperty(bool, if_missing=False)
    features = FieldProperty([str])
    rating = FieldProperty(float, if_missing=0)

    # transient properties
    notifications_disabled = False
@property
def activity_name(self):
    """Name used to represent this project in activity records."""
    return self.name
@property
def permissions(self):
    """Permission names valid on this project.

    The neighborhood's '--init--' project also allows 'register'.
    """
    is_init_project = self.shortname == '--init--'
    return self._perms_init if is_init_project else self._perms_base
def parent_security_context(self):
    '''ACL processing should proceed up the project hierarchy.'''
    # None for root projects, which terminates the ACL walk
    return self.parent_project
@LazyProperty
def allowed_tool_status(self):
    """Tool statuses installable on this project: 'production' plus any extras."""
    statuses = ['production']
    statuses.extend(self._extra_tool_status)
    return statuses
@h.exceptionless([], log)
def sidebar_menu(self):
    """Sidebar entries linking to the parent project and child projects."""
    from allura.app import SitemapEntry
    entries = []
    if not self.is_root:
        parent = self.parent_project
        entries.append(SitemapEntry('Parent Project'))
        entries.append(SitemapEntry(parent.name or parent.script_name,
                                    parent.script_name))
    subprojects = self.direct_subprojects
    if subprojects:
        entries.append(SitemapEntry('Child Projects'))
        entries.extend(SitemapEntry(sub.name or sub.script_name, sub.script_name)
                       for sub in subprojects)
    return entries
def troves_by_type(self, trove_type):
    """Return the TroveCategory objects selected for one trove type.

    :param trove_type: suffix of a ``trove_*`` field, e.g. ``'license'``.
    :return: list of TroveCategory documents; empty if none selected.
    """
    trove_key = 'trove_%s' % trove_type
    # getattr with a default replaces the hasattr/getattr double lookup
    troves = getattr(self, trove_key, None)
    if troves:
        return TroveCategory.query.find({'_id': {'$in': troves}}).all()
    return []
def all_troves(self):
    '''
    Returns a dict of human-readable root troves => [categories]
    '''
    # a couple of field names read better under a different label
    display_names = {'natlanguage': 'translation', 'root_database': 'database'}
    troves = {}
    for attr in dir(self):
        if not attr.startswith('trove_'):
            continue
        trove_type = attr.replace('trove_', '')
        label = display_names.get(trove_type, trove_type)
        troves[label] = self.troves_by_type(trove_type)
    return troves
def get_tool_data(self, tool, key, default=None):
    """Return per-tool preference `key` stored for `tool`, or `default`."""
    return self.tool_data.get(tool, {}).get(key, default)
def set_tool_data(self, tool, **kw):
    """Merge keyword args into the per-tool preference dict for `tool`."""
    d = self.tool_data.setdefault(tool, {})
    d.update(kw)
    # mark the document dirty — presumably because the ORM does not track
    # in-place mutation of the nested dict; confirm against ming docs
    state(self).soil()
def admin_menu(self):
    """Extra admin menu entries; a bare Project contributes none."""
    return []
@property
def script_name(self):
    """The project URL with any scheme/host prefix (up to '//') stripped."""
    url = self.url()
    if '//' not in url:
        return url
    # keep only what follows the last '//'
    return url.rsplit('//', 1)[-1]
def url(self, use_userproject_shortname=False):
    """Canonical URL path (or scheme-qualified URL) for this project.

    :param use_userproject_shortname: when True, user-projects keep their
        '/u/<shortname>/' form instead of redirecting to the user's URL.
    """
    # the neighborhood project is addressed by the neighborhood itself
    if self.is_nbhd_project:
        return self.neighborhood.url()
    shortname = self.shortname[len(self.neighborhood.shortname_prefix):]
    # user-projects normally resolve to the owning user's URL
    if self.neighborhood.url_prefix == '/u/' and not use_userproject_shortname:
        user = self.user_project_of
        if user:
            return user.url()
    url = self.neighborhood.url_prefix + shortname + '/'
    if url.startswith('//'):
        # protocol-relative prefix: qualify with the current request's scheme
        try:
            return request.scheme + ':' + url
        except TypeError:  # pragma no cover
            # no active request; fall back to plain http
            return 'http:' + url
    else:
        return url
def icon_url(self):
    """URL of this project's icon, honoring the static.icon_base setting."""
    base = config.get('static.icon_base', '')
    return '{}{}icon'.format(base, self.url())
def best_download_url(self):
    """Delegate to the registration provider plugin for a download URL."""
    provider = plugin.ProjectRegistrationProvider.get()
    return provider.best_download_url(self)
def get_screenshots(self):
    """All screenshot files for this project, in their display order."""
    screenshots = ProjectFile.query.find({'project_id': self._id,
                                          'category': 'screenshot'})
    return screenshots.sort('sort').all()
def save_icon(self, filename, file_input, content_type=None):
    """Store a new project icon (original + 48x48 thumbnail).

    :param filename: original file name of the upload.
    :param file_input: file-like object with the icon bytes.
    :param content_type: optional explicit content type.
    :return: True if the original image was saved, else False.
    """
    icon_orig, icon_thumb = ProjectFile.save_image(
        filename, file_input, content_type=content_type,
        square=True, thumbnail_size=(48, 48),
        thumbnail_meta=dict(project_id=self._id, category='icon'),
        save_original=True,
        original_meta=dict(project_id=self._id, category='icon_original'),
        convert_bmp=True,
    )
    if icon_orig:
        # store the dimensions so we don't have to read the whole image each time we need to know
        icon_orig_img = PIL.Image.open(icon_orig.rfile())
        self.set_tool_data('allura', icon_original_size=icon_orig_img.size)
        try:
            # calc and save icon file hash, for better cache busting purposes
            file_input.seek(0)
            file_bytes = file_input.read()
            file_sha256 = sha256(file_bytes).hexdigest()
            self.set_tool_data('allura', icon_sha256=file_sha256)
        except Exception:
            # lazy %-args instead of eager .format(); log.exception records
            # the traceback, so the bound exception name was unused
            log.exception('Failed to calculate sha256 for icon file for %s', self.shortname)
        return True
    return False
@property
def icon(self):
    """The project's icon file at the default width (48px), or None."""
    return self.icon_sized(DEFAULT_ICON_WIDTH)
@memoize
def icon_sized(self, w):
    """Return this project's icon file at width `w`, generating it on demand.

    :param w: requested width in pixels; must be one of the configured
        project_icon_sizes.
    :raises ValueError: if `w` is not an allowed size.
    :return: a ProjectFile for the sized icon, or None/default icon fallback.
    """
    allowed_sizes = list(map(int, aslist(config.get('project_icon_sizes', '16 24 32 48 64 72 96'))))
    if w not in allowed_sizes:
        raise ValueError('Width must be one of {} (see project_icon_sizes in your .ini file)'.format(allowed_sizes))
    if w == DEFAULT_ICON_WIDTH:
        icon_cat_name = 'icon'
    else:
        icon_cat_name = 'icon-{}'.format(w)
    sized = ProjectFile.query.get(project_id=self._id, category=icon_cat_name)
    if not sized and w != DEFAULT_ICON_WIDTH:
        # non-default size not generated yet: scale it from the original
        orig = self.icon_original
        if not orig:
            return self.icon
        sized = orig.save_thumbnail(filename='',
                                    image=PIL.Image.open(orig.rfile()),
                                    content_type=orig.content_type,
                                    thumbnail_size=(w, w),
                                    # BUG FIX: was c.project._id (the request's
                                    # project) — must be this instance's id to
                                    # match the query above and work off-request
                                    thumbnail_meta=dict(project_id=self._id, category=icon_cat_name),
                                    square=True,
                                    )
    return sized
@LazyProperty
def icon_original(self):
    """The full-size uploaded icon file, or None if never uploaded."""
    return ProjectFile.query.get(project_id=self._id, category='icon_original')
@LazyProperty
def icon_max_size(self):
    """Largest icon width that can be served without upscaling, or None."""
    stored_original_size = self.get_tool_data('allura', 'icon_original_size')
    if stored_original_size:
        # max not min, since non-square images get white padding added
        return max(stored_original_size)
    elif self.icon:
        # legacy icons predate the stored-size tool data; assume default width
        return DEFAULT_ICON_WIDTH
    else:
        return None
@LazyProperty
def icon_timestamp(self):
    """Upload time of the icon as epoch seconds (for cache busting).

    Uses the ObjectId generation time as the timestamp; raises
    AttributeError if the project has no icon.
    """
    return timegm(self.icon._id.generation_time.timetuple())
@property
def description_html(self):
    """Markdown-rendered description, served from the per-field cache."""
    return g.markdown.cached_convert(self, 'description')
@property
def parent_project(self):
    """The parent Project document, or None when this is a root project."""
    return None if self.is_root else self.query.get(_id=self.parent_id)
def _get_private(self):
"""Return True if this project is private, else False."""
role_anon = | |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
import os
import collections
import jsonpath_ng
import re
from typing import List, Set
import shutil
from ruamel.yaml import YAML
from git import Repo, InvalidGitRepositoryError, NoSuchPathError
from shrike.build.core.command_line import Command
from shrike.build.utils.utils import (
create_catalog_stub,
add_file_to_catalog,
write_two_catalog_files,
delete_two_catalog_files,
)
from pathlib import Path
import yaml
import urllib.parse
import uuid
from urllib.parse import urlparse
log = logging.getLogger(__name__)
ALLOWED_CONTAINER_REGISTRIES = ["polymerprod.azurecr.io"]
ALLOWED_PACKAGE_FEEDS = [
"https://o365exchange.pkgs.visualstudio.com/_packaging/PolymerPythonPackages/pypi/simple/"
]
class Prepare(Command):
def __init__(self):
    """Set up the prepare command with an empty status registry."""
    super().__init__()
    # per-component status map; NOTE(review): not written by the methods
    # visible here — presumably populated by other Command machinery
    self._component_statuses = {}
def folder_path(self, file: str) -> str:
    """
    Return the normalized path of the directory containing a file.

    :param file: path to a file; only its parent directory is used.
    """
    return self.normalize_path(Path(file).parent, directory=True)
def all_files_in_snapshot(self, manifest: str) -> List[str]:
    """
    Return a list of all normalized files in the snapshot. The input
    (`manifest`) is assumed to be some file, whether AML-style component
    spec or Aether-style auto-approval manifest, in the "root" of the
    snapshot.
    """
    folder_path = self.folder_path(manifest)
    log.info("Absolute path for current component is: " + folder_path)
    # os.walk picks up Linux-style "hidden" files like .amlignore, hidden
    # directories, and hidden files inside hidden directories.
    # https://stackoverflow.com/a/65205404
    # https://stackoverflow.com/a/41447012
    return [
        self.normalize_path(os.path.join(root, name))
        for root, _, names in os.walk(folder_path)
        for name in names
    ]
def build_all_components(self, files: List[str]) -> List[str]:
    """
    For each component specification file, run `az ml component build`,
    and register the status (+ register error if build failed). Returns the
    list of "built" component files.
    """
    built_paths = []
    for component in files:
        spec_path = Path(component)
        # built spec lands next to the source spec, under .build/
        built_paths.append(str(spec_path.parent / ".build" / spec_path.name))
        if self.execute_azure_cli_command(f"ml component build --file {component}"):
            log.info(f"Component {component} is built.")
        else:
            self.register_error(f"Error when building component {component}.")
    return built_paths
def create_catalog_files(self, files: List[str]):
    """
    Create the appropriate kind of catalog file(s), using the configured
    method ("aml" or "aether").
    """
    mode = self.config.signing_mode
    dispatch = {
        "aml": self.create_catalog_files_for_aml,
        "aether": self.create_catalog_files_for_aether,
    }
    if mode not in dispatch:
        raise ValueError(f"Invalid signing_mode provided: '{mode}'")
    dispatch[mode](files)
def create_catalog_files_for_aether(self, files: List[str]) -> None:
    """
    Create Aether-friendly .cat files, by first creating a CDF file, then
    finding and running `makecat.exe` to create the catalog file.

    :param files: manifest files, one per component snapshot directory.
    """
    makecat_default = self.config.makecat_default
    makecat_directory = self.config.makecat_directory
    makecat = os.path.join(makecat_directory, makecat_default)
    if not os.path.exists(makecat):
        # fall back to searching the SDK tree for an x64 makecat.exe
        log.info(f"Default makecat location {makecat} does not exist")
        for path in Path(makecat_directory).rglob("makecat.exe"):
            if "x64" in str(path).lower():
                makecat = path
                break
    log.info(f"Makecat location: {makecat}")
    for file in files:
        directory = os.path.dirname(file)
        # catalog is named after the component's folder
        name = os.path.split(directory)[-1]
        cat_name = f"{name}.cat"
        cdf_name = f"{name}.cdf"
        path_to_cdf = os.path.join(directory, cdf_name)
        # CDF template lines must stay flush-left: the triple-quoted
        # string's contents are written verbatim into the .cdf
        cdf_contents = f"""[CatalogHeader]
Name={cat_name}
PublicVersion=0x0000001
EncodingType=0x00010001
PageHashes=true
CATATTR1=0x00010001:OSAttr:2:6.2
[CatalogFiles]
"""
        files_in_module = self.all_files_in_snapshot(file)
        # each entry is '<HASH>path=path' per makecat's CatalogFiles syntax
        hash_lines = map(lambda p: f"<HASH>{p}={p}", files_in_module)
        all_hashes = "\n".join(hash_lines)
        cdf_contents += all_hashes
        log.info(f"CDF file contents:\n{cdf_contents}")
        with open(path_to_cdf, "w", encoding="ascii") as output:
            output.write(cdf_contents)
        success = self.execute_command([str(makecat), path_to_cdf, "-v"])
        if success:
            log.info(f"Creating Aether catalog files for {name} is successful.")
            # makecat writes the .cat into the CWD; move it beside the snapshot
            shutil.move(cat_name, directory)
        else:
            self.register_error(
                f"Error when creating Aether catalog files for {name}."
            )
        # the intermediate CDF is not part of the shipped snapshot
        log.info(f"Removing {cdf_name}")
        os.remove(path_to_cdf)
        log.info(f"Finish creating aether catalog files for {name}.")
def create_catalog_files_for_aml(self, files: List[str]) -> None:
    """
    Create AML-friendly catalog.json and catalog.json.sig files, using
    SHA-256 hash.
    """
    for spec_file in files:
        log.info(f"Processing file {spec_file}")
        component_dir = self.folder_path(spec_file)
        # start from a clean slate: drop any stale catalog pair
        log.info("Deleting old catalog files if present")
        delete_two_catalog_files(component_dir)
        snapshot_files = self.all_files_in_snapshot(spec_file)
        log.info("The following list of files will be added to the catalog.")
        log.info(snapshot_files)
        # stub shape: {'HashAlgorithm': 'SHA256', 'CatalogItems': {}}
        catalog = create_catalog_stub()
        for snapshot_file in snapshot_files:
            catalog = add_file_to_catalog(snapshot_file, catalog, component_dir)
        # deterministic entry order keeps the signed catalog reproducible
        catalog["CatalogItems"] = collections.OrderedDict(
            sorted(catalog["CatalogItems"].items())
        )
        log.info(catalog)
        write_two_catalog_files(catalog, component_dir)
    log.info("Finished creating catalog files.")
def find_component_specification_files(self) -> List[str]:
    """
    Find the list of "active" component specification files using the
    configured method ("all" or "smart").
    """
    activation_method = self.config.activation_method
    if activation_method == "all":
        return self.find_component_specification_files_using_all()
    if activation_method == "smart":
        return self.find_component_specification_files_using_smart()
    raise ValueError(
        f"Invalid activation_method provided: '{activation_method}'"
    )
def add_repo_and_last_pr_to_tags(self, files: List[str]) -> List[str]:
    """Annotate each component spec in-place with repo/commit provenance.

    For every spec file, appends a "link to commit" line to its description
    and merges repo URL, last commit id/message and component path into its
    tags. Returns the same list of file paths.
    """
    [repo, current_branch, compliant_branch] = self.identify_repo_and_branches()
    repo_path = repo.remotes.origin.url
    for file in files:
        with open(file, "r") as spec_file:
            # NOTE(review): yaml.load with FullLoader on repo-controlled
            # files; consider yaml.safe_load if specs need no custom tags
            spec = yaml.load(spec_file, Loader=yaml.FullLoader)
        # skip files that don't parse to a mapping (e.g. empty specs)
        if not isinstance(spec, dict):
            continue
        # most recent commit that touched this spec file
        last_commit = next(repo.iter_commits(paths=file, max_count=1))
        last_commit_id = last_commit.hexsha
        last_commit_message = last_commit.summary
        path_to_component = os.path.relpath(
            os.path.split(file)[0], repo.working_dir
        )
        # Azure DevOps-style deep link to the component at that commit
        link_to_commit = (
            repo_path
            + "?version=GC"
            + last_commit_id
            + "&path="
            + urllib.parse.quote(path_to_component, safe="")
        )
        # update description
        cur_description = spec.get("description")
        new_description = f"[link to commit]({link_to_commit})"
        if cur_description:
            spec["description"] = cur_description + "\n----\n" + new_description
        else:
            spec["description"] = new_description
        # update tags
        new_tag = {
            "repo": repo_path,
            "last_commit_id": last_commit_id,
            "last_commit_message": last_commit_message,
            "path_to_component": path_to_component,
        }
        cur_tag = spec.get("tags")
        if cur_tag is None:
            cur_tag = new_tag
        else:
            cur_tag.update(new_tag)
        spec["tags"] = cur_tag
        with open(file, "w") as spec_file:
            # sort_keys=False preserves the original key order on re-dump
            yaml.dump(spec, spec_file, sort_keys=False)
    return files
def find_component_specification_files_using_all(self, dir=None) -> List[str]:
    """
    Find all component specification files in the configured working
    directory matching the configured glob. Return the absolute paths
    of these files in the format of a list of string.
    """
    search_root = self.config.working_directory if dir is None else dir
    glob_pattern = self.config.component_specification_glob
    return [str(match.absolute()) for match in Path(search_root).glob(glob_pattern)]
def find_component_specification_files_using_smart(self) -> List[str]:
    """
    This function returns the list of components (as a list of absolute paths) potentially affected by the latest commit.
    """
    log.info(
        "Determining which components are potentially affected by the current change."
    )
    repo, current_branch, compliant_branch = self.identify_repo_and_branches()
    modified = self.get_modified_files(repo, current_branch, compliant_branch)
    return self.infer_active_components_from_modified_files(modified)
def identify_repo_and_branches(self):
    """
    This function returns the current repository, along with the name of the current and compliant branches [repo, current_branch, compliant_branch]. Throws if no repo can be found.
    """
    # identify the repository
    curr_path = Path(self.config.working_directory).resolve()
    try:
        repo = Repo(curr_path, search_parent_directories=True)
        log.info("Found a valid repository in " + repo.git_dir)
    except (InvalidGitRepositoryError, NoSuchPathError):
        message = (
            str(curr_path)
            + " or its parents do not contain a valid repo path or cannot be accessed."
        )
        raise Exception(message)
    try:
        current_branch = str(
            repo.head.ref
        )  # when running from our build the repo head is detached so this will throw an exception
    except TypeError:
        # detached HEAD: recover the branch name from CI environment variables
        current_branch = os.environ.get("BUILD_SOURCEBRANCH") or os.environ.get(
            "GITHUB_REF"
        )
    log.info("The current branch is: '" + str(current_branch) + "'.")
    # Identify the compliant branch
    if not (self.config.compliant_branch.startswith("^refs/heads/")) or not (
        self.config.compliant_branch.endswith("$")
    ):
        raise Exception(
            "The name of the compliant branch found in the config file should start with '^refs/heads/' and end with '$'. Currently it is: '"
            + self.config.compliant_branch
            + "'."
        )
    else:
        # strip the regex anchors to get the plain branch name
        compliant_branch = self.config.compliant_branch.replace("^refs/heads/", "")[
            0:-1
        ]
    log.info("The compliant branch is: '" + compliant_branch + "'.")
    return [repo, current_branch, compliant_branch]
def get_modified_files(self, repo, current_branch, compliant_branch) -> Set[str]:
"""
This function returns the paths of files that have been modified. 3 scenarios are supported.\n
1/ 'Build - before Merge'; when the 'prepare' command is run as part of a build, but before the actual merge (in this case, the name of the current branch starts with 'refs/pull/' - this is the default Azure DevOps behavior).\n
2/ 'Build - after Merge'; when the 'prepare' command is run as part of a build, after the actual merge (in this case, the name of the current branch is the same as the name of the compliant branch).\n
3/ 'Manual'; when the prepare command is run manually (typically before publishing the PR).
"""
res = set()
# Grab the diff differently depending on the scenario
if current_branch.replace("refs/heads/", "") == compliant_branch:
# 'Build - after Merge' case: we will take the diff between the
# tree of the latest commit to the compliant branch, and the tree
# of the previous commit to the compliant branch corresponding to a
# PR (we assume the commit summary starts with 'Merged PR')
log.info(
"We are in the 'Build - after Merge' case (the current branch is the compliant branch)."
)
current_commit = self.get_compliant_commit_corresponding_to_pull_request(repo, compliant_branch)
self.log_commit_info(current_commit, "Current commit to compliant branch")
previous_commit = (
self.get_previous_compliant_commit_corresponding_to_pull_request(
current_commit,
consider_current_commit=False,
)
)
self.log_commit_info(
previous_commit, "Previous PR commit to compliant branch"
)
elif current_branch.startswith("refs/pull/"):
# 'Build | |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function, absolute_import
import os
import json
import types
import re
from functools import partial
import itertools
import logging
import inspect
import copy
from socket import gethostname
import cgi
import io
from datatypes import (
Url as BaseUrl,
Host,
HTTPHeaders as BaseHeaders,
HTTPEnviron as Environ,
)
from .compat import *
from .decorators.utils import property # must be .utils because circular dep
from .utils import (
AcceptHeader,
MimeType,
Base64,
Deepcopy,
FileWrapper,
)
logger = logging.getLogger(__name__)
class Headers(BaseHeaders):
    """HTTP headers with convenience checks on the body's Content-Type."""

    def _content_type(self):
        # a missing Content-Type header is treated as an empty string
        return self.get("Content-Type", "")

    def is_plain(self):
        """return True if body's content-type is text/plain"""
        return "plain" in self._content_type()

    def is_json(self):
        """return True if body's content-type is application/json"""
        return "json" in self._content_type()

    def is_urlencoded(self):
        """return True if body's content-type is application/x-www-form-urlencoded"""
        ct = self._content_type()
        return ("form-urlencoded" in ct) or ("form-data" in ct)

    def is_multipart(self):
        """return True if body's content-type is multipart/form-data"""
        return "multipart" in self._content_type()
class Body(cgi.FieldStorage, object):
"""Wraps the default FieldStorage to handle json and also recovers when the
input fails to parse correctly
https://github.com/python/cpython/blob/2.7/Lib/cgi.py
https://github.com/python/cpython/blob/3.8/Lib/cgi.py
"""
FieldStorageClass = cgi.FieldStorage
@property(cached="_args")
def args(self):
    """Positional body arguments; only set when a JSON body was a list."""
    return getattr(self, "json_args", [])
@property(cached="_kwargs")
def kwargs(self):
    """Keyword body arguments merged from JSON and form fields.

    JSON dict keys come first; form/multipart fields (wrapped in
    FileWrapper when they carry an upload) overwrite on collision.
    """
    body_kwargs = {}
    body_kwargs.update(getattr(self, "json_kwargs", {}))
    # we only have a list when we had a multipart or form-data submission
    # (idiom: self.list is always set in __init__, so the getattr
    # indirection was unnecessary)
    if self.list:
        for field_name in self.keys():
            body_field = self[field_name]
            if body_field.filename:
                # file upload: expose a wrapper instead of the raw field
                body_kwargs[field_name] = FileWrapper(
                    body_field.file,
                    name=body_field.filename,
                    filename=body_field.filename,
                    type=body_field.type,
                    raw=body_field,
                )
            else:
                body_kwargs[field_name] = body_field.value
    return body_kwargs
def __init__(self, fp, request, **kwargs):
if request.headers.get('transfer-encoding', "").lower().startswith("chunked"):
raise IOError("Chunked bodies are not supported")
self.request = request
# py3 compatibility
self.encoding = request.encoding
self.errors = "replace"
self.max_num_fields = None
self.name = self.filename = self.value = None
self.length = int(request.headers.get("CONTENT_LENGTH", -1))
self.fp = fp
self.list = None
if self.length > 0:
if request.is_json():
self.read_json()
else:
kwargs.setdefault("keep_blank_values", True)
# so FieldStorage parses the body in the constructor and if it fails
# then the body instance won't be created, so this is set to True and
# the error is handled in read_urlencoded
kwargs["strict_parsing"] = True
super(Body, self).__init__(
fp=fp,
headers=request.headers,
environ=request.environ,
**kwargs
)
def is_plain(self):
"""return True if body's content-type is text/plain"""
return self.request.headers.is_plain()
def is_json(self):
"""return True if body's content-type is application/json"""
return self.request.headers.is_json()
def is_urlencoded(self):
"""return True if body's content-type is application/x-www-form-urlencoded"""
return self.request.headers.is_urlencoded()
def is_multipart(self):
"""return True if body's content-type is multipart/form-data"""
return self.request.headers.is_multipart()
def read_json(self):
body = self.fp.read(self.length)
self.file = io.BytesIO(body)
body_args = []
body_kwargs = {}
b = json.loads(body)
if isinstance(b, list):
body_args = b
elif isinstance(b, dict):
body_kwargs = b
else:
body_args = [b]
self.json_args = body_args
self.json_kwargs = body_kwargs
def read_urlencoded(self):
"""Internal: read data in query string format."""
body = self.fp.read(self.length)
self.file = io.BytesIO(body)
qs = String(body, self.encoding, self.errors)
if self.qs_on_post:
qs += '&' + self.qs_on_post
try:
if is_py2:
query = parse.parse_qsl(
qs,
self.keep_blank_values,
self.strict_parsing,
)
else:
query = parse.parse_qsl(
qs,
self.keep_blank_values,
self.strict_parsing,
encoding=self.encoding,
errors=self.errors,
max_num_fields=self.max_num_fields
)
except ValueError:
# if the right headers were sent then this should error
if self.is_urlencoded() or self.is_multipart():
raise
else:
self.list = [cgi.MiniFieldStorage(key, value) for key, value in query]
self.skip_lines()
def make_file(self, *args, **kwargs):
return io.BytesIO()
def seek(self, *args, **kwargs):
return self.file.seek(*args, **kwargs)
def read(self, *args, **kwargs):
return self.file.read(*args, **kwargs)
def tell(self, *args, **kwargs):
return self.file.tell(*args, **kwargs)
class Url(BaseUrl):
    """a url object on steroids, this is here to make it easy to manipulate urls
    we try to map the supported fields to their urlparse equivalents, with some additions

    https://tools.ietf.org/html/rfc3986.html

    given a url http://user:pass@foo.com:1000/bar/che?baz=boom#anchor
    with a controller: Bar

        .scheme = http
        .netloc = user:pass@foo.com:1000
        .hostloc = foo.com:1000
        .hostname = foo.com
        .host() = http://foo.com
        .port = 1000
        .base = http://user:pass@foo.com:1000/bar/che
        .fragment = anchor
        .anchor = fragment
        .uri = /bar/che?baz=boom#anchor
        .host(...) = http://foo.com/...
        .base(...) = http://foo.com/bar/che/...
        .controller(...) = http://foo.com/bar/...
    """
    # path of the controller class handling the request (e.g. "foo/bar")
    class_path = ""

    # path of the module containing the controller (e.g. "foo")
    module_path = ""

    def _create_rooted(self, base_path, *paths, **query_kwargs):
        """Internal helper shared by .module() and .controller(): build a new
        Url instance whose path starts at base_path

        Previously this body was duplicated verbatim in both public methods

        :param base_path: string, the path prefix ("" means no prefix)
        :param *paths: list, the paths to append to base_path
        :param **query_kwargs: dict, any query string params to add
        :returns: new Url instance
        """
        kwargs = self._normalize_params(*paths, **query_kwargs)
        if base_path:
            if "path" in kwargs:
                paths = self.normalize_paths(base_path, kwargs["path"])
                kwargs["path"] = "/".join(paths)
            else:
                kwargs["path"] = base_path
        return self.create(self.root, **kwargs)

    def module(self, *paths, **query_kwargs):
        """create a new Url instance using the module path as a base

        :param *paths: list, the paths to append to the module path
        :param **query_kwargs: dict, any query string params to add
        :returns: new Url instance
        """
        return self._create_rooted(self.module_path, *paths, **query_kwargs)

    def controller(self, *paths, **query_kwargs):
        """create a new url object using the controller path as a base

        if you have a controller `foo.BarController` then this would create a new
        Url instance with `host/foo/bar` as the base path, so any *paths will be
        appended to `/foo/bar`

        :example:
            # controller foo.Bar(Controller)
            print url # http://host.com/foo/bar/some_random_path
            print url.controller() # http://host.com/foo/bar
            print url.controller("che", boom="bam") # http://host/foo/bar/che?boom=bam

        :param *paths: list, the paths to append to the controller path
        :param **query_kwargs: dict, any query string params to add
        :returns: new Url instance
        """
        return self._create_rooted(self.class_path, *paths, **query_kwargs)
class Http(object):
    """Common base for request/response objects: owns a Headers collection and
    the helper methods for reading/writing individual headers"""

    # hook so subclasses can swap in a different headers implementation
    header_class = Headers

    def __init__(self):
        # use the header_class hook; previously this hard-coded Headers(),
        # silently ignoring any subclass override of header_class
        self.headers = self.header_class()

    def has_header(self, header_name):
        """return true if the header is set"""
        return header_name in self.headers

    def set_headers(self, headers):
        """replace all headers with passed in headers"""
        # same consistency fix as __init__: honor the header_class hook
        self.headers = self.header_class(headers)

    def add_headers(self, headers, **kwargs):
        """merge headers (and any kwargs) into the existing headers"""
        self.headers.update(headers, **kwargs)

    def set_header(self, header_name, val):
        """set header_name to val, replacing any previous value"""
        self.headers[header_name] = val

    def add_header(self, header_name, val, **params):
        """add val under header_name, keeping any previous values"""
        self.headers.add_header(header_name, val, **params)

    def get_header(self, header_name, default_val=None):
        """try as hard as possible to get a response header of header_name,
        return default_val if it can't be found"""
        return self.headers.get(header_name, default_val)

    def find_header(self, header_names, default_val=None):
        """given a list of headers return the first one you can, default_val if you
        don't find any

        :param header_names: list, a list of headers, first one found is returned
        :param default_val: mixed, returned if no matching header is found
        :returns: mixed, the value of the header or default_val
        """
        ret = default_val
        for header_name in header_names:
            if self.has_header(header_name):
                ret = self.get_header(header_name, default_val)
                break
        return ret

    def _parse_query_str(self, query):
        """return name=val&name2=val2 strings into {name: val} dict"""
        u = Url(query=query)
        return u.query_kwargs

    def _build_body_str(self, b):
        """serialize body b to a string matching this instance's content-type

        :param b: mixed, the body value to serialize
        :returns: the serialized body, or b unchanged if the content-type
            doesn't call for serialization
        """
        # we are returning the body, let's try and be smart about it and match content type
        ct = self.get_header('content-type')
        if ct:
            ct = ct.lower()
            if ct.rfind("json") >= 0:
                if b:
                    b = json.dumps(b)
                else:
                    b = None

            elif ct.rfind("x-www-form-urlencoded") >= 0:
                b = urlencode(b, doseq=True)

        return b

    def copy(self):
        """nice handy wrapper around the deepcopy"""
        return copy.deepcopy(self)

    def __deepcopy__(self, memodict=None):
        memodict = memodict or {}

        # seed the memo so these fields are carried over instead of deep-copied
        memodict.setdefault("controller_info", getattr(self, "controller_info", {}))
        memodict.setdefault("body", getattr(self, "body", None))

        return Deepcopy(ignore_private=True).copy(self, memodict)

    def is_json(self):
        """return True if this instance's content-type is application/json"""
        return self.headers.is_json()
class Request(Http):
'''
common interface that endpoints uses to decide what to do with the incoming request
an instance of this class is used by the endpoints Call instance to decide where endpoints
should route requests, so, many times, you'll need to write a glue function that takes however
your request data is passed to Python and convert it into a Request instance that endpoints can
understand
properties --
headers -- a dict of all the request headers in { header_name: header_val } format
path -- the /path/part/of/the/url
path_args -- tied to path, it's path, but divided by / so all the path bits are returned as a list
query -- the ?name=val portion of a url
query_kwargs -- tied to query, the values in query but converted to a dict {name: val}
'''
environ = None
"""holds all the values that aren't considered headers but usually get passed with the request"""
raw_request = None
"""the original raw request that was filtered through one of the interfaces"""
method = None
"""the http method (GET, POST)"""
controller_info = None
"""will hold the controller information for the request, populated from the Call"""
body_class = Body
"""see create_body()"""
@property
def accept_content_type(self):
"""Return the requested content type
https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
:returns: string, empty if a suitable content type wasn't found, this will
only check the first accept content type and then only if that content
type has no wildcards
"""
v = ""
accept_header = self.get_header('accept', "")
if accept_header:
a = AcceptHeader(accept_header)
for mt in a:
# we only care about the first value, and only if it has | |
condditions!'
if opts == 'b2' :
x0 , y0 = gr.point ( 0 )
x1 , y1 = gr.point ( 1 )
x2 , y2 = gr.point ( 2 )
elif opts == 'e2' :
x0 , y0 = gr.point ( -3 )
x1 , y1 = gr.point ( -2 )
x2 , y2 = gr.point ( -1 )
dx01 = x0 - x1
dx02 = x0 - x2
dx12 = x1 - x2
return 2 * ( y0 * dx12 - y1 * dx02 + y2 * dx01 ) / ( dx01 * dx02 * dx12 )
ROOT.TGraph .bcond = _gr_bcond_
# =============================================================================
## Convert the histogram into a "Lafferty-Wyatt" graph
# See <NAME> and <NAME>,
# ``Where to stick your data points: The treatment of measurements within wide bins,''
# Nucl. Instrum. Meth. A355, 541 (1995).
# @param histo the histogram
# @param func the model
# @attention: the model can be any reasonable model.
# No need in multiplicative and additive terms:
# the affine transformations do not affect the result.
# e.g. following three graphs are equivalent:
# @code
# >>> histo = ...
# >>> gr1 = histo.lw_graph ( lambda x : math.exp(-x) )
# >>> gr2 = histo.lw_graph ( lambda x : 100*math.exp(-x) )
# >>> gr3 = histo.lw_graph ( lambda x : 100*math.exp(-x)-10 )
# @endcode
# If no reasonable model is known, the splines can be used instead:
# @code
# >>> histo =
# >>> spline = histo.(p,i,d)spline( .... )
# >>> graph = histo.lw_graph ( spline[2] )
# @endcode
# @see https://doi.org/10.1016/0168-9002(94)01112-5
# @author <NAME> <EMAIL>
# @date 2014-12-08
def _lw_graph_ ( histo , func ) :
    """Convert the histogram into a ``Lafferty-Wyatt'' graph
    See Lafferty and Wyatt,
    ``Where to stick your data points: The treatment of measurements within wide bins,''
    Nucl. Instrum. Meth. A355, 541 (1995).

    For each bin the point is placed at the x where the model func equals its
    own average over the bin, instead of at the bin centre.

    >>> histo = ... ## the histogram
    ## the explicit model:
    >>> graph = histo.lw_graph ( lambda x : math.exp(-x) )
    ## use splines:
    >>> spline = histo.(p,i,d)spline( .... )
    >>> graph = histo.lw_graph ( spline[2] )
    >>> histo.Draw('e1')
    >>> graph.Draw('e1p same')
    """
    #
    ## book graph
    #
    graph = ROOT.TGraphAsymmErrors( len ( histo ) - 2 )
    #
    ## copy attributes from the histogram to the new graph
    #
    copy_graph_attributes ( histo , graph )
    #
    ## start actual evaluations
    #
    # NOTE(review): "intergal" looks like a misspelling of "integral" --
    # confirm a module actually exists under this name in ostap.math
    from ostap.math.intergal import integral as _integral
    from ostap.math.rootfinder import findroot
    for item in histo.items () :
        ibin = item[0]  # bin index
        x = item[1]     # bin x: value() is the centre, error() the half-width
        y = item[2]     # bin content with uncertainty
        yv = y.value()
        ye = y.error()
        xv = x.value()
        xe = x.error()
        xmx = xv + xe   # upper bin edge
        xmn = xv - xe   # lower bin edge
        #
        ## solve the equation f(x) = 1/dx*int(f,xmin,xmax)
        #
        ## 1) calculate int(f,xmin,xmax)
        # NOTE(review): fint[0] below assumes the integrator returns an
        # indexable (value, error) pair -- confirm against its API
        fint = _integral ( lambda x : float ( func ( x ) ) , xmn , xmx )
        ## bin-width
        dx = 2.0 * xe
        fx = float ( fint[0]/dx )  # bin-averaged model value
        fxmin = float ( func ( xmn ) )
        fxmax = float ( func ( xmx ) )
        # the root is only bracketed when f crosses fx inside the bin;
        # same-sign endpoints mean no valid Lafferty-Wyatt point exists
        if 0 <= ( fxmin - fx ) * ( fxmax - fx ) :
            logger.warning('Lafferty-Wyatt graph: invalid point: %s ' % x )
            r0 = x.value()  # fall back to the bin centre
        else :
            ## solve the equation f(x) - 1/dx*int(f,xmin,xmax) = 0
            r0 = findroot ( lambda x : ( float ( func ( x ) ) - fx ) ,
                            xmn ,
                            xmx ,
                            xtol = 0.005 * dx )
        ## fill graph
        ip = ibin - 1 ## different conventions for TGraph and TH1
        xep = xmx - r0  # asymmetric x-errors measured from the LW point
        xen = r0 - xmn
        graph.SetPoint ( ip , r0 , yv )
        graph.SetPointError ( ip , xen , xep , ye, ye )
    return graph

# expose the converter as a method on 1D histograms
ROOT.TH1D.lw_graph = _lw_graph_
ROOT.TH1F.lw_graph = _lw_graph_
# =============================================================================
## Convert the histogram into a "Lafferty-Wyatt" graph
# See <NAME> and <NAME>,
# ``Where to stick your data points: The treatment of measurements within wide bins,''
# Nucl. Instrum. Meth. A355, 541 (1995).
# @param histo the histogram
# @param func the model
# @attention: the model can be any reasonable model.
# No need in multiplicative and additive terms:
# the affine transformations do not affect the result.
# e.g. following three graphs are equivalent:
# @code
# >>> histo = ...
# >>> gr1 = lw_graph ( histo , lambda x : math.exp(-x) )
# >>> gr2 = lw_graph ( histo , lambda x : 100*math.exp(-x) )
# >>> gr3 = lw_graph ( histo , lambda x : 100*math.exp(-x)-10 )
# @endcode
# If no reasonable model is known, the splines can be used instead:
# @code
# >>> histo =
# >>> spline = histo.(p,i,d)spline( .... )
# >>> graph = lw_graph ( histo , spline[2] )
# @endcode
# @see https://doi.org/10.1016/0168-9002(94)01112-5
# @author <NAME> <EMAIL>
# @date 2014-12-08
def lw_graph ( histo , func ) :
    """Standalone form of the ``Lafferty-Wyatt'' graph conversion
    (Lafferty & Wyatt, Nucl. Instrum. Meth. A355, 541 (1995)):
    each point sits where the model equals its average over the bin.

    >>> histo = ... ## the histogram
    ## the explicit model:
    >>> graph = lw_graph ( histo , lambda x : math.exp(-x) )
    ## use splines:
    >>> spline = histo.[p,i,d]spline( .... )
    >>> graph = lw_graph ( histo , spline[2] )
    >>> histo.Draw('e1')
    >>> graph.Draw('e1p same')
    """
    ## simply delegate to the method implementation
    return _lw_graph_ ( histo , func )
# =============================================================================
## Create a graph, that represents the area between two curves/functions:
# @code
# import math
# graph = fill_area ( math.sin , math.cos , xmin = 0 , xmax = 5 )
# graph.Draw('f')
# @endcode
# ``Functions'' could be
# - plain functions
# - function objects
# - histograms
# - graphs
# - ...
# Inspired by <NAME> example
# @see https://root.cern.ch/phpBB3/viewtopic.php?t=6346
def fill_area ( fun1 ,
fun2 ,
n = 100 ,
xmin = neg_infinity ,
xmax = pos_infinity ,
log_scale = False ) :
"""Create a graph, that represents the area between
two curves/functions:
>>> import math
>>> graph = fill_area ( math.sin , math.cos , xmin = 0 , xmax = 5 )
>>> graph.Draw('f')
``Functions'' could be
- plain functions
- function objects
- histograms
- graphs
- ...
Inspired by <NAME> example
- see https://root.cern.ch/phpBB3/viewtopic.php?t=6346
"""
#
## try to define proper x-range for graph..
# - from input arguments
# - from fun1 and fun2.
x1mn , x1mx = neg_infinity , pos_infinity
if hasattr ( fun1 , 'xminmax' ) :
x1mn,x1mx = fun1.xminmax()
elif hasattr ( fun1 , 'xmin' ) and hasattr ( fun1 , 'xmax' ) :
x1mn,x1mx = fun1.xmin(), fun1.xmax()
elif hasattr ( fun1 , 'GetXmin' ) and hasattr ( fun1 , 'GetXmax' ) :
x1mn,x1mx = fun1.GetXmin(), fun1.GetXmax()
elif hasattr ( fun1 , 'GetXaxis' ) :
axis = fun1.GetXaxis()
x1mn,x1mx = axis.GetXmin(), axis.GetXmax()
x1mn = max ( x1mn , xmin )
x1mx = min ( x1mx , xmax )
x2mn , x2mx = neg_infinity , pos_infinity
if hasattr ( fun2 , 'xminmax' ) :
x2mn,x2mx = fun2.xminmax()
elif hasattr ( fun2 , 'xmin' ) and hasattr ( fun2 , 'xmax' ) :
x2mn,x2mx = fun2.xmin(), fun2.xmax()
elif hasattr ( fun2 , 'GetXmin' ) and hasattr ( fun2 , 'GetXmax' ) :
x2mn,x2mx = fun2.GetXmin(), fun2.GetXmax()
elif hasattr ( fun2 , 'GetXaxis' ) :
axis = fun2.GetXaxis()
x2mn,x2mx = axis.GetXmin(), axis.GetXmax()
x2mn = max ( x2mn , xmin )
x2mx = min ( x2mx , xmax )
## to be replaced with numpy.isfinite
if x1mn == neg_infinity and x2mn != neg_infinity : x1mn = x2mn
if x1mn != neg_infinity and x2mn == neg_infinity : x2mn = x1mn
if x1mx == pos_infinity and x2mn != pos_infinity : x1mx = x2mx
if x1mx != pos_infinity and x2mn == pos_infinity : x2mx = x1mx
if x1mn == neg_infinity or x2mn == | |
from .microfaune_package.microfaune.detection import RNNDetector
from .microfaune_package.microfaune import audio
import pandas as pd
import scipy.signal as scipy_signal
import numpy as np
import math
import os
def build_isolation_parameters(
        technique,
        threshold_type,
        threshold_const,
        threshold_min=0,
        window_size=1.0,
        chunk_size=2.0):
    """
    Build the isolation_parameters dictionary consumed by isolate() and the
    individual isolation techniques (Steinberg, Simple, Stack, Chunk).

    Args:
        technique (string)
            - Chooses which of the four isolation techniques to deploy
            - options: "steinberg", "chunk", "stack", "simple"

        threshold_type (string)
            - Chooses how to derive a threshold from local score arrays
            - options: "mean", "median", "standard deviation", "pure"

        threshold_const (float)
            - Multiplier for "mean", "median", and "standard deviation". Acts
              as threshold for "pure"

        threshold_min (float)
            - Serves as a minimum barrier of entry for a local score to be
              considered a positive ID of a class.
            - default: 0

        window_size (float)
            - determines how many seconds around a positive ID local score
              to build an annotation.

        chunk_size (float)
            - determines the length of annotation when using "chunk"
              isolation technique

    Returns:
        isolation_parameters (dict)
            - Python dictionary that controls how to go about isolating
              automated labels from audio.
    """
    isolation_parameters = {
        "technique": technique,
        # BUG FIX: this key was misspelled "treshold_type", which made
        # threshold() fail on isolation_parameters["threshold_type"]
        "threshold_type": threshold_type,
        "threshold_const": threshold_const,
        "threshold_min": threshold_min,
        "window_size": window_size,
        "chunk_size": chunk_size
    }

    # warn when a technique-specific knob was set for the wrong technique
    if window_size != 1.0 and technique != "steinberg":
        print('''Warning: window_size is dedicated to the steinberg isolation
            technique. Won't affect current technique.''')
    if chunk_size != 2.0 and technique != "chunk":
        print('''Warning: chunk_size is dedicated to the chunk technique.
            Won't affect current technique.''')

    return isolation_parameters
def isolate(
        local_scores,
        SIGNAL,
        SAMPLE_RATE,
        audio_dir,
        filename,
        isolation_parameters,
        manual_id="bird",
        normalize_local_scores=False):
    """
    Wrapper function for all of the audio isolation techniques (Steinberg,
    Simple, Stack, Chunk). Will call the respective function of
    each technique based on isolation_parameters "technique" key.

    Args:
        local_scores (list of floats)
            - Local scores of the audio clip as determined by
              Microfaune Recurrent Neural Network.

        SIGNAL (list of ints)
            - Samples that make up the audio signal.

        SAMPLE_RATE (int)
            - Sampling rate of the audio clip, usually 44100.

        audio_dir (string)
            - Directory of the audio clip.

        filename (string)
            - Name of the audio clip file.

        isolation_parameters (dict)
            - Python Dictionary that controls the various label creation
              techniques.

        manual_id (string)
            - class name written into the output labels.

        normalize_local_scores (bool)
            - when True, rescale local_scores in place so the max is 1.

    Returns:
        Dataframe of automated labels for the audio clip based on passed in
        isolation technique.
    """
    # normalize the local scores so that the max value is 1.
    if normalize_local_scores:
        local_scores_max = max(local_scores)
        for ndx in range(len(local_scores)):
            local_scores[ndx] = local_scores[ndx] / local_scores_max

    # map each technique name to its implementation
    technique_functions = {
        "simple": simple_isolate,
        "steinberg": steinberg_isolate,
        "stack": stack_isolate,
        "chunk": chunk_isolate,
    }

    # unknown techniques fall through to an empty dataframe, matching the
    # original if/elif chain's behavior
    isolation_df = pd.DataFrame()
    technique_function = technique_functions.get(
        isolation_parameters["technique"])
    if technique_function is not None:
        # BUG FIX: manual_id is now forwarded to the technique function;
        # previously every branch hard-coded manual_id="bird", silently
        # ignoring the caller's argument
        isolation_df = technique_function(
            local_scores,
            SIGNAL,
            SAMPLE_RATE,
            audio_dir,
            filename,
            isolation_parameters,
            manual_id=manual_id)

    return isolation_df
def threshold(local_scores, isolation_parameters):
    """
    Takes in the local score array output from a neural network and determines
    the threshold at which we determine a local score to be a positive
    ID of a class of interest. Most proof of concept work is dedicated to bird
    presence. Threshold is determined by "threshold_type" and "threshold_const"
    from the isolation_parameters dictionary.

    Args:
        local_scores (list of floats)
            - Local scores of the audio clip as determined by Microfaune
              Recurrent Neural Network.

        isolation_parameters (dict)
            - Python Dictionary that controls the various label creation
              techniques.

    Returns:
        thresh (float)
            - threshold at which the local scores in the local score array of
              an audio clip will be viewed as a positive ID.

    Raises:
        ValueError: if isolation_parameters["threshold_type"] is not one of
        "median", "mean"/"average", "standard deviation", or "pure".
    """
    threshold_type = isolation_parameters["threshold_type"]
    threshold_const = isolation_parameters["threshold_const"]

    if threshold_type == "median":
        thresh = np.median(local_scores) * threshold_const
    elif threshold_type == "mean" or threshold_type == "average":
        thresh = np.mean(local_scores) * threshold_const
    elif threshold_type == "standard deviation":
        thresh = np.mean(local_scores) + \
            (np.std(local_scores) * threshold_const)
    elif threshold_type == "pure":
        thresh = threshold_const
        # "pure" thresholds are used literally, so clamp them into [0, 1]
        if thresh < 0:
            print("Threshold is less than zero, setting to zero")
            thresh = 0
        elif thresh > 1:
            print("Threshold is greater than one, setting to one.")
            thresh = 1
    else:
        # BUG FIX: an unrecognized type previously fell through and raised a
        # confusing UnboundLocalError on `thresh`; fail loudly instead
        raise ValueError(
            "Unknown threshold_type: {}".format(threshold_type))
    return thresh
def steinberg_isolate(
        local_scores,
        SIGNAL,
        SAMPLE_RATE,
        audio_dir,
        filename,
        isolation_parameters,
        manual_id="bird"):
    """
    Technique that takes the local score array output of a neural network and
    lumps local scores together to produce automated labels based on a class
    across an audio clip.

    Technique Pseudocode:
        Loop through local score array:
            if current local score > (threshold and threshold_min):
                build an annotation with current local score at the center with
                +- window_size/2 seconds around current local score.
            else:
                continue
        extra logic handles overlap if a local score meets the criteria within
        the "window_size" from a prior local score

    Args:
        local_scores (list of floats)
            - Local scores of the audio clip as determined by RNNDetector.

        SIGNAL (list of ints)
            - Samples from the audio clip.

        SAMPLE_RATE (int)
            - Sampling rate of the audio clip, usually 44100.

        audio_dir (string)
            - Directory of the audio clip.

        filename (string)
            - Name of the audio clip file.

        isolation_parameters (dict)
            - Python Dictionary that controls the various label creation
              techniques.

        manual_id (string)
            - controls the name of the class written to the pandas dataframe

    Returns:
        Pandas Dataframe of automated labels for the audio clip
        (columns include OFFSET, DURATION, and MANUAL ID).
    """
    # calculate original duration
    old_duration = len(SIGNAL) / SAMPLE_RATE

    # clip-level metadata; OFFSET / MANUAL ID are filled per-annotation below
    entry = {'FOLDER': audio_dir,
             'IN FILE': filename,
             'CHANNEL': 0,
             'CLIP LENGTH': old_duration,
             'SAMPLE RATE': SAMPLE_RATE,
             'OFFSET': [],
             'MANUAL ID': []}

    # calculating threshold that will define how labels are created in current
    # audio clip
    thresh = threshold(local_scores, isolation_parameters)

    # how many samples one local score represents
    samples_per_score = len(SIGNAL) // len(local_scores)

    # NOTE: the original version also accumulated the captured samples into an
    # `isolated_samples` array via repeated np.append (quadratic), but that
    # array was never used or returned -- the dead work has been removed.
    prev_cap = 0  # sample idx of the end of the previously captured window
    for i in range(len(local_scores)):
        # if a score hits or surpasses thresh, capture window_size/2 seconds
        # on both sides of it
        if (local_scores[i] >= thresh and
                local_scores[i] >= isolation_parameters["threshold_min"]):
            # score_pos is the sample index that the score corresponds to
            score_pos = i * samples_per_score

            # upper and lower bound of captured call, clamped to the clip
            lo_idx = max(
                0,
                score_pos - int(isolation_parameters["window_size"]
                                / 2 * SAMPLE_RATE))
            hi_idx = min(
                len(SIGNAL),
                score_pos + int(isolation_parameters["window_size"]
                                / 2 * SAMPLE_RATE))
            lo_time = lo_idx / SAMPLE_RATE
            hi_time = hi_idx / SAMPLE_RATE

            # create new stamp if not overlapping the previous one (or if this
            # is the first stamp)
            if prev_cap < lo_idx or prev_cap == 0:
                new_stamp = [lo_time, hi_time]
                entry['OFFSET'].append(new_stamp)
                entry['MANUAL ID'].append(manual_id)
            # extend same stamp if still overlapping
            else:
                entry['OFFSET'][-1][1] = hi_time

            # mark previously captured to prevent overlap collection
            prev_cap = hi_idx

    entry = pd.DataFrame.from_dict(entry)

    # split each [start, end] stamp into OFFSET (start) and DURATION (end-start)
    # to stay compatible with Kaleidoscope
    OFFSET = entry['OFFSET'].str[0]
    DURATION = entry['OFFSET'].str[1]
    DURATION = DURATION - OFFSET

    entry.insert(6, "DURATION", DURATION)
    entry["OFFSET"] = OFFSET
    return entry
def simple_isolate(
local_scores,
SIGNAL,
SAMPLE_RATE,
audio_dir,
filename,
isolation_parameters,
manual_id="bird"):
"""
Technique suggested by <NAME> and implemented by <NAME>.
Attempts to produce automated annotations of an audio clip based
on local score array outputs from a neural network.
Technique Pseudocode:
Loop through local score array:
if current local score > (threshold and threshold_min)
and annotation start = 0:
start annotation
else if current local score < thresh and annotation start = 1:
end annotation
else:
continue
Args:
local_scores (list of floats)
- Local | |
E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.import_config_source(file, async_req=True)
>>> result = thread.get()
:param async_req bool
:param file file: (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.import_config_source_with_http_info(file, **kwargs) # noqa: E501
else:
(data) = self.import_config_source_with_http_info(file, **kwargs) # noqa: E501
return data
def import_config_source_with_http_info(self, file, **kwargs):  # noqa: E501
    """import config source via xml  # noqa: E501

    Synchronous by default; pass async_req=True to get the request thread
    back instead of the response.  # noqa: E501

    >>> thread = api.import_config_source_with_http_info(file, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param file file: (required)
    :return: object
             If the method is called asynchronously,
             returns the request thread.
    """
    # parameters this endpoint accepts, plus the common transport options
    all_params = ['file']  # noqa: E501
    all_params.extend([
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ])

    params = locals()
    # reject any keyword argument the endpoint doesn't know about
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method import_config_source" % key
            )
        params[key] = val
    del params['kwargs']

    # verify the required parameter 'file' is set
    if params.get('file') is None:
        raise ValueError("Missing the required parameter `file` when calling `import_config_source`")  # noqa: E501

    collection_formats = {}
    path_params = {}
    query_params = []

    # the upload itself travels as a multipart form file
    local_var_files = {}
    if 'file' in params:
        local_var_files['file'] = params['file']  # noqa: E501
    form_params = []

    header_params = {}
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['multipart/form-data'])  # noqa: E501

    # Authentication setting
    auth_settings = ['LMv1']  # noqa: E501

    return self.api_client.call_api(
        '/setting/configsources/importxml', 'POST',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=form_params,
        files=local_var_files,
        response_type='object',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def import_data_source(self, file, **kwargs):  # noqa: E501
    """import datasource via xml  # noqa: E501

    Synchronous by default; pass async_req=True to get the request thread
    back instead of the response data.  # noqa: E501

    >>> thread = api.import_data_source(file, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param file file: (required)
    :return: object
             If the method is called asynchronously,
             returns the request thread.
    """
    # the *_with_http_info variant handles both the sync and async cases;
    # either way, its return value is exactly what the caller should get
    kwargs['_return_http_data_only'] = True
    return self.import_data_source_with_http_info(file, **kwargs)  # noqa: E501
def import_data_source_with_http_info(self, file, **kwargs):  # noqa: E501
    """import datasource via xml  # noqa: E501

    Synchronous by default; pass async_req=True to get the request thread
    back instead of the response.  # noqa: E501

    >>> thread = api.import_data_source_with_http_info(file, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param file file: (required)
    :return: object
             If the method is called asynchronously,
             returns the request thread.
    """
    # parameters this endpoint accepts, plus the common transport options
    all_params = ['file']  # noqa: E501
    all_params.extend([
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ])

    params = locals()
    # reject any keyword argument the endpoint doesn't know about
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method import_data_source" % key
            )
        params[key] = val
    del params['kwargs']

    # verify the required parameter 'file' is set
    if params.get('file') is None:
        raise ValueError("Missing the required parameter `file` when calling `import_data_source`")  # noqa: E501

    collection_formats = {}
    path_params = {}
    query_params = []

    # the upload itself travels as a multipart form file
    local_var_files = {}
    if 'file' in params:
        local_var_files['file'] = params['file']  # noqa: E501
    form_params = []

    header_params = {}
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['multipart/form-data'])  # noqa: E501

    # Authentication setting
    auth_settings = ['LMv1']  # noqa: E501

    return self.api_client.call_api(
        '/setting/datasources/importxml', 'POST',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=form_params,
        files=local_var_files,
        response_type='object',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def import_event_source(self, file, **kwargs):  # noqa: E501
    """import eventsource via xml  # noqa: E501

    Synchronous by default; pass async_req=True to get the request thread
    back instead of the response data.  # noqa: E501

    >>> thread = api.import_event_source(file, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param file file: (required)
    :return: object
             If the method is called asynchronously,
             returns the request thread.
    """
    # the *_with_http_info variant handles both the sync and async cases;
    # either way, its return value is exactly what the caller should get
    kwargs['_return_http_data_only'] = True
    return self.import_event_source_with_http_info(file, **kwargs)  # noqa: E501
def import_event_source_with_http_info(self, file, **kwargs): # noqa: E501
"""import eventsource via xml # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.import_event_source_with_http_info(file, async_req=True)
>>> result = thread.get()
:param async_req bool
:param file file: (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['file'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method import_event_source" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'file' is set
if ('file' not in params or
params['file'] is None):
raise ValueError("Missing the required parameter `file` when calling `import_event_source`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'file' in params:
local_var_files['file'] = params['file'] # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/setting/eventsources/importxml', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_admin_by_id(self, id, body, **kwargs): # noqa: E501
"""update user # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_admin_by_id(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:param Admin body: (required)
:param bool change_password:
:return: Admin
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_admin_by_id_with_http_info(id, body, **kwargs) # noqa: E501
else:
(data) = self.patch_admin_by_id_with_http_info(id, body, **kwargs) # noqa: E501
return data
def patch_admin_by_id_with_http_info(self, id, body, **kwargs): # noqa: E501
"""update user # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_admin_by_id_with_http_info(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:param Admin body: (required)
:param bool change_password:
:return: Admin
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body', 'change_password'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_admin_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `patch_admin_by_id`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_admin_by_id`") # noqa: E501
if 'id' in params and not re.search('\d+', params['id'] if type(params['id']) is str else str(params['id'])): # noqa: E501
raise ValueError("Invalid value for parameter `id` when calling `patch_admin_by_id`, must conform to the pattern `/\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'change_password' in params:
query_params.append(('changePassword', params['change_password'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
| |
"""Generated message classes for gkebackup version v1alpha1.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'gkebackup'
class AuditConfig(_messages.Message):
  r"""Specifies the audit configuration for a service. The configuration
  determines which permission types are logged, and what identities, if any,
  are exempted from logging. An AuditConfig must have one or more
  AuditLogConfigs. If there are AuditConfigs for both `allServices` and a
  specific service, the union of the two AuditConfigs is used for that
  service: the log_types specified in each AuditConfig are enabled, and the
  exempted_members in each AuditLogConfig are exempted. Example Policy with
  multiple AuditConfigs: { "audit_configs": [ { "service": "allServices",
  "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [
  "user:<EMAIL>" ] }, { "log_type": "DATA_WRITE" }, { "log_type":
  "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com",
  "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type":
  "DATA_WRITE", "exempted_members": [ "user:<EMAIL>" ] } ] } ] } For
  sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ
  logging. It also exempts <EMAIL> from DATA_READ logging, and
  <EMAIL> from DATA_WRITE logging.

  Fields:
    auditLogConfigs: The configuration for logging of each type of permission.
    service: Specifies a service that will be enabled for audit logging. For
      example, `storage.googleapis.com`, `cloudsql.googleapis.com`.
      `allServices` is a special value that covers all services.
  """

  # Generated message: field numbers (1, 2) are the serialized proto tags
  # and are part of the wire contract -- keep them stable.
  auditLogConfigs = _messages.MessageField('AuditLogConfig', 1, repeated=True)
  service = _messages.StringField(2)
class AuditLogConfig(_messages.Message):
  r"""Provides the configuration for logging a type of permissions. Example: {
  "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [
  "user:<EMAIL>" ] }, { "log_type": "DATA_WRITE" } ] } This enables
  'DATA_READ' and 'DATA_WRITE' logging, while exempting <EMAIL> from
  DATA_READ logging.

  Enums:
    LogTypeValueValuesEnum: The log type that this config enables.

  Fields:
    exemptedMembers: Specifies the identities that do not cause logging for
      this type of permission. Follows the same format of Binding.members.
    logType: The log type that this config enables.
  """

  class LogTypeValueValuesEnum(_messages.Enum):
    r"""The log type that this config enables.

    Values:
      LOG_TYPE_UNSPECIFIED: Default case. Should never be this.
      ADMIN_READ: Admin reads. Example: CloudIAM getIamPolicy
      DATA_WRITE: Data writes. Example: CloudSQL Users create
      DATA_READ: Data reads. Example: CloudSQL Users list
    """
    # Enum numbers mirror the proto enum values; keep them stable.
    LOG_TYPE_UNSPECIFIED = 0
    ADMIN_READ = 1
    DATA_WRITE = 2
    DATA_READ = 3

  # Generated message: field numbers (1, 2) are the serialized proto tags
  # and are part of the wire contract -- keep them stable.
  exemptedMembers = _messages.StringField(1, repeated=True)
  logType = _messages.EnumField('LogTypeValueValuesEnum', 2)
class Backup(_messages.Message):
  r"""Backup resource Next id: 28

  Enums:
    StateValueValuesEnum: Current state of the Backup

  Messages:
    LabelsValue: A set of custom labels supplied by user.

  Fields:
    allNamespaces: Output only. If set to true, all namespaces backup
    clusterConfigSizeBytes: Output only. cluster config backup size in bytes.
    clusterMetadata: cluster metadata
    completeTime: Output only. Completion time of the Backup
    containsSecrets: Output only. A boolean flag specifies whether secrets has
      been backed up
    containsVolumeData: Output only. A boolean flag specifies whether volume
      data has been backed up
    createTime: Output only. [Output Only] The timestamp when this Backup
      resource was created which can be converted to and from
      [RFC3339](https://www.ietf.org/rfc/rfc3339.txt)
    deleteLockDays: delete_lock_days specifies the number of days from the
      create_time of this Backup before which deletion will be blocked. For
      automatically created Backup from schedule, this field will be set to
      the BackupPlan.RetentionPolicy.backup_delete_block_days. Manual creation
      of a Backup with this field unspecified causes the service to use the
      value of BackupPlan.RetentionPolicy.backup_delete_block_days. Creation
      of a Backup with this field set to a value SMALLER than
      BackupPlan.RetentionPolicy.backup_delete_block_days results in an
      invalid response from the service. This field MUST be an int value
      between 0-90(inclusive). This field may only be INCREASED in an Update
      request, or an invalid response will be returned by the service. Note
      that this field ONLY applies to Backups with a SUCCEEDED state. Default
      to 0.
    deleteLockExpireTime: Output only. delete_lock_expire_time suggest the
      timestamp after which the deletion lock will expire. It's an output only
      field calculated from create_time + delete_lock_days, and will be
      updated accordingly when a Backup's delete_lock_days field has been
      updated. Note that this field ONLY applies to Backups with a SUCCEEDED
      state.
    description: User specified descriptive string for this Backup.
    encryptionKey: Output only. encryption key, immutable.
    etag: Output only. `etag` is used for optimistic concurrency control as a
      way to help prevent simultaneous updates of a backup from overwriting
      each other. It is strongly suggested that systems make use of the `etag`
      in the read-modify-write cycle to perform backup updates in order to
      avoid race conditions: An `etag` is returned in the response to
      `GetBackup`, and systems are expected to put that etag in the request to
      `UpdateBackup` to ensure that their change will be applied to the same
      version.
    labels: A set of custom labels supplied by user.
    manual: Output only. manual, a boolean flag suggests whether this Backup
      resource has been created manually. "True" suggests this backup has been
      created manually, "False" suggests this backup has been created
      automatically from the BackupPlan's schedule.
    name: Output only. The fully qualified name of the Backup.
      projects/*/locations/*/backupPlans/*/backups/*
    podCount: Output only. total number of pods backed up.
    resourceCount: Output only. total number of resources backed up
    retainDays: retain_days specifies the desired number of days from the
      create_time of this Backup after which it will be automatically deleted.
      If not specified or set to 0, it means the Backup will NOT be
      automatically deleted. For automatically created Backup from schedule,
      this field will be set to the
      BackupPlan.RetentionPolicy.backup_retain_days. Manual creation of a
      Backup with this field unspecified causes the service to use the value
      of BackupPlan.RetentionPolicy.backup_retain_days. Creation of a Backup
      with this field set to a value SMALLER than delete_lock_days results in
      an invalid response from the service. This field may ONLY be increased
      in an Update request, or an invalid response will be returned by the
      service immediately. Default to 0.
    retainExpireTime: Output only. retain_expire_time suggest the timestamp
      after which the Backup will be automatically deleted. It's an output
      only field calculated from create_time + retain_days, and will be
      updated accordingly when a Backup's retain_days field has been updated.
    selectedApplications: Output only. If set, the list of applications backed
      up
    selectedNamespaces: Output only. If set, the list of namespaces backed up
    sizeBytes: Output only. total size in bytes
    state: Current state of the Backup
    stateReason: Human-readable description of why the backup is in the
      current 'state'.
    uid: Output only. [Output Only] Server generated global unique identifier
      of [UUID4](https://en.wikipedia.org/wiki/Universally_unique_identifier)
    updateTime: Output only. [Output Only] The timestamp when this BackupPlan
      resource was last updated which can be converted to and from
      [RFC3339](https://www.ietf.org/rfc/rfc3339.txt)
    volumeCount: total number of volumes backed up
  """

  class StateValueValuesEnum(_messages.Enum):
    r"""Current state of the Backup

    Values:
      BACKUP_STATE_UNSPECIFIED: BACKUP_STATE_UNSPECIFIED
      CREATING: CREATING
      IN_PROGRESS: IN_PROGRESS
      SUCCEEDED: SUCCEEDED
      FAILED: FAILED
      DELETING: Backup
    """
    # Enum numbers mirror the proto enum values; keep them stable.
    BACKUP_STATE_UNSPECIFIED = 0
    CREATING = 1
    IN_PROGRESS = 2
    SUCCEEDED = 3
    FAILED = 4
    DELETING = 5

  # Map-valued proto field: unrecognized JSON keys are routed into
  # `additionalProperties` by the apitools encoding layer.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""A set of custom labels supplied by user.

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  # Generated message: field numbers 1-27 are the serialized proto tags and
  # are part of the wire contract -- keep them stable ("Next id: 28" above
  # is the next free tag for the generator).
  allNamespaces = _messages.BooleanField(1)
  clusterConfigSizeBytes = _messages.IntegerField(2)
  clusterMetadata = _messages.MessageField('ClusterMetadata', 3)
  completeTime = _messages.StringField(4)
  containsSecrets = _messages.BooleanField(5)
  containsVolumeData = _messages.BooleanField(6)
  createTime = _messages.StringField(7)
  deleteLockDays = _messages.IntegerField(8, variant=_messages.Variant.INT32)
  deleteLockExpireTime = _messages.StringField(9)
  description = _messages.StringField(10)
  encryptionKey = _messages.MessageField('EncryptionKey', 11)
  etag = _messages.StringField(12)
  labels = _messages.MessageField('LabelsValue', 13)
  manual = _messages.BooleanField(14)
  name = _messages.StringField(15)
  podCount = _messages.IntegerField(16, variant=_messages.Variant.INT32)
  resourceCount = _messages.IntegerField(17, variant=_messages.Variant.INT32)
  retainDays = _messages.IntegerField(18, variant=_messages.Variant.INT32)
  retainExpireTime = _messages.StringField(19)
  selectedApplications = _messages.MessageField('NamespacedNames', 20)
  selectedNamespaces = _messages.MessageField('Namespaces', 21)
  sizeBytes = _messages.IntegerField(22)
  state = _messages.EnumField('StateValueValuesEnum', 23)
  stateReason = _messages.StringField(24)
  uid = _messages.StringField(25)
  updateTime = _messages.StringField(26)
  volumeCount = _messages.IntegerField(27, variant=_messages.Variant.INT32)
class BackupConfig(_messages.Message):
r"""BackupConfig, an inner message type defines the configuration of
creating a backup from this BackupPlan
Fields:
allNamespaces: If set to true, backup whole cluster
encryptionKey: Custom encryption key. For preview, support GCP KMS only.
includeSecrets: A boolean flag specifies whether secrets should be backed
up | |
version=1)
class Microsoft_Office_Word_30189_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30190, version=1)
class Microsoft_Office_Word_30190_1(Etw):
    # Generated ETW event template for the Microsoft-Office-Word provider
    # (event id 30190, version 1). The payload parses as two wide-string
    # fields, "tag" then "xsz". This shape is shared by the sibling
    # Microsoft_Office_Word_* classes in this file, which differ only in
    # event id.
    pattern = Struct(
        "tag" / WString,
        "xsz" / WString
    )
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30191, version=1)
class Microsoft_Office_Word_30191_1(Etw):
    # Generated ETW event template for the Microsoft-Office-Word provider
    # (event id 30191, version 1): payload is two wide-string fields,
    # "tag" then "xsz".
    pattern = Struct(
        "tag" / WString,
        "xsz" / WString
    )
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30192, version=1)
class Microsoft_Office_Word_30192_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30193, version=1)
class Microsoft_Office_Word_30193_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30194, version=1)
class Microsoft_Office_Word_30194_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30195, version=1)
class Microsoft_Office_Word_30195_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30196, version=1)
class Microsoft_Office_Word_30196_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30197, version=1)
class Microsoft_Office_Word_30197_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30198, version=1)
class Microsoft_Office_Word_30198_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30199, version=1)
class Microsoft_Office_Word_30199_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30200, version=1)
class Microsoft_Office_Word_30200_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30201, version=1)
class Microsoft_Office_Word_30201_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30202, version=1)
class Microsoft_Office_Word_30202_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30203, version=1)
class Microsoft_Office_Word_30203_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30204, version=1)
class Microsoft_Office_Word_30204_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30205, version=1)
class Microsoft_Office_Word_30205_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30206, version=1)
class Microsoft_Office_Word_30206_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30207, version=1)
class Microsoft_Office_Word_30207_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30208, version=1)
class Microsoft_Office_Word_30208_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30209, version=1)
class Microsoft_Office_Word_30209_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30210, version=1)
class Microsoft_Office_Word_30210_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30211, version=1)
class Microsoft_Office_Word_30211_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30212, version=1)
class Microsoft_Office_Word_30212_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30213, version=1)
class Microsoft_Office_Word_30213_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30214, version=1)
class Microsoft_Office_Word_30214_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30215, version=1)
class Microsoft_Office_Word_30215_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30216, version=1)
class Microsoft_Office_Word_30216_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30217, version=1)
class Microsoft_Office_Word_30217_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30218, version=1)
class Microsoft_Office_Word_30218_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30219, version=1)
class Microsoft_Office_Word_30219_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30220, version=1)
class Microsoft_Office_Word_30220_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30221, version=1)
class Microsoft_Office_Word_30221_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30222, version=1)
class Microsoft_Office_Word_30222_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30223, version=1)
class Microsoft_Office_Word_30223_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30224, version=1)
class Microsoft_Office_Word_30224_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30225, version=1)
class Microsoft_Office_Word_30225_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30226, version=1)
class Microsoft_Office_Word_30226_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30227, version=1)
class Microsoft_Office_Word_30227_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30228, version=1)
class Microsoft_Office_Word_30228_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30229, version=1)
class Microsoft_Office_Word_30229_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30230, version=1)
class Microsoft_Office_Word_30230_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30231, version=1)
class Microsoft_Office_Word_30231_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30232, version=1)
class Microsoft_Office_Word_30232_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30233, version=1)
class Microsoft_Office_Word_30233_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30234, version=1)
class Microsoft_Office_Word_30234_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30235, version=1)
class Microsoft_Office_Word_30235_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30236, version=1)
class Microsoft_Office_Word_30236_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30237, version=1)
class Microsoft_Office_Word_30237_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30238, version=1)
class Microsoft_Office_Word_30238_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30239, version=1)
class Microsoft_Office_Word_30239_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30240, version=1)
class Microsoft_Office_Word_30240_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30241, version=1)
class Microsoft_Office_Word_30241_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30242, version=1)
class Microsoft_Office_Word_30242_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30243, version=1)
class Microsoft_Office_Word_30243_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30244, version=1)
class Microsoft_Office_Word_30244_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30245, version=1)
class Microsoft_Office_Word_30245_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30246, version=1)
class Microsoft_Office_Word_30246_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30247, version=1)
class Microsoft_Office_Word_30247_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30248, version=1)
class Microsoft_Office_Word_30248_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30249, version=1)
class Microsoft_Office_Word_30249_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30250, version=1)
class Microsoft_Office_Word_30250_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30251, version=1)
class Microsoft_Office_Word_30251_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30252, version=1)
class Microsoft_Office_Word_30252_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30253, version=1)
class Microsoft_Office_Word_30253_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30254, version=1)
class Microsoft_Office_Word_30254_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30255, version=1)
class Microsoft_Office_Word_30255_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30256, version=1)
class Microsoft_Office_Word_30256_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30257, version=1)
class Microsoft_Office_Word_30257_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30258, version=1)
class Microsoft_Office_Word_30258_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30259, version=1)
class Microsoft_Office_Word_30259_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30260, version=1)
class Microsoft_Office_Word_30260_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30261, version=1)
class Microsoft_Office_Word_30261_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30262, version=1)
class Microsoft_Office_Word_30262_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30263, version=1)
class Microsoft_Office_Word_30263_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30264, version=1)
class Microsoft_Office_Word_30264_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30265, version=1)
class Microsoft_Office_Word_30265_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30266, version=1)
class Microsoft_Office_Word_30266_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30267, version=1)
class Microsoft_Office_Word_30267_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30268, version=1)
class Microsoft_Office_Word_30268_1(Etw):
pattern = Struct(
"tag" / WString,
"xsz" / WString
)
# Auto-generated ETW event definitions for the Microsoft-Office-Word
# provider (GUID daf0b914-9c1c-450a-81b2-fea7244f6ffa).  Each class maps
# one event id (30269-30286, all version 1) to the same two-field payload:
# "tag" and "xsz", parsed as null-terminated wide (UTF-16) strings.
# Do not hand-edit individual classes; they are produced by a generator.
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30269, version=1)
class Microsoft_Office_Word_30269_1(Etw):
    pattern = Struct(
        "tag" / WString,
        "xsz" / WString
    )
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30270, version=1)
class Microsoft_Office_Word_30270_1(Etw):
    pattern = Struct(
        "tag" / WString,
        "xsz" / WString
    )
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30271, version=1)
class Microsoft_Office_Word_30271_1(Etw):
    pattern = Struct(
        "tag" / WString,
        "xsz" / WString
    )
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30272, version=1)
class Microsoft_Office_Word_30272_1(Etw):
    pattern = Struct(
        "tag" / WString,
        "xsz" / WString
    )
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30273, version=1)
class Microsoft_Office_Word_30273_1(Etw):
    pattern = Struct(
        "tag" / WString,
        "xsz" / WString
    )
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30274, version=1)
class Microsoft_Office_Word_30274_1(Etw):
    pattern = Struct(
        "tag" / WString,
        "xsz" / WString
    )
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30275, version=1)
class Microsoft_Office_Word_30275_1(Etw):
    pattern = Struct(
        "tag" / WString,
        "xsz" / WString
    )
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30276, version=1)
class Microsoft_Office_Word_30276_1(Etw):
    pattern = Struct(
        "tag" / WString,
        "xsz" / WString
    )
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30277, version=1)
class Microsoft_Office_Word_30277_1(Etw):
    pattern = Struct(
        "tag" / WString,
        "xsz" / WString
    )
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30278, version=1)
class Microsoft_Office_Word_30278_1(Etw):
    pattern = Struct(
        "tag" / WString,
        "xsz" / WString
    )
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30279, version=1)
class Microsoft_Office_Word_30279_1(Etw):
    pattern = Struct(
        "tag" / WString,
        "xsz" / WString
    )
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30280, version=1)
class Microsoft_Office_Word_30280_1(Etw):
    pattern = Struct(
        "tag" / WString,
        "xsz" / WString
    )
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30281, version=1)
class Microsoft_Office_Word_30281_1(Etw):
    pattern = Struct(
        "tag" / WString,
        "xsz" / WString
    )
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30282, version=1)
class Microsoft_Office_Word_30282_1(Etw):
    pattern = Struct(
        "tag" / WString,
        "xsz" / WString
    )
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30283, version=1)
class Microsoft_Office_Word_30283_1(Etw):
    pattern = Struct(
        "tag" / WString,
        "xsz" / WString
    )
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30284, version=1)
class Microsoft_Office_Word_30284_1(Etw):
    pattern = Struct(
        "tag" / WString,
        "xsz" / WString
    )
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30285, version=1)
class Microsoft_Office_Word_30285_1(Etw):
    pattern = Struct(
        "tag" / WString,
        "xsz" / WString
    )
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30286, version=1)
class Microsoft_Office_Word_30286_1(Etw):
    pattern = Struct(
        "tag" / WString,
        "xsz" / WString
    )
@declare(guid=guid("daf0b914-9c1c-450a-81b2-fea7244f6ffa"), event_id=30287, version=1)
class Microsoft_Office_Word_30287_1(Etw):
pattern = Struct(
"tag" | |
'vn',
'vna',
'vno',
},
'lt': {
'á',
'ákypai',
'ástriþai',
'ðájá',
'ðalia',
'ðe',
'ðiàjà',
'ðiàja',
'ðiàsias',
'ðiøjø',
'ðiøjø',
'ði',
'ðiaisiais',
'ðiajai',
'ðiajam',
'ðiajame',
'ðiapus',
'ðiedvi',
'ðieji',
'ðiesiems',
'ðioji',
'ðiojo',
'ðiojoje',
'ðiokia',
'ðioks',
'ðiosiomis',
'ðiosioms',
'ðiosios',
'ðiosios',
'ðiosiose',
'ðis',
'ðisai',
'ðit',
'ðita',
'ðitas',
'ðitiedvi',
'ðitokia',
'ðitoks',
'ðituodu',
'ðiuodu',
'ðiuoju',
'ðiuosiuose',
'ðiuosius',
'ðtai',
'þemiau',
'að',
'abi',
'abidvi',
'abiejø',
'abiejose',
'abiejuose',
'abiem',
'abigaliai',
'abipus',
'abu',
'abudu',
'ai',
'anàjà',
'anàjá',
'anàja',
'anàsias',
'anøjø',
'anøjø',
'ana',
'ana',
'anaiptol',
'anaisiais',
'anajai',
'anajam',
'anajame',
'anapus',
'anas',
'anasai',
'anasis',
'anei',
'aniedvi',
'anieji',
'aniesiems',
'anoji',
'anojo',
'anojoje',
'anokia',
'anoks',
'anosiomis',
'anosioms',
'anosios',
'anosios',
'anosiose',
'anot',
'ant',
'antai',
'anuodu',
'anuoju',
'anuosiuose',
'anuosius',
'apie',
'aplink',
'ar',
'ar',
'arba',
'argi',
'arti',
'aukðèiau',
'be',
'be',
'bei',
'beje',
'bemaþ',
'bent',
'bet',
'betgi',
'beveik',
'dëka',
'dël',
'dëlei',
'dëlto',
'dar',
'dargi',
'daugmaþ',
'deja',
'ech',
'et',
'gal',
'galbût',
'galgi',
'gan',
'gana',
'gi',
'greta',
'ið',
'iðilgai',
'iðvis',
'idant',
'iki',
'iki',
'ir',
'irgi',
'it',
'itin',
'jàjà',
'jàja',
'jàsias',
'jájá',
'jøjø',
'jøjø',
'jûsø',
'jûs',
'jûsiðkë',
'jûsiðkis',
'jaisiais',
'jajai',
'jajam',
'jajame',
'jei',
'jeigu',
'ji',
'jiedu',
'jiedvi',
'jieji',
'jiesiems',
'jinai',
'jis',
'jisai',
'jog',
'joji',
'jojo',
'jojoje',
'jokia',
'joks',
'josiomis',
'josioms',
'josios',
'josios',
'josiose',
'judu',
'judvi',
'juk',
'jumis',
'jums',
'jumyse',
'juodu',
'juoju',
'juosiuose',
'juosius',
'jus',
'kaþin',
'kaþkas',
'kaþkatra',
'kaþkatras',
'kaþkokia',
'kaþkoks',
'kaþkuri',
'kaþkuris',
'kad',
'kada',
'kadangi',
'kai',
'kaip',
'kaip',
'kaipgi',
'kas',
'katra',
'katras',
'katriedvi',
'katruodu',
'kiaurai',
'kiek',
'kiekvienas',
'kieno',
'kita',
'kitas',
'kitokia',
'kitoks',
'kodël',
'kokia',
'koks',
'kol',
'kolei',
'kone',
'kuomet',
'kur',
'kurgi',
'kuri',
'kuriedvi',
'kuris',
'kuriuodu',
'lai',
'lig',
'ligi',
'link',
'lyg',
'mûsø',
'mûsiðkë',
'mûsiðkis',
'maþdaug',
'maþne',
'manàjà',
'manàjá',
'manàja',
'manàsias',
'manæs',
'manøjø',
'manøjø',
'man',
'manaisiais',
'manajai',
'manajam',
'manajame',
'manas',
'manasai',
'manasis',
'mane',
'maniðkë',
'maniðkis',
'manieji',
'maniesiems',
'manim',
'manimi',
'mano',
'manoji',
'manojo',
'manojoje',
'manosiomis',
'manosioms',
'manosios',
'manosios',
'manosiose',
'manuoju',
'manuosiuose',
'manuosius',
'manyje',
'mat',
'mes',
'mudu',
'mudvi',
'mumis',
'mums',
'mumyse',
'mus',
'në',
'na',
'nagi',
'ne',
'nebe',
'nebent',
'nebent',
'negi',
'negu',
'nei',
'nei',
'nejau',
'nejaugi',
'nekaip',
'nelyginant',
'nes',
'net',
'netgi',
'netoli',
'neva',
'nors',
'nors',
'nuo',
'o',
'ogi',
'ogi',
'oi',
'paèiø',
'paèiais',
'paèiam',
'paèiame',
'paèiu',
'paèiuose',
'paèius',
'paeiliui',
'pagal',
'pakeliui',
'palaipsniui',
'palei',
'pas',
'pasak',
'paskos',
'paskui',
'paskum',
'patá',
'pat',
'pati',
'patiems',
'paties',
'pats',
'patys',
'per',
'per',
'pernelyg',
'pirm',
'pirma',
'pirmiau',
'po',
'prieð',
'prieðais',
'prie',
'pro',
'pusiau',
'rasi',
'rodos',
'sau',
'savàjà',
'savàjá',
'savàja',
'savàsias',
'savæs',
'savøjø',
'savøjø',
'savaisiais',
'savajai',
'savajam',
'savajame',
'savas',
'savasai',
'savasis',
'save',
'saviðkë',
'saviðkis',
'savieji',
'saviesiems',
'savimi',
'savo',
'savoji',
'savojo',
'savojoje',
'savosiomis',
'savosioms',
'savosios',
'savosios',
'savosiose',
'savuoju',
'savuosiuose',
'savuosius',
'savyje',
'skersai',
'skradþiai',
'staèiai',
'su',
'sulig',
'tàjà',
'tàjá',
'tàja',
'tàsias',
'tøjø',
'tøjø',
'tûlas',
'taèiau',
'ta',
'tad',
'tai',
'tai',
'taigi',
'taigi',
'taip',
'taipogi',
'taisiais',
'tajai',
'tajam',
'tajame',
'tamsta',
'tarp',
'tarsi',
'tarsi',
'tartum',
'tartum',
'tarytum',
'tas',
'tasai',
'tau',
'tavàjà',
'tavàjá',
'tavàja',
'tavàsias',
'tavæs',
'tavøjø',
'tavøjø',
'tavaisiais',
'tavajai',
'tavajam',
'tavajame',
'tavas',
'tavasai',
'tavasis',
'tave',
'taviðkë',
'taviðkis',
'tavieji',
'taviesiems',
'tavimi',
'tavo',
'tavoji',
'tavojo',
'tavojoje',
'tavosiomis',
'tavosioms',
'tavosios',
'tavosios',
'tavosiose',
'tavuoju',
'tavuosiuose',
'tavuosius',
'tavyje',
'te',
'tegu',
'tegu',
'tegul',
'tegul',
'tiedvi',
'tieji',
'ties',
'tiesiems',
'tiesiog',
'tik',
'tik',
'tikriausiai',
'tiktai',
'tiktai',
'toji',
'tojo',
'tojoje',
'tokia',
'toks',
'tol',
'tolei',
'toliau',
'tosiomis',
'tosioms',
'tosios',
'tosios',
'tosiose',
'tu',
'tuodu',
'tuoju',
'tuosiuose',
'tuosius',
'turbût',
'uþ',
'uþtat',
'uþvis',
'vël',
'vëlgi',
'va',
'vai',
'viduj',
'vidury',
'vien',
'vienas',
'vienokia',
'vienoks',
'vietoj',
'virð',
'virðuj',
'virðum',
'vis',
'vis dëlto',
'visa',
'visas',
'visgi',
'visokia',
'visoks',
'vos',
'vos',
'ypaè',
},
'lv': {
'ārpus',
'šaipus',
'aiz',
'ap',
'apakš',
'apakšpus',
'arī',
'ar',
'ar',
'augšpus',
'būšu',
'būs',
'būsi',
'būsiet',
'būsim',
'būt',
'bet',
'bez',
'bijām',
'bijāt',
'bija',
'biji',
'biju',
'caur',
'dēļ',
'diemžēl',
'diezin',
'droši',
'esam',
'esat',
'esi',
'esmu',
'gan',
'gar',
'iekām',
'iekāms',
'iekš',
'iekšpus',
'iekam',
'iekams',
'ik',
'ir',
'it',
'itin',
'iz',
'jā',
'ja',
'jau',
'jebšu',
'jeb',
'jel',
'jo',
'kā',
'kļūšu',
'kļūs',
'kļūsi',
'kļūsiet',
'kļūsim',
'kļūst',
'kļūstam',
'kļūstat',
'kļūsti',
'kļūstu',
'kļūt',
'kļuvām',
'kļuvāt',
'kļuva',
'kļuvi',
'kļuvu',
'ka',
'kamēr',
'kaut',
'kolīdz',
'kopš',
'līdz',
'līdzko',
'labad',
'lai',
'lejpus',
'nē',
'ne',
'nebūt',
'nedz',
'nekā',
'nevis',
'nezin',
'no',
'nu',
'otrpus',
'pār',
'pēc',
'pa',
'par',
'pat',
'pie',
'pirms',
'pret',
'priekš',
'starp',
'tā',
'tādēļ',
'tālab',
'tāpēc',
'taču',
'tad',
'tak',
'tapāt',
'tapšu',
'tapi',
'taps',
'tapsi',
'tapsiet',
'tapsim',
'tapt',
'te',
'tiec',
'tiek',
'tiekam',
'tiekat',
'tieku',
'tikām',
'tikāt',
'tikšu',
'tik',
'tika',
'tikai',
'tiki',
'tikko',
'tiklīdz',
'tiklab',
'tiks',
'tiksiet',
'tiksim',
'tikt',
'tiku',
'tikvien',
'tomēr',
'topat',
'turpretī',
'turpretim',
'un',
'uz',
'vai',
'varēšu',
'varējām',
'varējāt',
'varēja',
'varēji',
'varēju',
'varēs',
'varēsi',
'varēsiet',
'varēsim',
'varēt',
'var',
'varat',
'viņpus',
'vien',
'vien',
'virs',
'virspus',
'vis',
'zem',
},
'nl': {
'aan',
'af',
'al',
'als',
'bij',
'dan',
'dat',
'die',
'dit',
'een',
'en',
'er',
'had',
'heb',
'hem',
'het',
'hij',
'hoe',
'hun',
'ik',
'in',
'is',
'je',
'kan',
'me',
'men',
'met',
'mij',
'nog',
'nu',
'of',
'ons',
'ook',
'te',
'tot',
'uit',
'van',
'was',
'wat',
'we',
'wel',
'wij',
'zal',
'ze',
'zei',
'zij',
'zo',
'zou',
},
'no': {
'Å',
'alle',
'andre',
'arbeid',
'av',
'begge',
'bort',
'bra',
'bruke',
'da',
'denne',
'der',
'deres',
'det',
'din',
'disse',
'du',
'eller',
'en',
'ene',
'eneste',
'enhver',
'enn',
'er',
'et',
'fÅ',
'folk',
'for',
'fordi',
'forsÛke',
'fra',
'fÛr',
'fÛrst',
'gÅ',
'gjorde',
'gjÛre',
'god',
'ha',
'hadde',
'han',
'hans',
'hennes',
'her',
'hva',
'hvem',
'hver',
'hvilken',
'hvis',
'hvor',
'hvordan',
'hvorfor',
'i',
'ikke',
'inn',
'innen',
'kan',
'kunne',
'lage',
'lang',
'lik',
'like',
'mÅ',
'makt',
'mange',
'mÅte',
'med',
'meg',
'meget',
'men',
'mens',
'mer',
'mest',
'min',
'mye',
'nÅ',
'nÅr',
'navn',
'nei',
'ny',
'og',
'ogsÅ',
'om',
'opp',
'oss',
'over',
'pÅ',
'part',
'punkt',
'rett',
'riktig',
'sÅ',
'samme',
'sant',
'si',
'siden',
'sist',
'skulle',
'slik',
'slutt',
'som',
'start',
'stille',
'tid',
'til',
'tilbake',
'tilstand',
'under',
'ut',
'uten',
'var',
'vÅr',
'ved',
'verdi',
'vi',
'vil',
'ville',
'vite',
'vÖre',
'vÖrt',
},
'pl': {
'a',
'aby',
'ach',
'acz',
'aczkolwiek',
'aj',
'albo',
'ale',
'ależ',
'aż',
'bardziej',
'bardzo',
'będą',
'będzie',
'bez',
'bo',
'bowiem',
'by',
'być',
'był',
'była',
'byli',
'było',
'były',
'bynajmniej',
'cała',
'cali',
'cały',
'ci',
'cię',
'ciebie',
'co',
'cokolwiek',
'coś',
'czasami',
'czasem',
'czemu',
'czy',
'czyli',
'daleko',
'dla',
'dlaczego',
'dlatego',
'do',
'dobrze',
'dokąd',
'dość',
'dużo',
'dwa',
'dwaj',
'dwie',
'dwoje',
'dziś',
'dzisiaj',
'gdy',
'gdyby',
'gdyż',
'gdzie',
'gdziekolwiek',
'gdzieś',
'go',
'i',
'ich',
'ile',
'im',
'inna',
'inne',
'inny',
'innych',
'iż',
'ja',
'ją',
'jak',
'jakaś',
'jakby',
'jaki',
'jakichś',
'jakie',
'jakiś',
'jakiż',
'jakkolwiek',
'jako',
'jakoś',
'je',
'jeden',
'jedna',
'jednak',
'jednakże',
'jedno',
'jego',
'jej',
'jemu',
'jeśli',
'jest',
'jestem',
'jeszcze',
'jeżeli',
'już',
'każdy',
'kiedy',
'kierunku',
'kilka',
'kimś',
'kto',
'ktokolwiek',
'która',
'które',
'którego',
'której',
'który',
'których',
'którym',
'którzy',
'ktoś',
'ku',
'lat',
'lecz',
'lub',
'ma',
'mają',
'mam',
'mi',
'między',
'mimo',
'mną',
'mnie',
'mogą',
'moi',
'moim',
'mój',
'moja',
'moje',
'może',
'możliwe',
'można',
'mu',
'musi',
'my',
'na',
'nad',
'nam',
'nami',
'nas',
'nasi',
'nasz',
'nasza',
'nasze',
'naszego',
'naszych',
'natomiast',
'natychmiast',
'nawet',
'nią',
'nic',
'nich',
'nie',
'niego',
'niej',
'niemu',
'nigdy',
'nim',
'nimi',
'niż',
'no',
'o',
'obok',
'od',
'około',
'on',
'ona',
'one',
'oni',
'ono',
'oraz',
'owszem',
'pan',
'pana',
'pani',
'po',
'pod',
'podczas',
'pomimo',
'ponad',
'ponieważ',
'powinien',
'powinna',
'powinni',
'powinno',
'poza',
'prawie',
'przecież',
'przed',
'przede',
'przedtem',
'przez',
'przy',
'roku',
'również',
'są',
'sam',
'sama',
'się',
'skąd',
'sobą',
'sobie',
'sposób',
'swoje',
'ta',
'tak',
'taka',
'taki',
'takie',
'także',
'tam',
'te',
'tego',
'tej',
'ten',
'teraz',
'też',
'to',
'tobą',
'tobie',
'toteż',
'totobą',
'trzeba',
'tu',
'tutaj',
'twoi',
'twoim',
'twój',
'twoja',
'twoje',
'twym',
'ty',
'tych',
'tylko',
'tym',
'u',
'w',
'wam',
'wami',
'was',
'wasi',
'wasz',
'wasza',
'wasze',
'we',
'według',
'więc',
'więcej',
'wiele',
'wielu',
'właśnie',
'wszyscy',
'wszystkich',
'wszystkie',
'wszystkim',
'wszystko',
'wtedy',
'wy',
'z',
'za',
'żaden',
'żadna',
'żadne',
'żadnych',
'zapewne',
'zawsze',
'że',
'żeby',
'zeznowu',
'znów',
'został',
},
'pt': {
'a',
'afora',
'ante',
'após',
'até',
'cara',
'com',
'como',
'connosco',
'consoante',
'contra',
'de',
'desde',
'diante',
'durante',
'em',
'entre',
'escontra',
'excepto',
'exceto',
'fora',
'in',
'inté',
'malgrado',
'mediante',
'menos',
'para',
'per',
'pera',
'perante',
'por',
'pra',
'prà',
'salvante',
'salvo',
'segundo',
'sem',
'senão',
'sob',
'sobre',
'suso',
'tirante',
'trás',
'via',
'está',
'estamos',
'estão',
'estive',
'esteve',
'estivemos',
'estiveram',
'estava',
'estávamos',
'estavam',
'estivera',
'estivéramos',
'esteja',
'estejamos',
'estejam',
'estivesse',
'estivéssemos',
'estivessem',
'estiver',
'estivermos',
'estiverem',
'hei',
'há',
'havemos',
'hão',
'houve',
'houvemos',
'houveram',
'houvera',
'houvéramos',
'haja',
'hajamos',
'hajam',
'houvesse',
'houvéssemos',
'houvessem',
'houver',
'houvermos',
'houverem',
'houverei',
'houverá',
'houveremos',
'houverão',
'houveria',
'houveríamos',
'houveriam',
'sou',
'somos',
'são',
'era',
'éramos',
'eram',
'fui',
'foi',
'fomos',
'foram',
'fora',
'fôramos',
'seja',
'sejamos',
'sejam',
'fosse',
'fôssemos',
'fossem',
'for',
'formos',
'forem',
'serei',
'será',
'seremos',
'serão',
'seria',
'seríamos',
'seriam',
'tenho',
'tem',
'temos',
'tém',
'tinha',
'tínhamos',
'tinham',
'tive',
'teve',
'tivemos',
'tiveram',
'tivera',
'tivéramos',
'tenha',
'tenhamos',
'tenham',
'tivesse',
'tivéssemos',
'tivessem',
'tiver',
'tivermos',
'tiverem',
'terei',
'terá',
'teremos',
'terão',
'teria',
'teríamos',
'teriam',
| |
#!/usr/bin/env python
import socket
import sys
import os
import errno
import argparse
import time
import datetime
import signal
# NOTE: import order below is preserved deliberately - the star imports
# (mps_manager_protocol, runtime, ctypes) and the 'runtime' name imported
# from mps_config make reordering unsafe.
from mps_names import MpsName
from mps_config import MPSConfig, runtime, models
from mps_manager_protocol import *
from runtime import *
from sqlalchemy import func
from epics import PV
import epics  # module itself is needed so heartbeat() can reference epics.ca exceptions
from threshold_manager import ThresholdManager
from threshold_restorer import ThresholdRestorer
from ctypes import *
import threading
from threading import Thread, Lock
def signal_hander(sig, frame):
    # SIGINT handler: print a shutdown banner and exit with status 0.
    # NOTE(review): "hander" is a typo for "handler"; kept as-is because the
    # signal.signal() registration below references this exact name.
    print('=== MpsManager exiting ===')
    sys.exit(0)
# Install the handler so Ctrl-C shuts the server down cleanly.
signal.signal(signal.SIGINT, signal_hander)
class DatabaseReader():
    """Per-thread holder for one MPS config + runtime database connection.

    Each worker thread builds its own DatabaseReader so SQLAlchemy sessions
    are never shared across threads.
    """
    def __init__(self, db_file_name, rt_file_name):
        # MPSConfig opens both the config and the runtime databases.
        self.mps = MPSConfig(db_file_name, rt_file_name)
        self.session = self.mps.session            # config database session
        self.rt_session = self.mps.runtime_session  # runtime database session
        self.mps_names = MpsName(self.session)      # name-resolution helper
    def __del__(self):
        # Close both sessions when the reader is garbage-collected.
        self.session.close()
        self.rt_session.close()
class ReaderThread(threading.Thread):
    """Worker thread serving read-only requests: device checks, threshold
    reads (GET_THRESHOLD) and threshold restores.

    Each instance opens its own DatabaseReader so database sessions are
    never shared between threads.
    """

    def __init__(self, mps_manager, message, conn, ip, port, check_only=False):
        super(ReaderThread, self).__init__()
        self.mps_manager = mps_manager
        self.message = message
        self.conn = conn
        self.ip = ip
        self.port = port
        self.check_only = check_only
        self.dbr = DatabaseReader(self.mps_manager.db_file_name, self.mps_manager.rt_file_name)

    def run(self):
        manager = self.mps_manager
        manager.log_string('Reader [START]')
        manager.db_reader_start()
        # Dispatch the request to the appropriate manager handler.
        if self.check_only:
            manager.check_device_request(self.conn, self.dbr,
                                         self.message.request_device_id,
                                         self.message.request_device_name)
        elif self.message.request_type == int(MpsManagerRequestType.GET_THRESHOLD.value):
            manager.get_threshold(self.dbr, self.message, self.conn, self.ip, self.port)
        else:
            # For restore requests request_device_id carries the app id.
            manager.restore(self.conn, self.dbr, self.message.request_device_id)
        manager.db_reader_end()
        manager.log_string('Reader [END]')
class WriterThread(threading.Thread):
    """Worker thread serving threshold-change (write) requests.

    Writers are serialized by the manager's db_write_lock, which is also
    held collectively by active readers.
    """

    def __init__(self, mps_manager, message, conn, ip, port):
        super(WriterThread, self).__init__()
        self.mps_manager = mps_manager
        self.message = message
        self.conn = conn
        self.ip = ip
        self.port = port
        self.dbr = DatabaseReader(self.mps_manager.db_file_name, self.mps_manager.rt_file_name)

    def run(self):
        manager = self.mps_manager
        manager.log_string('Writer [START]')
        manager.db_write_lock.acquire()
        manager.change_threshold(self.dbr, self.message, self.conn, self.ip, self.port)
        manager.past_writers += 1
        manager.db_write_lock.release()
        manager.log_string('Writer [END]')
class MpsManager:
    """TCP server that manages MPS device thresholds.

    Accepts client requests (device check, get/change threshold, restore)
    and hands each one to a ReaderThread or WriterThread.
    """
    # Class-level defaults; most are overwritten per-instance in __init__.
    session = 0
    host = 'lcls-dev3'                       # default host name for the banner
    port = 1975                              # default TCP listen port
    log_file_name = '/tmp/mps_manager.log'   # default log destination
    sock = 0
    logFile = None
    logFileName = None
    log_file_lock = None                     # serializes log writes across threads
    messageCount = 0
    currentFileName = None
    stdout = False                           # also echo log lines to stdout
    past_readers = 0                         # completed reader threads
    past_writers = 0                         # completed writer threads
    hb_pv = None                             # optional heartbeat PV
    hb_count = 0                             # heartbeat counter, bumped every ~5s
def __init__(self, host, port, log_file_name, db_file_name, hb_pv_name, stdout):
self.db_file_name = db_file_name
self.rt_file_name = '{}/{}_runtime.db'.format(os.path.dirname(self.db_file_name),
os.path.basename(self.db_file_name).\
split('.')[0])
self.stdout = stdout
self.host = host
self.port = port
self.log_file_name = log_file_name
self.file = None
self.log_file_lock = Lock()
self.db_read_lock = Lock()
self.db_write_lock = Lock()
self.db_readers = 0
self.readers = []
self.writers = []
if (hb_pv_name != None):
self.hb_pv = PV(hb_pv_name)
if (self.hb_pv.host == None):
print('ERROR: Cannot connect to specified heart beat PV ({})'.format(hb_pv_name))
exit(1)
try:
self.tcp_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.tcp_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.tcp_server.bind(('0.0.0.0', port))
except socket.error:
print 'Failed to create socket'
sys.exit()
if (self.log_file_name != None):
if (os.path.isfile(self.log_file_name)):
base_name = os.path.basename(self.log_file_name)
dir_name = os.path.dirname(self.log_file_name)
if not '.' in base_name:
backup_file_name = '{}-{}'.format(base_name,
datetime.datetime.now().strftime('%Y.%m.%d %H:%M:%S'))
else:
backup_file_name = '{}-{}.{}'.format(base_name.split('.')[0],
datetime.datetime.now().strftime('%Y.%m.%d %H:%M:%S'),
base_name.split('.')[1])
os.rename(self.log_file_name, dir_name + '/' + backup_file_name)
try:
self.file = open(self.log_file_name, 'a', 0)
except IOError as e:
if e.errno == errno.EACCES:
print('ERROR: No permission to write file {}'.format(self.log_file_name))
else:
print('ERROR: errno={}, cannot write to file {}'.format(e.errno, self.log_file_name))
exit(1)
myAddr = (self.host, self.port)
    def db_reader_start(self):
        """Enter a read-side critical section (readers-writer scheme).

        db_read_lock protects the reader count; the first active reader
        grabs db_write_lock so writers block while any reader is running.
        """
        self.db_read_lock.acquire()
        self.db_readers += 1
        if (self.db_readers == 1):
            # First reader in: block writers until the last reader leaves.
            self.db_write_lock.acquire()
        self.db_read_lock.release()
    def db_reader_end(self):
        """Leave the read-side critical section (pairs with db_reader_start).

        The last reader out releases db_write_lock, letting writers proceed.
        Also bumps the completed-reader counter used by log_stats().
        """
        self.db_read_lock.acquire()
        self.db_readers -= 1
        if (self.db_readers == 0):
            # Last reader out: allow writers again.
            self.db_write_lock.release()
        self.past_readers += 1
        self.db_read_lock.release()
def cleanup(self):
old=self.readers
self.readers = []
for r in old:
if not r.isAlive():
del r
else:
self.readers.append(r)
old=self.writers
self.writers = []
for r in old:
if not r.isAlive():
del r
else:
self.readers.append(r)
    def run(self):
        """Main server loop: accept client connections and dispatch them.

        accept() times out every 5 seconds; each timeout is used to bump the
        heartbeat PV, reap finished worker threads and, every 32 heartbeats,
        log thread statistics.  'done' is never set True, so the loop runs
        until the process is signalled (see signal_hander).
        """
        done = False
        self.log_string("+== MpsManager Server ==============================")
        self.log_string("| Host : {}".format(self.host))
        self.log_string("| Port : {}".format(self.port))
        self.log_string("| Config Db : {}".format(self.db_file_name))
        self.log_string("| Runtime Db: {}".format(self.rt_file_name))
        self.log_string("+===================================================")
        self.tcp_server.settimeout(5)
        while not done:
            self.tcp_server.listen(4)
            try:
                (conn, (ip, port)) = self.tcp_server.accept()
                self.process_request(conn, ip, port)
            except socket.timeout:
                self.heartbeat() # Increment heart beat PV every 5 seconds
                self.cleanup() # Removes finished worker threads
                if (self.hb_count % 32 == 0):
                    self.log_stats()
def log_string(self, message):
self.log_file_lock.acquire()
if self.log_file_name != None:
self.file.write('[{}] {}\n'.format(datetime.datetime.now().strftime('%Y.%m.%d %H:%M:%S'),
str(message)))
if self.stdout:
print('[{}] {}'.format(datetime.datetime.now().strftime('%Y.%m.%d %H:%M:%S'),
str(message)))
self.log_file_lock.release()
def log_stats(self):
message = 'Active R={}/W={}, Past R={}/W={}'.\
format(len(self.readers), len(self.writers),
self.past_readers, self.past_writers)
self.log_string(message)
def heartbeat(self):
self.hb_count += 1
try:
if (self.hb_pv != None):
self.hb_pv.put(self.hb_count)
except epics.ca.CASeverityException:
self.log_string('ERROR: Cannot update heartbeat PV ({})'.format(self.hb_pv.pvname))
def decode_message(self, message, conn, ip, port):
if (message.request_type == int(MpsManagerRequestType.RESTORE_APP_THRESHOLDS.value)):
self.log_string('Request for restore app thresholds')
reader = ReaderThread(self, message, conn, ip, port)
self.readers.append(reader)
reader.start()
elif (message.request_type == int(MpsManagerRequestType.CHANGE_THRESHOLD.value)):
self.log_string('Request for change device thresholds')
writer = WriterThread(self, message, conn, ip, port)
self.writers.append(writer)
writer.start()
elif (message.request_type == int(MpsManagerRequestType.GET_THRESHOLD.value)):
self.log_string('Request for current device thresholds')
reader = ReaderThread(self, message, conn, ip, port)
self.readers.append(reader)
reader.start()
elif (message.request_type == int(MpsManagerRequestType.DEVICE_CHECK.value)):
self.log_string('Request for restore app thresholds')
reader = ReaderThread(self, message, conn, ip, port, True)
self.readers.append(reader)
reader.start()
else:
self.log_string('Invalid request type: {}'.format(message.request_type))
def process_request(self, conn, ip, port):
message=MpsManagerRequest()
data = conn.recv(message.size())
if data:
message.unpack(data)
self.decode_message(message, conn, ip, port)
def is_analog(self, dbr, dev_id):
analog_devices = dbr.session.query(models.AnalogDevice).filter(models.AnalogDevice.id==dev_id).all()
if (len(analog_devices)==1):
return True
else:
digital_devices = dbr.session.query(models.DigitalDevice).filter(models.DigitalDevice.id==dev_id).all()
if (len(digital_devices)==0):
self.log_string('ERROR: Device not found (invalid device id {0})'.format(dev_id))
return False
    def check_device(self, dbr, dev_id, dev_name):
        """Look up a device in both databases and cross-check its name.

        If dev_id < 0 the device is resolved by dev_name instead.
        Returns (runtime_device, info_string) on success, or
        (None, reason_string) on any failure; the reason string is spliced
        into the client-facing status message by check_device_request().
        """
        if (dev_id < 0):
            # Negative id means "look up by name" in the config database.
            try:
                d = dbr.session.query(models.Device).filter(models.Device.name==dev_name).one()
                dev_id = d.id
            except Exception as e:
                print(str(e))
                self.log_string('ERROR: Cannot find device with name "{0}" in config database'.format(dev_name))
                return None, "name in config database"
        else:
            try:
                d = dbr.session.query(models.Device).filter(models.Device.id==dev_id).one()
            except:
                self.log_string('ERROR: Cannot find device with id="{0}" in config database'.format(dev_id))
                return None, "id not in config database"
        # The device must also exist in the runtime database...
        try:
            rt_d = dbr.rt_session.query(runtime.Device).filter(runtime.Device.id==dev_id).one()
        except Exception as e:
            print(str(e))
            self.log_string('ERROR: Cannot find device with id="{0}" in runtime database'.format(dev_id))
            return None, "id in runtime database"
        # ...and its name must agree between the two databases.
        if (rt_d.mpsdb_name != d.name):
            self.log_string('ERROR: Device names do not match in config ({0}) and runtime databases ({1})'.\
                                format(d.name, rt_d.mpsdb_name))
            return None, "Invalid names in config/runtime databases"
        if (self.is_analog(dbr, dev_id)):
            return rt_d, "Analog device"
        else:
            return rt_d, "Digital device"
    def check_analog_device(self, dbr, dev_id, dev_name):
        """Validate that the device exists, is analog, and is a BPM.

        If dev_id < 0 the device is resolved by dev_name.  Returns
        (runtime_device, is_bpm) on success, (None, None) on any failure.
        NOTE(review): the BPMS check below rejects every non-BPMS device
        (the error text mentions 'digital device' but the branch also fires
        for non-BPM analog devices), so on success is_bpm is always True.
        """
        if (dev_id < 0):
            # Negative id means "look up by name" in the config database.
            try:
                d = dbr.session.query(models.Device).filter(models.Device.name==dev_name).one()
                dev_id = d.id
            except Exception as e:
                print(str(e))
                self.log_string('ERROR: Cannot find device with name "{0}" in config database'.format(dev_name))
                return None, None
        if (self.is_analog(dbr, dev_id)):
            try:
                rt_d = dbr.rt_session.query(runtime.Device).filter(runtime.Device.id==dev_id).one()
            except Exception as e:
                print(str(e))
                self.log_string('ERROR: Cannot find device with id="{0}" in runtime database'.format(dev_id))
                return None, None
            try:
                d = dbr.session.query(models.Device).filter(models.Device.id==dev_id).one()
            except:
                self.log_string('ERROR: Cannot find device with id="{0}" in config database'.format(dev_id))
                return None, None
            # Names must agree between the config and runtime databases.
            if (rt_d.mpsdb_name != d.name):
                self.log_string('ERROR: Device names do not match in config ({0}) and runtime databases ({1})'.\
                                    format(d.name, rt_d.mpsdb_name))
                return None, None
            is_bpm = False
            if (d.device_type.name == 'BPMS'):
                is_bpm = True
            else:
                self.log_string('ERROR: Cannot set threshold for digital device')
                return None, None
            return rt_d, is_bpm
def check_device_request(self, conn, dbr, device_id, device_name):
self.log_string('Checking device id={}, name={}'.\
format(device_id, device_name))
rt_d, status_message = self.check_device(dbr, int(device_id), device_name)
response = MpsManagerResponse()
if (rt_d == None):
response.status = int(MpsManagerResponseType.BAD_DEVICE.value)
response.device_id = 0
response.status_message = 'Device not valid'
if (device_id < 0):
response.status_message += ' (name={}, '.format(device_name)
else:
response.status_message += ' (id={}, '.format(device_id)
response.status_message += '{})'.format(status_message)
else:
response.status = int(MpsManagerResponseType.OK.value)
response.device_id = rt_d.mpsdb_id
response.status_message = 'Device is valid (name={}, id={}, info={})'.format(rt_d.mpsdb_name,
rt_d.mpsdb_id,
status_message)
conn.send(response.pack())
def check_analog_device_request(self, conn, dbr, device_id, device_name):
self.log_string('Checking device id={}, name={}'.\
format(device_id, device_name))
rt_d, is_bpm = self.check_analog_device(dbr, int(device_id), device_name)
response = MpsManagerResponse()
if (rt_d == None):
response.status = int(MpsManagerResponseType.BAD_DEVICE.value)
response.device_id = 0
response.status_message = 'Device not valid'
if (device_id < 0):
response.status_message += ' (name={})'.format(device_name)
else:
response.status_message += ' (id={})'.format(device_id)
else:
response.status = int(MpsManagerResponseType.OK.value)
response.device_id = rt_d.mpsdb_id
response.status_message = 'Device is valid (name={}, id={})'.format(rt_d.mpsdb_name,
rt_d.mpsdb_id)
conn.send(response.pack())
return rt_d, is_bpm
def restore(self, conn, dbr, app_id):
self.log_string('Restoring thresholds for app={}'.format(app_id))
# Restore thresholds here
tr = ThresholdRestorer(db=dbr.session, rt_db=dbr.rt_session, mps_names=dbr.mps_names,
force_write=False, verbose=True)
response = MpsManagerResponse()
if (tr.restore(app_id) == False):
response.status = int(MpsManagerResponseType.RESTORE_FAIL.value)
response.status_message = tr.error_message
conn.send(response.pack())
return
else:
if (tr.check(app_id) == False):
response.status = int(MpsManagerResponseType.RESTORE_FAIL.value)
response.status_message = tr.error_message
conn.send(response.pack())
return
else:
if (tr.release() == False):
response.status = int(MpsManagerResponseType.RESTORE_FAIL.value)
response.status_message = tr.error_message
conn.send(response.pack())
return
response.status = int(MpsManagerResponseType.OK.value)
response.device_id = app_id
response.status_message = 'Thresholds restored for app {}'.format(app_id)
conn.send(response.pack())
def get_threshold(self, dbr, message, conn, ip, port):
self.log_string('Getting thresholds for device id={}, name={}'.\
format(message.request_device_id, message.request_device_name))
rt_d, is_bpm = self.check_analog_device_request(conn, dbr, int(message.request_device_id),
message.request_device_name)
if rt_d == None:
self.log_string('Get threshold: invalid device')
return
tm = ThresholdManager(dbr.session, dbr.rt_session, dbr.mps_names)
threshold_message = tm.get_thresholds(rt_d, is_bpm)
threshold_message.device_name = message.request_device_name
threshold_message.device_id = message.request_device_id
conn.send(threshold_message.pack())
def change_threshold(self, dbr, message, conn, ip, port):
self.log_string('Checking device id={}, name={}'.\
format(message.request_device_id, message.request_device_name))
rt_d, is_bpm = self.check_analog_device_request(conn, dbr, int(message.request_device_id),
message.request_device_name)
if rt_d == None:
self.log_string('Change threshold: invalid device')
return
# Receive list of thresholds to be changed
threshold_message = MpsManagerThresholdRequest()
data = conn.recv(threshold_message.size())
print('Received {} bytes'.format(len(data)))
threshold_message.unpack(data)
tm = ThresholdManager(dbr.session, dbr.rt_session, dbr.mps_names)
log, error_pvs, status = tm.change_thresholds(rt_d, threshold_message.user_name,
threshold_message.reason, is_bpm,
threshold_message.lc1_active, threshold_message.lc1_value,
threshold_message.idl_active, threshold_message.idl_value,
threshold_message.lc2_active, threshold_message.lc2_value,
threshold_message.alt_active, threshold_message.alt_value,
threshold_message.disable)
self.log_string('\n' + log + ': | |
colour the RGB strip has been set to."
}
    def action__fade(self, request):
        """
        Fades the LED strip from its current colour to the colour supplied
        in the "fade" request parameter (a named colour or hex value).
        Returns whatever led_strip.fade() yields - per the capability
        metadata below, the resulting hex colour.
        (The previous docstring was copy-pasted from the "set" action.)
        """
        fade_colour = request.get_param("fade", force=unicode)
        logging.info("Fade to: %s" % fade_colour)
        return self.led_strip.fade(fade_colour)
    # Capability metadata consumed by the API's self-description mechanism.
    action__fade.capability = {
        "param": "fade",
        "description": "Fades the RGB strip from its current colour to a specified colour.",
        "value": "<unicode> A named colour (e.g. 'pink') or colour hex value (e.g. '#19BECA').",
        "validity": "<unicode> A known named colour, or valid colour hex in the range #000000-#FFFFFF",
        "returns": "<unicode> The hex value of the colour the RGB strip has been set to."
    }
    def action__sunrise(self, request):
        """
        Performs a sunrise over the specified period of time.

        Duration = seconds + milliseconds/1000; optional temp_start/temp_end
        override the colour-temperature endpoints (passed through to
        led_strip.sunrise() as None when not supplied).
        """
        seconds = request.get_param(["seconds", "s", "sunrise"], default=10.0, force=float)
        milliseconds = request.get_param(["milliseconds", "ms"], default=0.0, force=float)
        temp_start = request.get_param(['temp_start', 'K'], default=None, force=unicode)
        temp_end = request.get_param('temp_end', default=None, force=unicode)
        logging.info("Sunrise: %s seconds" % (seconds + (milliseconds / 1000.0)))
        return self.led_strip.sunrise(seconds=seconds, milliseconds=milliseconds, temp_start=temp_start, temp_end=temp_end)
    # Capability metadata consumed by the API's self-description mechanism.
    # NOTE(review): the defaults documented below (1000 ms, 6500K/500K) do not
    # match the handler defaults above (0 ms, None); presumably
    # led_strip.sunrise() applies them - confirm against that method.
    action__sunrise.capability = {
        "param": "sunrise",
        "description": "Gently fades-in the RGB strip from deep red to daylight.",
        "value": "The number of seconds you would like the sunrise to take.",
        "validity": "<float> > 0",
        "optional_concurrent_parameters": [
            {
                "param": "milliseconds",
                "value": "The number of milliseconds the sunrise should take. Will be added to seconds (if specified) to give a total time.",
                "validity": "<int> > 0",
                "default": "1000",
            },
            {
                "param": "temp_start",
                "value": "The colour temperature you wish to start from (e.g. 500K).",
                "validity": "<unicode> Matches a named colour temperature (50K - 15000K in 100 Kelvin steps)",
                "default": "6500K"
            },
            {
                "param": "temp_end",
                "value": "The colour temperature you wish to finish at (e.g. 4500K).",
                "validity": "<unicode> Matches a named colour temperature (50K - 15000K in 100 Kelvin steps)",
                "default": "500K"
            }
        ],
        "returns": "<unicode> The hex value of the colour the RGB strip has been set to."
    }
def action__sunset(self, request):
"""
Performs a sunset over the specified period of time
"""
seconds = request.get_param(["seconds", "s", "sunset"], default=10.0, force=float)
milliseconds = request.get_param(["milliseconds", "ms"], default=0.0, force=float)
temp_start = request.get_param(['temp_start', 'K'], default=None, force=unicode)
temp_end = request.get_param('temp_end', default=None, force=unicode)
logging.info("Sunset: %s seconds" % (seconds + (milliseconds / 1000.0)))
return self.led_strip.sunset(seconds=seconds, milliseconds=milliseconds, temp_start=temp_start, temp_end=temp_end)
action__sunset.capability = {
"param": "sunset",
"description": "Gently fades-out the RGB strip from daylight to deep-red.",
"value": "The number of seconds you would like the sunrise to take.",
"validity": "<float> > 0",
"optional_concurrent_parameters": [
{
"param": "milliseconds",
"value": "The number of milliseconds the sunset should take. Will be added to seconds (if specified) to give a total time.",
"validity": "<int> > 0",
"default": "1000",
},
{
"param": "temp_start",
"value": "The colour temperature you wish to start from (e.g. 500K).",
"validity": "<unicode> Matches a named colour temperature (50K - 15000K in 100 Kelvin steps)",
"default": "500K"
},
{
"param": "temp_end",
"value": "The colour temperature you wish to finish at (e.g. 4500K).",
"validity": "<unicode> Matches a named colour temperature (50K - 15000K in 100 Kelvin steps)",
"default": "6500K"
}
],
"returns": ""
}
def action__jump(self, request):
    """
    Hop between the colours supplied in the request, each shown for an
    even slice of the requested time.
    """
    colour_list = request.get_param_values("jump")
    secs = request.get_param(["seconds", "s"], default=0.0, force=float)
    msecs = request.get_param(["milliseconds", "ms"], default=0.0, force=float)
    # Kill whatever sequence is currently running before starting the new one
    self.led_strip.stop_current_sequence()
    logging.info("Jump: %s, %s seconds" % (colour_list, secs + (msecs / 1000.0)))
    # The strip performs its own colour sanitisation
    return self.led_strip.jump(colour_list, seconds=secs, milliseconds=msecs)
action__jump.capability = {
    "param": "jump",
    "description": "Hops from one colour to the next over an even period of time.",
    "value": "A comma delimited list of colours you wish to jump between.",
    "validity": "<unicode> valid colour names or hex values separated by commas (e.g. red,blue,green,cyan,#FF00FF)",
    "optional_concurrent_parameters": [
        {
            "param": "milliseconds",
            "value": "The number of milliseconds the each colour should be displayed for. Will be added to seconds (if specified) to give a total time.",
            "validity": "<int> > 0",
            "default": "200",
        },
        {
            "param": "seconds",
            "value": "The number of seconds each colour should be displayed for. Will be added to milliseconds (if specified) to give a total time.",
            "validity": "<int> > 0",
            "default": "0",
        },
    ],
    "returns": "<unicode> The first hex value of sequence."
}
def action__rotate(self, request):
    """
    Cross-fade through the colours supplied in the request, one fade per
    even slice of the requested time.
    """
    colour_list = request.get_param_values("rotate")
    secs = request.get_param(["seconds", "s"], default=0.0, force=float)
    msecs = request.get_param(["milliseconds", "ms"], default=0.0, force=float)
    # Kill whatever sequence is currently running before starting the new one
    self.led_strip.stop_current_sequence()
    logging.info("Rotate: %s, %s seconds" % (colour_list, secs + (msecs / 1000.0)))
    # The strip performs its own colour sanitisation
    return self.led_strip.rotate(colour_list, seconds=secs, milliseconds=msecs)
action__rotate.capability = {
    "param": "rotate",
    "description": "Fades from one colour to the next over an even period of time.",
    "value": "A comma delimited list of colours you wish to cross-fade between.",
    "validity": "<unicode> valid colour names or hex values separated by commas (e.g. red,blue,green,cyan,#FF00FF)",
    "optional_concurrent_parameters": [
        {
            "param": "milliseconds",
            "value": "The number of milliseconds the each colour fade should take. Will be added to seconds (if specified) to give a total time.",
            "validity": "<int> > 0",
            "default": "200",
        },
        {
            "param": "seconds",
            "value": "The number of seconds each colour fade should take. Will be added to milliseconds (if specified) to give a total time.",
            "validity": "<int> > 0",
            "default": "0",
        },
    ],
    "returns": "<unicode> The first hex value of sequence."
}
def action__stop(self, request):
    """
    Halt whatever sequence or fade is currently in progress.
    """
    halted_colour = self.led_strip.stop()
    return halted_colour
action__stop.capability = {
    "param": "stop",
    "description": "Halts the current sequence or fade.",
    "value": "",
    "returns": "<unicode> The hex value of colour the RGB strip got halted on."
}
def action__off(self, request):
    """
    Stop any running sequence and fade the strip quickly down to black.
    """
    logging.info("Off!")
    final_state = self.led_strip.off()
    return final_state
action__off.capability = {
    "param": "off",
    "description": "Stops any fades or sequences. Quickly Fades the RGB strip to black (no light)",
    "value": "",
    "returns": "<unicode> The hex value of colour the RGB strip ends up at (#000000)."
}
def information__status(self, request, *args, **kwargs):
    """
    Report the current state of the RGB LED strip.

    Several keys are aliases exposing the same value, so callers can use
    whichever name they prefer.
    """
    rgb_repr = "({})".format(self.led_strip)
    hex_value = self.led_strip.hex
    # Pick a foreground colour that remains readable against the strip colour
    contrast = self.led_strip.contrast_from_bg(hex_value, dark_default="202020")
    return {
        "sequence": self.led_strip.sequence_colours,
        "current_hex": hex_value,
        "current": rgb_repr,
        "current_colour": rgb_repr,
        "current_rgb": rgb_repr,
        "contrast": contrast,
        "contrast_colour": contrast
    }
def teardown(self):
    """
    Release the LED strip resources.

    Called automatically when exiting the parent reactor; delegates the
    actual hardware cleanup to the strip object.
    """
    self.led_strip.teardown()
class NotSet(object):
    """
    Sentinel type used to distinguish "no value supplied" from legitimate
    falsy values such as None, 0 or "".

    Inherits from object (new-style class) for consistency with the rest of
    this module; the original `class NotSet():` was an old-style class under
    Python 2.
    """
    pass


# Module-level singleton; always compare with `val is NOT_SET`
NOT_SET = NotSet()
class SmartRequest(Request, object):
"""
The class for request objects returned by our web server.
This child version has methods for easily grabbing params safely.
Usage:
#If you just want the first value
sunset = request["sunset"]
sunset = request.get_param("sunset")
#You can even test the water with multiple values, it will stop at the first valid one
sunset = request.get_param(["sunset","ss","twilight"])
#If you want a whole list of values
jump = request.get_list("jump")
See docs: https://twistedmatrix.com/documents/8.0.0/api/twisted.web.server.Request.html
"""
def __init__(self, *args, **kwargs):
    # Pure passthrough to the parent Request constructor; kept as an
    # explicit hook point for future per-request initialisation.
    super(SmartRequest, self).__init__(*args, **kwargs)
def get_param_values(self, name, default=None):
    """
    Failsafe lookup of querystring GET and POST parameters on the Request.
    @return: LIST of values for the named parameter (e.g. ["val1","val2"]),
             or ``default`` when the parameter is absent.
    """
    try:
        return self.args[name]
    except KeyError:
        return default
get_params = get_param_values  # Alias
get_list = get_param_values  # Alias
get_params_list = get_param_values  # Alias
def get_param(self, names, default=None, force=None):
"""
Failsafe way of getting a single querystring value. Will only return one (the first) value if found
@param names: <str> The name of the param to fetch, or a list of candidate names to try
@keyword default: The default value to return if we cannot get a valid value
@keyword force: <type> A class / type to force the output into. Default is returned if we cannot force the value into this type
"""
if isinstance(names, (str, unicode)):
names = [names]
val = NOT_SET
for name in names:
val = self.get_param_values(name=name, default=NOT_SET)
if val is not NOT_SET: # Once we find a valid value, continue
break
# If we have no valid value, then bail
if val is NOT_SET:
return default
try:
if len(val) == 1:
single_val = val[0]
if force is not None:
return force(single_val)
return single_val
else:
mult_val = val
if force is not None:
mult_val = | |
auth_permission_44.name = 'Can view content type'
auth_permission_44.content_type = ContentType.objects.get(app_label="contenttypes", model="contenttype")
auth_permission_44.codename = 'view_contenttype'
auth_permission_44 = importer.save_or_locate(auth_permission_44)
auth_permission_45 = Permission()
auth_permission_45.name = 'Can add Dataset'
auth_permission_45.content_type = ContentType.objects.get(app_label="datastore", model="datasetmodel")
auth_permission_45.codename = 'add_datasetmodel'
auth_permission_45 = importer.save_or_locate(auth_permission_45)
auth_permission_46 = Permission()
auth_permission_46.name = 'Can change Dataset'
auth_permission_46.content_type = ContentType.objects.get(app_label="datastore", model="datasetmodel")
auth_permission_46.codename = 'change_datasetmodel'
auth_permission_46 = importer.save_or_locate(auth_permission_46)
auth_permission_47 = Permission()
auth_permission_47.name = 'Can delete Dataset'
auth_permission_47.content_type = ContentType.objects.get(app_label="datastore", model="datasetmodel")
auth_permission_47.codename = 'delete_datasetmodel'
auth_permission_47 = importer.save_or_locate(auth_permission_47)
auth_permission_48 = Permission()
auth_permission_48.name = 'Can view Dataset'
auth_permission_48.content_type = ContentType.objects.get(app_label="datastore", model="datasetmodel")
auth_permission_48.codename = 'view_datasetmodel'
auth_permission_48 = importer.save_or_locate(auth_permission_48)
auth_permission_49 = Permission()
auth_permission_49.name = 'Can add Entry'
auth_permission_49.content_type = ContentType.objects.get(app_label="datastore", model="entrymodel")
auth_permission_49.codename = 'add_entrymodel'
auth_permission_49 = importer.save_or_locate(auth_permission_49)
auth_permission_50 = Permission()
auth_permission_50.name = 'Can change Entry'
auth_permission_50.content_type = ContentType.objects.get(app_label="datastore", model="entrymodel")
auth_permission_50.codename = 'change_entrymodel'
auth_permission_50 = importer.save_or_locate(auth_permission_50)
auth_permission_51 = Permission()
auth_permission_51.name = 'Can delete Entry'
auth_permission_51.content_type = ContentType.objects.get(app_label="datastore", model="entrymodel")
auth_permission_51.codename = 'delete_entrymodel'
auth_permission_51 = importer.save_or_locate(auth_permission_51)
auth_permission_52 = Permission()
auth_permission_52.name = 'Can view Entry'
auth_permission_52.content_type = ContentType.objects.get(app_label="datastore", model="entrymodel")
auth_permission_52.codename = 'view_entrymodel'
auth_permission_52 = importer.save_or_locate(auth_permission_52)
auth_permission_53 = Permission()
auth_permission_53.name = 'Can add schema model'
auth_permission_53.content_type = ContentType.objects.get(app_label="datastore", model="schemamodel")
auth_permission_53.codename = 'add_schemamodel'
auth_permission_53 = importer.save_or_locate(auth_permission_53)
auth_permission_54 = Permission()
auth_permission_54.name = 'Can change schema model'
auth_permission_54.content_type = ContentType.objects.get(app_label="datastore", model="schemamodel")
auth_permission_54.codename = 'change_schemamodel'
auth_permission_54 = importer.save_or_locate(auth_permission_54)
auth_permission_55 = Permission()
auth_permission_55.name = 'Can delete schema model'
auth_permission_55.content_type = ContentType.objects.get(app_label="datastore", model="schemamodel")
auth_permission_55.codename = 'delete_schemamodel'
auth_permission_55 = importer.save_or_locate(auth_permission_55)
auth_permission_56 = Permission()
auth_permission_56.name = 'Can view schema model'
auth_permission_56.content_type = ContentType.objects.get(app_label="datastore", model="schemamodel")
auth_permission_56.codename = 'view_schemamodel'
auth_permission_56 = importer.save_or_locate(auth_permission_56)
auth_permission_57 = Permission()
auth_permission_57.name = 'Can add Argument'
auth_permission_57.content_type = ContentType.objects.get(app_label="engine", model="argumentmodel")
auth_permission_57.codename = 'add_argumentmodel'
auth_permission_57 = importer.save_or_locate(auth_permission_57)
auth_permission_58 = Permission()
auth_permission_58.name = 'Can change Argument'
auth_permission_58.content_type = ContentType.objects.get(app_label="engine", model="argumentmodel")
auth_permission_58.codename = 'change_argumentmodel'
auth_permission_58 = importer.save_or_locate(auth_permission_58)
auth_permission_59 = Permission()
auth_permission_59.name = 'Can delete Argument'
auth_permission_59.content_type = ContentType.objects.get(app_label="engine", model="argumentmodel")
auth_permission_59.codename = 'delete_argumentmodel'
auth_permission_59 = importer.save_or_locate(auth_permission_59)
auth_permission_60 = Permission()
auth_permission_60.name = 'Can view Argument'
auth_permission_60.content_type = ContentType.objects.get(app_label="engine", model="argumentmodel")
auth_permission_60.codename = 'view_argumentmodel'
auth_permission_60 = importer.save_or_locate(auth_permission_60)
auth_permission_61 = Permission()
auth_permission_61.name = 'Can add execution model'
auth_permission_61.content_type = ContentType.objects.get(app_label="engine", model="executionmodel")
auth_permission_61.codename = 'add_executionmodel'
auth_permission_61 = importer.save_or_locate(auth_permission_61)
auth_permission_62 = Permission()
auth_permission_62.name = 'Can change execution model'
auth_permission_62.content_type = ContentType.objects.get(app_label="engine", model="executionmodel")
auth_permission_62.codename = 'change_executionmodel'
auth_permission_62 = importer.save_or_locate(auth_permission_62)
auth_permission_63 = Permission()
auth_permission_63.name = 'Can delete execution model'
auth_permission_63.content_type = ContentType.objects.get(app_label="engine", model="executionmodel")
auth_permission_63.codename = 'delete_executionmodel'
auth_permission_63 = importer.save_or_locate(auth_permission_63)
auth_permission_64 = Permission()
auth_permission_64.name = 'Can view execution model'
auth_permission_64.content_type = ContentType.objects.get(app_label="engine", model="executionmodel")
auth_permission_64.codename = 'view_executionmodel'
auth_permission_64 = importer.save_or_locate(auth_permission_64)
auth_permission_65 = Permission()
auth_permission_65.name = 'Can add Process'
auth_permission_65.content_type = ContentType.objects.get(app_label="engine", model="processmodel")
auth_permission_65.codename = 'add_processmodel'
auth_permission_65 = importer.save_or_locate(auth_permission_65)
auth_permission_66 = Permission()
auth_permission_66.name = 'Can change Process'
auth_permission_66.content_type = ContentType.objects.get(app_label="engine", model="processmodel")
auth_permission_66.codename = 'change_processmodel'
auth_permission_66 = importer.save_or_locate(auth_permission_66)
auth_permission_67 = Permission()
auth_permission_67.name = 'Can delete Process'
auth_permission_67.content_type = ContentType.objects.get(app_label="engine", model="processmodel")
auth_permission_67.codename = 'delete_processmodel'
auth_permission_67 = importer.save_or_locate(auth_permission_67)
auth_permission_68 = Permission()
auth_permission_68.name = 'Can view Process'
auth_permission_68.content_type = ContentType.objects.get(app_label="engine", model="processmodel")
auth_permission_68.codename = 'view_processmodel'
auth_permission_68 = importer.save_or_locate(auth_permission_68)
auth_permission_69 = Permission()
auth_permission_69.name = 'Can add Airport'
auth_permission_69.content_type = ContentType.objects.get(app_label="mdm", model="airportmodel")
auth_permission_69.codename = 'add_airportmodel'
auth_permission_69 = importer.save_or_locate(auth_permission_69)
auth_permission_70 = Permission()
auth_permission_70.name = 'Can change Airport'
auth_permission_70.content_type = ContentType.objects.get(app_label="mdm", model="airportmodel")
auth_permission_70.codename = 'change_airportmodel'
auth_permission_70 = importer.save_or_locate(auth_permission_70)
auth_permission_71 = Permission()
auth_permission_71.name = 'Can delete Airport'
auth_permission_71.content_type = ContentType.objects.get(app_label="mdm", model="airportmodel")
auth_permission_71.codename = 'delete_airportmodel'
auth_permission_71 = importer.save_or_locate(auth_permission_71)
auth_permission_72 = Permission()
auth_permission_72.name = 'Can view Airport'
auth_permission_72.content_type = ContentType.objects.get(app_label="mdm", model="airportmodel")
auth_permission_72.codename = 'view_airportmodel'
auth_permission_72 = importer.save_or_locate(auth_permission_72)
auth_permission_73 = Permission()
auth_permission_73.name = 'Can add Bagtype'
auth_permission_73.content_type = ContentType.objects.get(app_label="mdm", model="bagtypemodel")
auth_permission_73.codename = 'add_bagtypemodel'
auth_permission_73 = importer.save_or_locate(auth_permission_73)
auth_permission_74 = Permission()
auth_permission_74.name = 'Can change Bagtype'
auth_permission_74.content_type = ContentType.objects.get(app_label="mdm", model="bagtypemodel")
auth_permission_74.codename = 'change_bagtypemodel'
auth_permission_74 = importer.save_or_locate(auth_permission_74)
auth_permission_75 = Permission()
auth_permission_75.name = 'Can delete Bagtype'
auth_permission_75.content_type = ContentType.objects.get(app_label="mdm", model="bagtypemodel")
auth_permission_75.codename = 'delete_bagtypemodel'
auth_permission_75 = importer.save_or_locate(auth_permission_75)
auth_permission_76 = Permission()
auth_permission_76.name = 'Can view Bagtype'
auth_permission_76.content_type = ContentType.objects.get(app_label="mdm", model="bagtypemodel")
auth_permission_76.codename = 'view_bagtypemodel'
auth_permission_76 = importer.save_or_locate(auth_permission_76)
auth_permission_77 = Permission()
auth_permission_77.name = 'Can add Board'
auth_permission_77.content_type = ContentType.objects.get(app_label="mdm", model="boardmodel")
auth_permission_77.codename = 'add_boardmodel'
auth_permission_77 = importer.save_or_locate(auth_permission_77)
auth_permission_78 = Permission()
auth_permission_78.name = 'Can change Board'
auth_permission_78.content_type = ContentType.objects.get(app_label="mdm", model="boardmodel")
auth_permission_78.codename = 'change_boardmodel'
auth_permission_78 = importer.save_or_locate(auth_permission_78)
auth_permission_79 = Permission()
auth_permission_79.name = 'Can delete Board'
auth_permission_79.content_type = ContentType.objects.get(app_label="mdm", model="boardmodel")
auth_permission_79.codename = 'delete_boardmodel'
auth_permission_79 = importer.save_or_locate(auth_permission_79)
auth_permission_80 = Permission()
auth_permission_80.name = 'Can view Board'
auth_permission_80.content_type = ContentType.objects.get(app_label="mdm", model="boardmodel")
auth_permission_80.codename = 'view_boardmodel'
auth_permission_80 = importer.save_or_locate(auth_permission_80)
auth_permission_81 = Permission()
auth_permission_81.name = 'Can add Flight provider'
auth_permission_81.content_type = ContentType.objects.get(app_label="mdm", model="flightprovidermodel")
auth_permission_81.codename = 'add_flightprovidermodel'
auth_permission_81 = importer.save_or_locate(auth_permission_81)
auth_permission_82 = Permission()
auth_permission_82.name = 'Can change Flight provider'
auth_permission_82.content_type = ContentType.objects.get(app_label="mdm", model="flightprovidermodel")
auth_permission_82.codename = 'change_flightprovidermodel'
auth_permission_82 = importer.save_or_locate(auth_permission_82)
auth_permission_83 = Permission()
auth_permission_83.name = 'Can delete Flight provider'
auth_permission_83.content_type = ContentType.objects.get(app_label="mdm", model="flightprovidermodel")
auth_permission_83.codename = 'delete_flightprovidermodel'
auth_permission_83 = importer.save_or_locate(auth_permission_83)
auth_permission_84 = Permission()
auth_permission_84.name = 'Can view Flight provider'
auth_permission_84.content_type = ContentType.objects.get(app_label="mdm", model="flightprovidermodel")
auth_permission_84.codename = 'view_flightprovidermodel'
auth_permission_84 = importer.save_or_locate(auth_permission_84)
auth_permission_85 = Permission()
auth_permission_85.name = 'Can add Hotel'
auth_permission_85.content_type = ContentType.objects.get(app_label="mdm", model="hotelmodel")
auth_permission_85.codename = 'add_hotelmodel'
auth_permission_85 = importer.save_or_locate(auth_permission_85)
auth_permission_86 = Permission()
auth_permission_86.name = 'Can change Hotel'
auth_permission_86.content_type = ContentType.objects.get(app_label="mdm", model="hotelmodel")
auth_permission_86.codename = 'change_hotelmodel'
auth_permission_86 = importer.save_or_locate(auth_permission_86)
auth_permission_87 = Permission()
auth_permission_87.name = 'Can delete Hotel'
auth_permission_87.content_type = ContentType.objects.get(app_label="mdm", model="hotelmodel")
auth_permission_87.codename = 'delete_hotelmodel'
auth_permission_87 = importer.save_or_locate(auth_permission_87)
auth_permission_88 = Permission()
auth_permission_88.name = 'Can view Hotel'
auth_permission_88.content_type = ContentType.objects.get(app_label="mdm", model="hotelmodel")
auth_permission_88.codename = 'view_hotelmodel'
auth_permission_88 = importer.save_or_locate(auth_permission_88)
auth_permission_89 = Permission()
auth_permission_89.name = 'Can add Market'
auth_permission_89.content_type = ContentType.objects.get(app_label="mdm", model="marketmodel")
auth_permission_89.codename = 'add_marketmodel'
auth_permission_89 = importer.save_or_locate(auth_permission_89)
auth_permission_90 = Permission()
auth_permission_90.name = 'Can change Market'
auth_permission_90.content_type = ContentType.objects.get(app_label="mdm", model="marketmodel")
auth_permission_90.codename = 'change_marketmodel'
auth_permission_90 = importer.save_or_locate(auth_permission_90)
auth_permission_91 = Permission()
auth_permission_91.name = 'Can delete Market'
auth_permission_91.content_type = ContentType.objects.get(app_label="mdm", model="marketmodel")
auth_permission_91.codename = 'delete_marketmodel'
auth_permission_91 = importer.save_or_locate(auth_permission_91)
auth_permission_92 = Permission()
auth_permission_92.name = 'Can view Market'
auth_permission_92.content_type = ContentType.objects.get(app_label="mdm", model="marketmodel")
auth_permission_92.codename = 'view_marketmodel'
auth_permission_92 = importer.save_or_locate(auth_permission_92)
auth_permission_93 = Permission()
auth_permission_93.name = 'Can add Roomtype'
auth_permission_93.content_type = ContentType.objects.get(app_label="mdm", model="roomtypemodel")
auth_permission_93.codename = 'add_roomtypemodel'
auth_permission_93 = importer.save_or_locate(auth_permission_93)
auth_permission_94 = Permission()
auth_permission_94.name = 'Can change Roomtype'
auth_permission_94.content_type = ContentType.objects.get(app_label="mdm", model="roomtypemodel")
auth_permission_94.codename = 'change_roomtypemodel'
auth_permission_94 = importer.save_or_locate(auth_permission_94)
auth_permission_95 = Permission()
auth_permission_95.name = 'Can delete Roomtype'
auth_permission_95.content_type = ContentType.objects.get(app_label="mdm", model="roomtypemodel")
auth_permission_95.codename = 'delete_roomtypemodel'
auth_permission_95 = importer.save_or_locate(auth_permission_95)
auth_permission_96 = Permission()
auth_permission_96.name = 'Can view Roomtype'
auth_permission_96.content_type = ContentType.objects.get(app_label="mdm", model="roomtypemodel")
auth_permission_96.codename = 'view_roomtypemodel'
auth_permission_96 = importer.save_or_locate(auth_permission_96)
auth_permission_97 = Permission()
auth_permission_97.name = 'Can add Supplier'
auth_permission_97.content_type = ContentType.objects.get(app_label="mdm", model="suppliermodel")
auth_permission_97.codename = 'add_suppliermodel'
auth_permission_97 = importer.save_or_locate(auth_permission_97)
auth_permission_98 = Permission()
auth_permission_98.name = 'Can change Supplier'
auth_permission_98.content_type = ContentType.objects.get(app_label="mdm", model="suppliermodel")
auth_permission_98.codename = 'change_suppliermodel'
auth_permission_98 = importer.save_or_locate(auth_permission_98)
auth_permission_99 = Permission()
auth_permission_99.name = 'Can delete Supplier'
auth_permission_99.content_type = ContentType.objects.get(app_label="mdm", model="suppliermodel")
auth_permission_99.codename = 'delete_suppliermodel'
auth_permission_99 = importer.save_or_locate(auth_permission_99)
auth_permission_100 = Permission()
auth_permission_100.name = 'Can view Supplier'
auth_permission_100.content_type = ContentType.objects.get(app_label="mdm", model="suppliermodel")
auth_permission_100.codename = 'view_suppliermodel'
auth_permission_100 = importer.save_or_locate(auth_permission_100)
auth_permission_101 = Permission()
auth_permission_101.name = 'Can add session'
auth_permission_101.content_type = ContentType.objects.get(app_label="sessions", model="session")
auth_permission_101.codename = 'add_session'
auth_permission_101 = importer.save_or_locate(auth_permission_101)
auth_permission_102 = Permission()
auth_permission_102.name = 'Can change session'
auth_permission_102.content_type = ContentType.objects.get(app_label="sessions", model="session")
auth_permission_102.codename = 'change_session'
auth_permission_102 = importer.save_or_locate(auth_permission_102)
auth_permission_103 = Permission()
auth_permission_103.name = 'Can delete session'
auth_permission_103.content_type = ContentType.objects.get(app_label="sessions", model="session")
auth_permission_103.codename = 'delete_session'
auth_permission_103 = importer.save_or_locate(auth_permission_103)
auth_permission_104 = Permission()
auth_permission_104.name = 'Can view session'
auth_permission_104.content_type = ContentType.objects.get(app_label="sessions", model="session")
auth_permission_104.codename = 'view_session'
auth_permission_104 = importer.save_or_locate(auth_permission_104)
auth_permission_105 = Permission()
auth_permission_105.name = 'Can add site'
auth_permission_105.content_type = ContentType.objects.get(app_label="sites", model="site")
auth_permission_105.codename = 'add_site'
auth_permission_105 = importer.save_or_locate(auth_permission_105)
auth_permission_106 = Permission()
auth_permission_106.name = 'Can change site'
auth_permission_106.content_type = ContentType.objects.get(app_label="sites", model="site")
auth_permission_106.codename = 'change_site'
auth_permission_106 = importer.save_or_locate(auth_permission_106)
auth_permission_107 = Permission()
auth_permission_107.name = 'Can delete site'
auth_permission_107.content_type = ContentType.objects.get(app_label="sites", model="site")
auth_permission_107.codename = 'delete_site'
auth_permission_107 = importer.save_or_locate(auth_permission_107)
auth_permission_108 = Permission()
auth_permission_108.name = 'Can view site'
auth_permission_108.content_type = ContentType.objects.get(app_label="sites", model="site")
auth_permission_108.codename = 'view_site'
auth_permission_108 = importer.save_or_locate(auth_permission_108)
# Processing model: django.contrib.auth.models.Group
from django.contrib.auth.models import Group
auth_group_1 = Group()
auth_group_1.name = 'reviewer'
auth_group_1 = importer.save_or_locate(auth_group_1)
# 'reviewer' gets add/change/delete (but not view) on the engine models:
# Argument (permissions 57-59), execution model (61-63) and Process (65-67).
auth_group_1.permissions.add(auth_permission_57)
auth_group_1.permissions.add(auth_permission_58)
auth_group_1.permissions.add(auth_permission_59)
auth_group_1.permissions.add(auth_permission_61)
auth_group_1.permissions.add(auth_permission_62)
auth_group_1.permissions.add(auth_permission_63)
auth_group_1.permissions.add(auth_permission_65)
auth_group_1.permissions.add(auth_permission_66)
auth_group_1.permissions.add(auth_permission_67)
# The remaining groups are created with no model permissions attached here.
auth_group_2 = Group()
auth_group_2.name = 'contentmanager'
auth_group_2 = importer.save_or_locate(auth_group_2)
auth_group_3 = Group()
auth_group_3.name = 'datarecorder'
auth_group_3 = importer.save_or_locate(auth_group_3)
auth_group_4 = Group()
auth_group_4.name = 'analyst'
auth_group_4 = importer.save_or_locate(auth_group_4)
auth_group_5 = Group()
auth_group_5.name = 'customer'
auth_group_5 = importer.save_or_locate(auth_group_5)
# Processing model: django.contrib.auth.models.User
from django.contrib.auth.models import User
auth_user_1 = User()
auth_user_1.password = '<PASSWORD>='
auth_user_1.last_login = None
auth_user_1.is_superuser = False
auth_user_1.username = 'test-customer'
auth_user_1.first_name = 'Customer'
auth_user_1.last_name = 'Test'
auth_user_1.email = '<EMAIL>'
auth_user_1.is_staff = False
auth_user_1.is_active = True
| |
<gh_stars>1-10
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on April 30, 2018
@author: mandd
"""
from __future__ import division, print_function , unicode_literals, absolute_import
#Internal Modules---------------------------------------------------------------
import MessageHandler
from utils import utils
from utils import xmlUtils as xmlU
import MessageHandler
#Internal Modules End-----------------------------------------------------------
#External Modules---------------------------------------------------------------
import numpy as np
import xml.etree.ElementTree as ET
import copy
import itertools
from collections import OrderedDict
#External Modules End-----------------------------------------------------------
class ETStructure(object):
"""
This is the base class of the ET structure which actually handles ET structures which is used by the ETimporter and the ETmodel
"""
def __init__(self, expand, inputs):
    """
    Build the ET structure from one or more event-tree XML files.
    @ In, expand, bool, boolean variable which indicates if the ET needs to be factorially expanded
    @ In, inputs, list, list of file objects
    @ Out, None
    """
    self.expand = expand
    # Parse every input file and collect the ET roots and their names
    connectivityMatrix = np.zeros((len(inputs), len(inputs)))
    listETs = []
    listRoots = []
    for fileID in inputs:
        eventTree = ET.parse(fileID.getPath() + fileID.getFilename())
        listETs.append(eventTree.getroot().get('name'))
        listRoots.append(eventTree.getroot())
    # Identify links to other ETs (removed a dead `links = []` pre-assignment
    # that was unconditionally overwritten here)
    links = self.createLinkList(listRoots)
    if len(inputs) > 0:
        rootETID = self.checkETStructure(links, listETs, connectivityMatrix)
    if len(links) >= 1 and len(inputs) > 1:
        # Multiple linked ETs: merge them all onto the root ET, then analyze
        finalAssembledTree = self.analyzeMultipleET(inputs, links, listRoots, listETs, rootETID)
        self.pointSet = self.analyzeSingleET(finalAssembledTree)
    elif len(links) == 0 and len(inputs) > 1:
        raise IOError('Multiple ET files have provided but they are not linked')
    elif len(links) >= 1 and len(inputs) == 1:
        # BUG FIX: the condition was `len(links)>1`, which silently fell
        # through (leaving self.pointSet unset) when a single input file
        # contained exactly one link to another ET
        raise IOError('A single ET files has provided but it contains a link to an additional ET')
    elif len(links) == 0 and len(inputs) == 1:
        eventTree = ET.parse(inputs[0].getPath() + inputs[0].getFilename())
        self.pointSet = self.analyzeSingleET(eventTree.getroot())
def solve(self, combination):
    """
    This method provides the sequence of the ET given the status of its branching conditions
    @ In, combination, dict, values of all ET branching conditions
    @ Out, outcome, float, sequence of the ET corresponding to the provided ET
        branching conditions (-1 if no matching row exists in the point set)
    """
    combinationArray = np.zeros(len(self.variables))
    outcome = -1
    for index, var in enumerate(self.variables):
        combinationArray[index] = combination[var]
    # NOTE: removed `inputData = self.pointSet[:,:len(self.variables)]`,
    # which was computed on every call but never used (dead work)
    for row in self.pointSet:
        if np.array_equal(row[:len(self.variables)], combinationArray):
            outcome = row[-1]
    return outcome
def returnDict(self):
    """
    This method returns the ET data
    @ In, None
    @ Out, (outputDict, self.variables), tuple, dictionary mapping each ET
        branching-condition ID (plus the key 'sequence') to its column of
        values, together with the list of branching-condition IDs
    """
    outputDict = {var: self.pointSet[:, position]
                  for position, var in enumerate(self.variables)}
    # The last column of the point set always carries the sequence IDs
    outputDict['sequence'] = self.pointSet[:, -1]
    return outputDict, self.variables
def createLinkList(self, listRoots):
    """
    Identify the links among the ETs and return them as a fresh list.
    Called again after every merging step, since merging changes the ET
    structure and the link set must be recomputed from scratch.
    Each element of the returned list is a dictionary with keys:
      * link_seqID : ID of the link in the master ET
      * ET_slave_ID : ID of the slave ET to be copied into the master ET
      * ET_master_ID: ID of the master ET (the one containing a
        <define-sequence> node that references another event-tree)
    @ In, listRoots, list, list containing the root of all ETs
    @ Out, linkList, list, list containing the links among the ETs
    """
    linkList = []
    for root in listRoots:
        slaveIDs, seqIDs = self.checkLinkedTree(root)
        masterID = root.get('name')
        for position, slaveID in enumerate(slaveIDs):
            linkList.append({
                'link_seqID': copy.deepcopy(seqIDs[position]),
                'ET_slave_ID': copy.deepcopy(slaveID),
                'ET_master_ID': copy.deepcopy(masterID),
            })
    return linkList
def checkETStructure(self,links,listETs,connectivityMatrix):
"""
This method checks that the structure of the ET is consistent. In particular, it checks that only one root ET
and at least one leaf ET is provided. As an example consider the following ET structure:
ET1 ----> ET2 ----> ET3
|------> ET4 ----> ET5
Five ETs have been provided, ET1 is the only root ET while ET3 and ET5 are leaf ET.
@ In, links, list, list containing all the link connectivities among ETs
@ In, listETs, list, list containing the ID of the ETs
@ In, connectivityMatrix, np.array, matrix containing connectivity mapping
@ Out, rootETID, xml.etree.Element, root of the main ET
"""
# each element (i,j) of the matrix connectivityMatrix shows if there is a connection from ET_i to ET_j:
# * 0: no connection from i to j
# * 1: a connection exists from i to j
for link in links:
row = listETs.index(link['ET_master_ID'])
col = listETs.index(link['ET_slave_ID'])
connectivityMatrix[row,col]=1.0
# the root ETs are charaterized by a column full of zeros
# the leaf ETs are charaterized by a row full of zeros
zeroRows = np.where(~connectivityMatrix.any(axis=1))[0]
zeroColumns = np.where(~connectivityMatrix.any(axis=0))[0]
if len(zeroColumns)>1:
raise IOError('Multiple root ET')
if len(zeroColumns)==0:
raise IOError('No root ET')
if len(zeroColumns)==1:
rootETID = listETs[zeroColumns.astype(int)[0]]
print("ETImporter Root ET: " + str(rootETID))
leafs = []
for index in np.nditer(zeroRows):
leafs.append(listETs[index])
print("ETImporter leaf ETs: " + str(leafs))
return rootETID
  def analyzeMultipleET(self,inputs,links,listRoots,listETs,rootETID):
    """
      This method executes the analysis of the ET if multiple ETs are provided. It merges all ETs onto the root ET.
      @ In, inputs, list, list of file objects
      @ In, links, list, list containing the links among the ETs
      @ In, listRoots, list, list containing the root of all ETs
      @ In, listETs, list, list containing the ID of the ETs
      @ In, rootETID, string, ID of the root ET (as returned by checkETStructure)
      @ Out, xmlNode, xml.etree.Element, root of the assembled root ET
    """
    # 1. for all ET check if it contains SubBranches
    # NOTE(review): ETset is populated from re-parsed input files but never read afterwards;
    # the merging below works on listRoots only. Looks like dead code -- confirm before removing.
    ETset = []
    for fileID in inputs:
      # presumably getPath() ends with a path separator -- TODO confirm against the Files API
      eventTree = ET.parse(fileID.getPath() + fileID.getFilename())
      root = self.checkSubBranches(eventTree.getroot())
      ETset.append(root)
    # 2. loop on the dependencies until it is empty
    while len(links)>0:
      for link in links:
        # indices are looked up fresh on each pass because the pops below shift positions
        indexMaster = listETs.index(link['ET_master_ID'])
        indexSlave = listETs.index(link['ET_slave_ID'])
        # copy the slave ET into the master ET at the linking sequence
        mergedTree = self.mergeLinkedTrees(listRoots[indexMaster],listRoots[indexSlave],link['link_seqID'])
        # replace the master entry with the merged tree (pop+append keeps IDs and roots aligned);
        # the slave entry is deliberately left in place -- only the final root is returned
        listETs.pop(indexMaster)
        listRoots.pop(indexMaster)
        listETs.append(link['ET_master_ID'])
        listRoots.append(mergedTree)
      # links must be recomputed from scratch after each merging pass: the ET structure changed
      links = self.createLinkList(listRoots)
    indexRootET = listETs.index(rootETID)
    return listRoots[indexRootET]
  def analyzeSingleET(self,masterRoot):
    """
      This method executes the analysis of the ET if a single ET is provided.
      It encodes the ET branching conditions and outcomes numerically into a point set:
      one column per functional event plus a final 'sequence' column, one row per sequence.
      @ In, masterRoot, xml.etree.Element, root of the ET
      @ Out, pointSet, np.array, numpy matrix containing the pointSet data
    """
    root = self.checkSubBranches(masterRoot)
    ## These outcomes will be encoded as integers starting at 0
    outcomes = []
    ## Each functional event becomes a variable; its position in this list is its
    ## column index in the point set
    self.variables = []
    values = {}
    for node in root.findall('define-functional-event'):
      event = node.get('name')
      ## First, map the variable to an index by placing it in a list
      self.variables.append(event)
      ## Also, initialize the dictionary of values for this variable so we can
      ## encode them as integers as well
      values[event] = []
      ## Iterate through the forks that use this event and gather all of the
      ## possible states
      for fork in xmlU.findAllRecursive(root.find('initial-state'), 'fork'):
        if fork.get('functional-event') == event:
          for path in fork.findall('path'):
            state = path.get('state')
            if state not in values[event]:
              values[event].append(state)
    ## Iterate through the sequences and gather all of the possible outcomes
    ## so we can numerically encode them later
    for node in root.findall('define-sequence'):
      outcome = node.get('name')
      if outcome not in outcomes:
        outcomes.append(outcome)
    etMap = self.returnMap(outcomes, root.get('name'))
    print("ETImporter variables identified: " + str(format(self.variables)))
    d = len(self.variables)
    n = len(xmlU.findAllRecursive(root.find('initial-state'), 'sequence'))
    ## entries not touched by the DFS stay -1 (presumably "don't care" branch values,
    ## expanded by expandPointSet below -- see its docstring)
    pointSet = -1 * np.ones((n, d + 1))
    rowCounter = 0
    for node in root.find('initial-state'):
      newRows = self.constructPointDFS(node, self.variables, values, etMap, pointSet, rowCounter)
      rowCounter += newRows
    if self.expand:
      pointSet = self.expandPointSet(pointSet,values)
    return pointSet
def expandPointSet(self,pointSet,values):
"""
This method performs a full-factorial expansion of the ET: if a branch contains a -1 element this method
duplicate the branch; each duplicated branch contains element values equal to +1 and 0.
@ In, pointSet, np.array, original point set
@ In, values, dict, dictionary containing the numerical value associated to each functional event
@ Out, pointSet, np.array, expanded point set
"""
| |
    # Why: #4919 in Alexa global
'http://www.carrefour.fr/',
# Why: #4922 in Alexa global
'http://www.tax.gov.ir/',
# Why: #4924 in Alexa global
'http://www.ruelala.com/',
# Why: #4925 in Alexa global
'http://www.mainspy.ru/',
# Why: #4926 in Alexa global
'http://www.phpwind.net/',
# Why: #4927 in Alexa global
'http://www.loteriasyapuestas.es/',
# Why: #4928 in Alexa global
'http://www.musavat.com/',
# Why: #4929 in Alexa global
'http://www.lenskart.com/',
# Why: #4930 in Alexa global
'http://www.tv-asahi.co.jp/',
# Why: #4931 in Alexa global
'http://www.refinery29.com/',
# Why: #4932 in Alexa global
'http://www.888poker.es/',
# Why: #4933 in Alexa global
'http://www.denverpost.com/',
# Why: #4934 in Alexa global
'http://www.who.int/',
# Why: #4935 in Alexa global
'http://www.thesims3.com/',
# Why: #4936 in Alexa global
'http://www.jerkhour.com/',
# Why: #4937 in Alexa global
'http://www.lyricsmode.com/',
# Why: #4938 in Alexa global
'http://www.ivillage.com/',
# Why: #4939 in Alexa global
'http://qyer.com/',
# Why: #4940 in Alexa global
'http://www.hktdc.com/',
# Why: #4941 in Alexa global
'http://www.pornoload.com/',
# Why: #4942 in Alexa global
'http://www.bluedart.com/',
# Why: #4943 in Alexa global
'http://www.here.com/',
# Why: #4944 in Alexa global
'http://www.philips.com/',
# Why: #4945 in Alexa global
'http://www.dsebd.org/',
# Why: #4946 in Alexa global
'http://www.tubidy.mobi/',
# Why: #4947 in Alexa global
'http://www.stream.cz/',
# Why: #4948 in Alexa global
'http://www.infojobs.com.br/',
# Why: #4949 in Alexa global
'http://www.soft98.ir/',
# Why: #4950 in Alexa global
'http://www.bolsaparanovatos.com/',
# Why: #4951 in Alexa global
'http://www.mercador.ro/',
# Why: #4952 in Alexa global
'http://www.neogaf.com/',
# Why: #4953 in Alexa global
'http://www.yardbarker.com/',
# Why: #4954 in Alexa global
'http://www.rapidlibrary.com/',
# Why: #4955 in Alexa global
'http://www.xxeronetxx.info/',
# Why: #4956 in Alexa global
'http://www.kaiserpermanente.org/',
# Why: #4957 in Alexa global
'http://www.telstra.com.au/',
# Why: #4958 in Alexa global
'http://www.contra.gr/',
# Why: #4959 in Alexa global
'http://www.laredoute.it/',
# Why: #4960 in Alexa global
'http://www.lipsum.com/',
# Why: #4961 in Alexa global
'http://www.twitlonger.com/',
# Why: #4962 in Alexa global
'http://www.hln.be/',
# Why: #4963 in Alexa global
'http://www.53kf.com/',
# Why: #4964 in Alexa global
'http://www.gofundme.com/',
# Why: #4965 in Alexa global
'http://www.carigold.com/',
# Why: #4966 in Alexa global
'http://www.clips4sale.com/',
# Why: #4967 in Alexa global
'http://www.focalprice.com/',
# Why: #4968 in Alexa global
'http://www.1111.com.tw/',
# Why: #4969 in Alexa global
'http://www.gameaholic.com/',
# Why: #4970 in Alexa global
'http://www.presstv.ir/',
# Why: #4971 in Alexa global
'http://www.puu.sh/',
# Why: #4973 in Alexa global
'http://www.filmlinks4u.net/',
# Why: #4974 in Alexa global
'http://www.traffic-delivery.com/',
# Why: #4975 in Alexa global
'http://www.bebo.com/',
# Why: #4976 in Alexa global
'http://enter.ru/',
# Why: #4977 in Alexa global
'http://www.shufoo.net/',
# Why: #4978 in Alexa global
'http://www.vivo.com.br/',
# Why: #4979 in Alexa global
'http://www.jizzhut.com/',
# Why: #4980 in Alexa global
'http://www.1jux.net/',
# Why: #4981 in Alexa global
'http://www.serebii.net/',
# Why: #4982 in Alexa global
'http://www.translate.ru/',
# Why: #4983 in Alexa global
'http://www.mtv3.fi/',
# Why: #4984 in Alexa global
'http://www.njuskalo.hr/',
# Why: #4985 in Alexa global
'http://www.bell.ca/',
# Why: #4986 in Alexa global
'http://www.myheritage.com/',
# Why: #4987 in Alexa global
'http://www.cic.fr/',
# Why: #4988 in Alexa global
'http://www.mercurynews.com/',
# Why: #4989 in Alexa global
'http://www.alaan.tv/',
# Why: #4990 in Alexa global
'http://www.econsultancy.com/',
# Why: #4991 in Alexa global
'http://www.pornhost.com/',
# Why: #4992 in Alexa global
'http://www.a8.net/',
# Why: #4994 in Alexa global
'http://www.netzero.net/',
# Why: #4995 in Alexa global
'http://www.tracklab101.com/',
# Why: #4996 in Alexa global
'http://www.spanishdict.com/',
# Why: #4997 in Alexa global
'http://www.amctv.com/',
# Why: #4998 in Alexa global
'http://www.erepublik.com/',
# Why: #4999 in Alexa global
'http://www.mk.ru/',
# Why: #5000 in Alexa global
'http://www.publico.es/',
# Why: #5001 in Alexa global
'http://www.newegg.com.cn/',
# Why: #5002 in Alexa global
'http://www.fux.com/',
# Why: #5003 in Alexa global
'http://www.webcamtoy.com/',
# Why: #5004 in Alexa global
'http://www.rahnama.com/',
# Why: #5005 in Alexa global
'http://www.wanyh.com/',
# Why: #5006 in Alexa global
'http://www.ecplaza.net/',
# Why: #5007 in Alexa global
'http://www.mol.gov.sa/',
# Why: #5008 in Alexa global
'http://www.torrentday.com/',
# Why: #5009 in Alexa global
'http://www.hsbc.com.br/',
# Why: #5010 in Alexa global
'http://www.interoperabilitybridges.com/',
# Why: #5011 in Alexa global
'http://www.billmelater.com/',
# Why: #5012 in Alexa global
'http://www.speedanalysis.com/',
# Why: #5013 in Alexa global
'http://www.volusion.com/',
# Why: #5014 in Alexa global
'http://www.mixcloud.com/',
# Why: #5015 in Alexa global
'http://www.weeronline.nl/',
# Why: #5016 in Alexa global
'http://www.tiancity.com/',
# Why: #5017 in Alexa global
'http://www.thehun.com/',
# Why: #5018 in Alexa global
'http://www.comparisons.org/',
# Why: #5019 in Alexa global
'http://www.eurosport.ru/',
# Why: #5020 in Alexa global
'http://www.trendyol.com/',
# Why: #5021 in Alexa global
'http://www.7120.com/',
# Why: #5022 in Alexa global
'http://www.eldiariodeamerica.com/',
# Why: #5023 in Alexa global
'http://www.fap8.com/',
# Why: #5024 in Alexa global
'http://www.joyme.com/',
# Why: #5025 in Alexa global
'http://www.ufl.edu/',
# Why: #5026 in Alexa global
'http://www.cuantocabron.com/',
# Why: #5027 in Alexa global
'http://www.hotmart.com.br/',
# Why: #5028 in Alexa global
'http://www.wolframalpha.com/',
# Why: #5029 in Alexa global
'http://www.cpasbien.com/',
# Why: #5030 in Alexa global
'http://www.sanalpazar.com/',
# Why: #5031 in Alexa global
'http://www.publipt.com/',
# Why: #5032 in Alexa global
'http://www.9ku.com/',
# Why: #5033 in Alexa global
'http://www.officemax.com/',
# Why: #5034 in Alexa global
'http://www.cuny.edu/',
# Why: #5035 in Alexa global
'http://www.gem.pl/',
# Why: #5036 in Alexa global
'http://www.waelelebrashy.com/',
# Why: #5037 in Alexa global
'http://www.coinmill.com/',
# Why: #5038 in Alexa global
'http://www.bet.com/',
# Why: #5039 in Alexa global
'http://www.moskva.fm/',
# Why: #5040 in Alexa global
'http://www.groupalia.com/',
# Why: #5041 in Alexa global
'http://131.com/',
# Why: #5042 in Alexa global
'http://www.pichak.net/',
# Why: #5043 in Alexa global
'http://www.theatlanticwire.com/',
# Why: #5044 in Alexa global
'http://tokyo-sports.co.jp/',
# Why: #5045 in Alexa global
'http://www.laptopmag.com/',
# Why: #5046 in Alexa global
'http://www.worldpay.com/',
# Why: #5047 in Alexa global
'http://www.groupon.pl/',
# Why: #5048 in Alexa global
'http://www.imeimama.com/',
# Why: #5049 in Alexa global
'http://www.torrents.net/',
# Why: #5051 in Alexa global
'http://www.britishcouncil.org/',
# Why: #5052 in Alexa global
'http://www.letsbonus.com/',
# Why: #5053 in Alexa global
'http://www.e-monsite.com/',
# Why: #5054 in Alexa global
'http://www.url.org/',
# Why: #5055 in Alexa global
'http://www.discuz.com/',
# Why: #5056 in Alexa global
'http://www.freepornsite.me/',
# Why: #5057 in Alexa global
'http://www.cheatcc.com/',
# Why: #5058 in Alexa global
'http://www.magicmovies.com/',
# Why: #5059 in Alexa global
'http://www.laterooms.com/',
# Why: #5060 in Alexa global
'http://www.du.ac.in/',
# Why: #5062 in Alexa global
'http://www.uservoice.com/',
# Why: #5063 in Alexa global
'http://www.discas.net/',
# Why: #5064 in Alexa global
'http://www.d1g.com/',
# Why: #5065 in Alexa global
'http://www.explicittube.com/',
# Why: #5066 in Alexa global
'http://www.e-autopay.com/',
# Why: #5067 in Alexa global
'http://3lian.com/',
# Why: #5068 in Alexa global
'http://www.oopsmovs.com/',
# Why: #5069 in Alexa global
'http://www.agenziaentrate.gov.it/',
# Why: #5070 in Alexa global
'http://www.ufc.com/',
# Why: #5071 in Alexa global
'http://www.mooshare.biz/',
# Why: #5072 in Alexa global
'http://www.ankang06.org/',
# Why: #5073 in Alexa global
'http://www.betradar.com/',
# Why: #5074 in Alexa global
'http://www.explosm.net/',
# Why: #5075 in Alexa global
'http://www.silkroad.com/',
# Why: #5076 in Alexa global
'http://www.crackberry.com/',
# Why: #5078 in Alexa global
'http://www.toyota.com/',
# Why: #5079 in Alexa global
'http://www.bongda.com.vn/',
# Why: #5080 in Alexa global
'http://www.europapress.es/',
# Why: #5081 in Alexa global
'http://www.mlxchange.com/',
# Why: #5082 in Alexa global
'http://www.plius.lt/',
# Why: #5083 in Alexa global
'http://www.pitchfork.com/',
# Why: #5084 in Alexa global
'http://www.groupon.de/',
# Why: #5085 in Alexa global
'http://www.hollisterco.com/',
# Why: #5086 in Alexa global
'http://www.hasoffers.com/',
# Why: #5087 in Alexa global
'http://www.miami.com/',
# Why: #5089 in Alexa global
'http://www.dslreports.com/',
# Why: #5090 in Alexa global
'http://www.blinkweb.com/',
# Why: #5091 in Alexa global
'http://www.alamaula.com/',
# Why: #5092 in Alexa global
'http://www.leonardo.it/',
# Why: #5093 in Alexa global
'http://www.very.co.uk/',
# Why: #5094 in Alexa global
'http://www.globalsources.com/',
# Why: #5095 in Alexa global
'http://www.viator.com/',
# Why: #5096 in Alexa global
'http://www.greenwichmeantime.com/',
# Why: #5097 in Alexa global
'http://www.appannie.com/',
# Why: #5099 in Alexa global
'http://www.eldorado.ru/',
# Why: #5100 in Alexa global
'http://www.canadiantire.ca/',
# Why: #5101 in Alexa global
'http://www.enjin.com/',
# Why: #5102 in Alexa global
'http://szhome.com/',
# Why: #5103 in Alexa global
'http://www.news-us.jp/',
# Why: #5104 in Alexa global
'http://www.phim3s.net/',
# Why: #5105 in Alexa global
'http://www.bash.im/',
# Why: #5106 in Alexa global
'http://www.immi.gov.au/',
# Why: #5107 in Alexa global
'http://www.cwb.gov.tw/',
# Why: #5108 in Alexa global
'http://www.enjoydressup.com/',
# Why: #5109 in Alexa global
'http://www.thesuperficial.com/',
# Why: #5110 in Alexa global
'http://www.bunshun.jp/',
# Why: #5111 in Alexa global
'http://www.91mobiles.com/',
| |
DatasetType: 'DatasetDescriptionFile.DatasetTypeEnum'):
self['DatasetType'] = DatasetType
    @property
    def License(self) -> 'str':
        """str: value stored under the 'License' key."""
        return self['License']
    @License.setter
    def License(self, License: 'str'):
        self['License'] = License
    @property
    def Acknowledgements(self) -> 'str':
        """str: value stored under the 'Acknowledgements' key."""
        return self['Acknowledgements']
    @Acknowledgements.setter
    def Acknowledgements(self, Acknowledgements: 'str'):
        self['Acknowledgements'] = Acknowledgements
    @property
    def HowToAcknowledge(self) -> 'str':
        """str: value stored under the 'HowToAcknowledge' key."""
        return self['HowToAcknowledge']
    @HowToAcknowledge.setter
    def HowToAcknowledge(self, HowToAcknowledge: 'str'):
        self['HowToAcknowledge'] = HowToAcknowledge
    @property
    def DatasetDOI(self) -> 'str':
        """str: value stored under the 'DatasetDOI' key."""
        return self['DatasetDOI']
    @DatasetDOI.setter
    def DatasetDOI(self, DatasetDOI: 'str'):
        self['DatasetDOI'] = DatasetDOI
    @property
    def Authors(self) -> 'List[str]':
        """List[str]: value stored under the 'Authors' key."""
        return self['Authors']
    @Authors.setter
    def Authors(self, Authors: 'List[str]'):
        self['Authors'] = Authors
    @property
    def Funding(self) -> 'List[str]':
        """List[str]: value stored under the 'Funding' key."""
        return self['Funding']
    @Funding.setter
    def Funding(self, Funding: 'List[str]'):
        self['Funding'] = Funding
    @property
    def EthicsApprovals(self) -> 'List[str]':
        """List[str]: value stored under the 'EthicsApprovals' key."""
        return self['EthicsApprovals']
    @EthicsApprovals.setter
    def EthicsApprovals(self, EthicsApprovals: 'List[str]'):
        self['EthicsApprovals'] = EthicsApprovals
    @property
    def ReferencesAndLinks(self) -> 'List[str]':
        """List[str]: value stored under the 'ReferencesAndLinks' key."""
        return self['ReferencesAndLinks']
    @ReferencesAndLinks.setter
    def ReferencesAndLinks(self, ReferencesAndLinks: 'List[str]'):
        self['ReferencesAndLinks'] = ReferencesAndLinks
MEMBERS = {
'Name': {'type': 'str', 'min': 0, 'max': 1, 'use': 'required', 'meta': {}},
'BIDSVersion': {'type': 'str', 'min': 0, 'max': 1, 'use': 'required', 'meta': {}},
'HEDVersion': {'type': 'str', 'min': 0, 'max': 1, 'use': 'recommended', 'meta': {}},
'DatasetType': {'type': 'DatasetTypeEnum', 'min': 0, 'max': 1, 'use': 'recommended', 'meta': {}},
'License': {'type': 'str', 'min': 0, 'max': 1, 'use': 'recommended', 'meta': {}},
'Acknowledgements': {'type': 'str', 'min': 0, 'max': 1, 'use': 'optional', 'meta': {}},
'HowToAcknowledge': {'type': 'str', 'min': 0, 'max': 1, 'use': 'optional', 'meta': {}},
'DatasetDOI': {'type': 'str', 'min': 0, 'max': 1, 'use': 'optional', 'meta': {}},
'Authors': {'type': 'str', 'min': 0, 'max': inf, 'use': 'optional', 'meta': {}},
'Funding': {'type': 'str', 'min': 0, 'max': inf, 'use': 'optional', 'meta': {}},
'EthicsApprovals': {'type': 'str', 'min': 0, 'max': inf, 'use': 'optional', 'meta': {}},
'ReferencesAndLinks': {'type': 'str', 'min': 0, 'max': inf, 'use': 'optional', 'meta': {}},
}
class DerivativeDatasetDescriptionFile(DatasetDescriptionFile):
    """dataset_description model extended with the 'GeneratedBy' and 'SourceDatasets' members."""
    def __init__(self, GeneratedBy: 'List[GeneratedBy]' = None, SourceDatasets: 'List[SourceDatasets]' = None, Name: 'str' = None, BIDSVersion: 'str' = None, HEDVersion: 'str' = None, DatasetType: 'DatasetDescriptionFile.DatasetTypeEnum' = None, License: 'str' = None, Acknowledgements: 'str' = None, HowToAcknowledge: 'str' = None, DatasetDOI: 'str' = None, Authors: 'List[str]' = None, Funding: 'List[str]' = None, EthicsApprovals: 'List[str]' = None, ReferencesAndLinks: 'List[str]' = None, contents: 'Dict' = None, name: 'str' = None, extension: 'str' = None, uri: 'str' = None):
        # falsy arguments are normalized: scalars to None, list members to fresh lists
        super().__init__(
            Name if Name else None,
            BIDSVersion if BIDSVersion else None,
            HEDVersion if HEDVersion else None,
            DatasetType if DatasetType else None,
            License if License else None,
            Acknowledgements if Acknowledgements else None,
            HowToAcknowledge if HowToAcknowledge else None,
            DatasetDOI if DatasetDOI else None,
            Authors if Authors else [],
            Funding if Funding else [],
            EthicsApprovals if EthicsApprovals else [],
            ReferencesAndLinks if ReferencesAndLinks else [],
            contents if contents else None,
            name if name else None,
            extension if extension else None,
            uri if uri else None,
        )
        self['GeneratedBy'] = GeneratedBy if GeneratedBy else []
        self['SourceDatasets'] = SourceDatasets if SourceDatasets else []
    @property
    def GeneratedBy(self) -> 'List[GeneratedBy]':
        """List[GeneratedBy]: value stored under the 'GeneratedBy' key."""
        return self['GeneratedBy']
    @GeneratedBy.setter
    def GeneratedBy(self, value: 'List[GeneratedBy]'):
        self['GeneratedBy'] = value
    @property
    def SourceDatasets(self) -> 'List[SourceDatasets]':
        """List[SourceDatasets]: value stored under the 'SourceDatasets' key."""
        return self['SourceDatasets']
    @SourceDatasets.setter
    def SourceDatasets(self, value: 'List[SourceDatasets]'):
        self['SourceDatasets'] = value
    # schema table: member type, multiplicity (inf = unbounded list) and requirement level
    MEMBERS = {
        'GeneratedBy': {'type': 'GeneratedBy', 'min': 0, 'max': inf, 'use': 'optional', 'meta': {}},
        'SourceDatasets': {'type': 'SourceDatasets', 'min': 0, 'max': inf, 'use': 'recommended', 'meta': {}},
    }
class DerivativeFolder(Folder):
    """Folder model carrying a 'dataset_description' member and nested 'derivatives' folders."""
    def __init__(self, dataset_description: 'DerivativeDatasetDescriptionFile' = None, derivatives: 'List[DerivativeFolder]' = None, name: 'str' = None, files: 'List[File]' = None, folders: 'List[Folder]' = None, metadatafiles: 'List[MetadataFile]' = None):
        super().__init__(name if name else None, files if files else [], folders if folders else [], metadatafiles if metadatafiles else [])
        self['dataset_description'] = dataset_description if dataset_description else None
        self['derivatives'] = derivatives if derivatives else []
    @property
    def dataset_description(self) -> 'DerivativeDatasetDescriptionFile':
        """DerivativeDatasetDescriptionFile: value stored under the 'dataset_description' key."""
        return self['dataset_description']
    @dataset_description.setter
    def dataset_description(self, value: 'DerivativeDatasetDescriptionFile'):
        self['dataset_description'] = value
    @property
    def derivatives(self) -> 'List[DerivativeFolder]':
        """List[DerivativeFolder]: value stored under the 'derivatives' key."""
        return self['derivatives']
    @derivatives.setter
    def derivatives(self, value: 'List[DerivativeFolder]'):
        self['derivatives'] = value
    # schema table: member type, multiplicity (inf = unbounded list) and requirement level
    MEMBERS = {
        'dataset_description': {'type': 'DerivativeDatasetDescriptionFile', 'min': 0, 'max': 1, 'use': 'optional', 'meta': {}},
        'derivatives': {'type': 'DerivativeFolder', 'min': 0, 'max': inf, 'use': 'optional', 'meta': {}},
    }
class Session(Folder):
    """Folder model holding a list of datatype sub-folders."""
    def __init__(self, datatypes: 'List[DatatypeFolder]' = None, name: 'str' = None, files: 'List[File]' = None, folders: 'List[Folder]' = None, metadatafiles: 'List[MetadataFile]' = None):
        super().__init__(name if name else None, files if files else [], folders if folders else [], metadatafiles if metadatafiles else [])
        self['datatypes'] = datatypes if datatypes else []
    @property
    def datatypes(self) -> 'List[DatatypeFolder]':
        """List[DatatypeFolder]: value stored under the 'datatypes' key."""
        return self['datatypes']
    @datatypes.setter
    def datatypes(self, value: 'List[DatatypeFolder]'):
        self['datatypes'] = value
    # schema table: member type, multiplicity (inf = unbounded list) and requirement level
    MEMBERS = {
        'datatypes': {'type': 'DatatypeFolder', 'min': 0, 'max': inf, 'use': 'optional', 'meta': {}},
    }
class DatatypeFolder(Folder):
    """Folder model holding a list of artifacts."""
    def __init__(self, artifacts: 'List[Artifact]' = None, name: 'str' = None, files: 'List[File]' = None, folders: 'List[Folder]' = None, metadatafiles: 'List[MetadataFile]' = None):
        super().__init__(name if name else None, files if files else [], folders if folders else [], metadatafiles if metadatafiles else [])
        self['artifacts'] = artifacts if artifacts else []
    @property
    def artifacts(self) -> 'List[Artifact]':
        """List[Artifact]: value stored under the 'artifacts' key."""
        return self['artifacts']
    @artifacts.setter
    def artifacts(self, value: 'List[Artifact]'):
        self['artifacts'] = value
    # schema table: member type, multiplicity (inf = unbounded list) and requirement level
    MEMBERS = {
        'artifacts': {'type': 'Artifact', 'min': 0, 'max': inf, 'use': 'optional', 'meta': {}},
    }
class Subject(Folder):
    """Folder model holding sessions (named 'ses-*' per the schema) and datatype folders."""
    def __init__(self, sessions: 'List[Session]' = None, datatypes: 'List[DatatypeFolder]' = None, name: 'str' = None, files: 'List[File]' = None, folders: 'List[Folder]' = None, metadatafiles: 'List[MetadataFile]' = None):
        super().__init__(name if name else None, files if files else [], folders if folders else [], metadatafiles if metadatafiles else [])
        self['sessions'] = sessions if sessions else []
        self['datatypes'] = datatypes if datatypes else []
    @property
    def sessions(self) -> 'List[Session]':
        """List[Session]: value stored under the 'sessions' key."""
        return self['sessions']
    @sessions.setter
    def sessions(self, value: 'List[Session]'):
        self['sessions'] = value
    @property
    def datatypes(self) -> 'List[DatatypeFolder]':
        """List[DatatypeFolder]: value stored under the 'datatypes' key."""
        return self['datatypes']
    @datatypes.setter
    def datatypes(self, value: 'List[DatatypeFolder]'):
        self['datatypes'] = value
    # schema table: member type, multiplicity (inf = unbounded list) and requirement level
    MEMBERS = {
        'sessions': {'type': 'Session', 'min': 0, 'max': inf, 'use': 'optional', 'meta': {'name_pattern': 'ses-.*'}},
        'datatypes': {'type': 'DatatypeFolder', 'min': 0, 'max': inf, 'use': 'optional', 'meta': {}},
    }
class GeneratedBy(Model):
    """Model describing the tool that generated a derivative dataset."""
    def __init__(self, Name: 'str' = None, Version: 'str' = None, Description: 'str' = None, CodeURL: 'str' = None, Container: 'List[GeneratedByContainer]' = None):
        super().__init__()
        # falsy scalars are normalized to None; the list member to a fresh list
        self['Name'] = Name if Name else None
        self['Version'] = Version if Version else None
        self['Description'] = Description if Description else None
        self['CodeURL'] = CodeURL if CodeURL else None
        self['Container'] = Container if Container else []
    @property
    def Name(self) -> 'str':
        """str: value stored under the 'Name' key."""
        return self['Name']
    @Name.setter
    def Name(self, value: 'str'):
        self['Name'] = value
    @property
    def Version(self) -> 'str':
        """str: value stored under the 'Version' key."""
        return self['Version']
    @Version.setter
    def Version(self, value: 'str'):
        self['Version'] = value
    @property
    def Description(self) -> 'str':
        """str: value stored under the 'Description' key."""
        return self['Description']
    @Description.setter
    def Description(self, value: 'str'):
        self['Description'] = value
    @property
    def CodeURL(self) -> 'str':
        """str: value stored under the 'CodeURL' key."""
        return self['CodeURL']
    @CodeURL.setter
    def CodeURL(self, value: 'str'):
        self['CodeURL'] = value
    @property
    def Container(self) -> 'List[GeneratedByContainer]':
        """List[GeneratedByContainer]: value stored under the 'Container' key."""
        return self['Container']
    @Container.setter
    def Container(self, value: 'List[GeneratedByContainer]'):
        self['Container'] = value
    # schema table: member type, multiplicity (inf = unbounded list) and requirement level
    MEMBERS = {
        'Name': {'type': 'str', 'min': 0, 'max': 1, 'use': 'required', 'meta': {}},
        'Version': {'type': 'str', 'min': 0, 'max': 1, 'use': 'recommended', 'meta': {}},
        'Description': {'type': 'str', 'min': 0, 'max': 1, 'use': 'optional', 'meta': {}},
        'CodeURL': {'type': 'str', 'min': 0, 'max': 1, 'use': 'optional', 'meta': {}},
        'Container': {'type': 'GeneratedByContainer', 'min': 0, 'max': inf, 'use': 'optional', 'meta': {}},
    }
class SourceDatasets(Model):
    """Model referencing a source dataset by DOI, URL and version."""
    def __init__(self, DOI: 'str' = None, URL: 'str' = None, Version: 'str' = None):
        super().__init__()
        # falsy arguments are normalized to None
        self['DOI'] = DOI if DOI else None
        self['URL'] = URL if URL else None
        self['Version'] = Version if Version else None
    @property
    def DOI(self) -> 'str':
        """str: value stored under the 'DOI' key."""
        return self['DOI']
    @DOI.setter
    def DOI(self, value: 'str'):
        self['DOI'] = value
    @property
    def URL(self) -> 'str':
        """str: value stored under the 'URL' key."""
        return self['URL']
    @URL.setter
    def URL(self, value: 'str'):
        self['URL'] = value
    @property
    def Version(self) -> 'str':
        """str: value stored under the 'Version' key."""
        return self['Version']
    @Version.setter
    def Version(self, value: 'str'):
        self['Version'] = value
    # schema table: member type, multiplicity and requirement level
    MEMBERS = {
        'DOI': {'type': 'str', 'min': 0, 'max': 1, 'use': 'required', 'meta': {}},
        'URL': {'type': 'str', 'min': 0, 'max': 1, 'use': 'required', 'meta': {}},
        'Version': {'type': 'str', 'min': 0, 'max': 1, 'use': 'required', 'meta': {}},
    }
class GeneratedByContainer(Model):
    """Model describing the container (type, tag, URI) used by a GeneratedBy tool."""
    def __init__(self, Type: 'str' = None, Tag: 'str' = None, URI: 'str' = None):
        super().__init__()
        # falsy arguments are normalized to None
        self['Type'] = Type if Type else None
        self['Tag'] = Tag if Tag else None
        self['URI'] = URI if URI else None
    @property
    def Type(self) -> 'str':
        """str: value stored under the 'Type' key."""
        return self['Type']
    @Type.setter
    def Type(self, value: 'str'):
        self['Type'] = value
    @property
    def Tag(self) -> 'str':
        """str: value stored under the 'Tag' key."""
        return self['Tag']
    @Tag.setter
    def Tag(self, value: 'str'):
        self['Tag'] = value
    @property
    def URI(self) -> 'str':
        """str: value stored under the 'URI' key."""
        return self['URI']
    @URI.setter
    def URI(self, value: 'str'):
        self['URI'] = value
    # schema table: member type, multiplicity and requirement level
    MEMBERS = {
        'Type': {'type': 'str', 'min': 0, 'max': 1, 'use': 'optional', 'meta': {}},
        'Tag': {'type': 'str', 'min': 0, 'max': 1, 'use': 'optional', 'meta': {}},
        'URI': {'type': 'str', 'min': 0, 'max': 1, 'use': 'optional', 'meta': {}},
    }
class Dataset(Folder):
def __init__(self, subjects: 'List[Subject]' = None, dataset_description: 'DatasetDescriptionFile' = None, README: 'File' = None, CHANGES: 'File' = None, LICENSE: 'File' = None, genetic_info: 'JsonFile' = None, samples: 'JsonFile' = None, participants_tsv: 'File' = | |
get_d_d(self):
return self.get("d{0}".format(self.data['d']), Type.int_32)
def get_d_b(self):
return self.get("d{0}".format(self.data['b']), Type.int_32)
def get_d_a(self):
return self.get("d{0}".format(self.data['a']), Type.int_32)
def fetch_operands(self):
return self.get_d_a(), self.get_d_b(), self.get_d_d(), self.get_n()
    def compute_result(self, *args):
        """Accumulate D[d] + (low half-word of D[a] * low half-word of D[b]) << n,
        with q-format saturation handling, and update the PSW overflow flags.
        NOTE(review): statements below build VEX/claripy IR expressions; documented
        only as far as the visible dataflow shows -- check against the TriCore manual.
        """
        d_a = args[0]
        d_b = args[1]
        d_d = args[2]
        n = args[3]
        # saturation mask: all-ones when both 16-bit operands are 0x8000 and n == 1
        # (presumably the only combination whose product overflows -- TODO confirm)
        sc = extend_to_16_bits(((d_a & 0xffff) == 0x8000) & ((d_b & 0xffff) == 0x8000) & (n == 1).cast_to(Type.int_32))
        # either the saturated value 0x7fffffff or the shifted 16x16 product;
        # NOTE(review): masks look asymmetric (0x7fffffff vs sc^0xffff, 32- vs 16-bit) -- confirm
        mul_res = (0x7fffffff & sc) | ((((d_a & 0xffff) * (d_b & 0xffff)) << n.value) & (sc^0xffff))
        result = d_d + mul_res
        # set flags
        c = 0
        v = overflow(result)
        av = advanced_overflow(result)
        psw = self.get_psw()
        cond_sv = (v == 0)
        cond_sav = (av == 0)
        # sticky bits: keep the old PSW bit when no overflow, otherwise force to 1
        sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
        sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
        psw = set_usb(psw, c, v, sv, av, sav)
        self.put(psw, "psw")
        return result
def commit_result(self, res):
self.put(res, self.get_dst_reg())
class RRR1_MADD_Q_43_1D_Inst(Instruction):
    """ Multiply-Add Q Format instruction:
        op = 0x43
        op2 = 0x1D
        User Status Flags: V, SV, AV, SAV
        Accumulates the 16x16 low-half-word product into the register pair E[d],
        writing the result to the pair E[c] and updating the PSW flags.
    """
    name = 'RRR1_MADD.Q_43_1D'
    op = "{0}{1}".format(bin(4)[2:].zfill(4), bin(3)[2:].zfill(4))
    op2_1 = "{0}".format(bin(1)[2:].zfill(2))
    op2_2 = "{0}".format(bin(0xd)[2:].zfill(4))
    # field layout: op | b (src) | a (src) | op2 | n (shift) | c (dst pair) | d (accumulator pair)
    bin_format = op + 'b'*4 + 'a'*4 + op2_1 + op2_2 + 'n'*2 + 'c'*4 + 'd'*4
    def parse(self, bitstrm):
        # decode bit-string fields into integer register indices / shift amount
        data = Instruction.parse(self, bitstrm)
        data = {"a": int(data['a'], 2),
                "b": int(data['b'], 2),
                "c": int(data['c'], 2),
                "d": int(data['d'], 2),
                "n": int(data['n'], 2)}
        log_this(self.name, data, hex(self.addr))
        return data
    def get_dst_reg(self):
        # low word of the destination pair E[c]
        return "d{0}".format(self.data['c'])
    def get_psw(self):
        return self.get("psw", Type.int_32)
    def get_n(self):
        return self.constant(self.data['n'], Type.int_2)
    def get_d_b(self):
        return self.get("d{0}".format(self.data['b']), Type.int_32)
    def get_d_a(self):
        return self.get("d{0}".format(self.data['a']), Type.int_32)
    def fetch_operands(self):
        return self.get_d_a(), self.get_d_b(), self.get_n()
    def compute_result(self, *args):
        """Results are written directly to E[c] here (no commit_result); returns None.
        NOTE(review): bit-level semantics below should be checked against the TriCore manual.
        """
        d_a = args[0]
        d_b = args[1]
        n = args[2]
        # saturation mask: all-ones when both 16-bit operands are 0x8000 and n == 1
        sc = extend_to_16_bits(((d_a & 0xffff) == 0x8000) & ((d_b & 0xffff) == 0x8000) & (n == 1).cast_to(Type.int_32))
        # NOTE(review): masks look asymmetric (0x7fffffff vs sc^0xffff) -- confirm
        mul_res = (0x7fffffff & sc) | ((((d_a & 0xffff) * (d_b & 0xffff)) << n.value) & (sc^0xffff))
        e_d_0 = self.get("d{0}".format(self.data['d']), Type.int_32) # E[d][31:0]
        e_d_1 = self.get("d{0}".format(self.data['d']+1), Type.int_32) # E[d][62:32]
        result_w0 = e_d_0 + (mul_res << 16)
        result_w1 = e_d_1
        # put results
        self.put(result_w0, "d{0}".format(self.data['c']))
        self.put(result_w1, "d{0}".format(self.data['c']+1))
        # prepare 64-bit object for setting flags
        result = result_w1
        result <<= 32
        result |= result_w0
        # set flags
        c = 0
        v = overflow_64(result).cast_to(Type.int_32)
        av = advanced_overflow_64(result).cast_to(Type.int_32)
        psw = self.get_psw()
        cond_sv = (v == 0)
        cond_sav = (av == 0)
        # sticky bits: keep the old PSW bit when no overflow, otherwise force to 1
        sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
        sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
        psw = set_usb(psw, c, v, sv, av, sav)
        self.put(psw, "psw")
class RRR1_MADD_Q_43_04_Inst(Instruction):
    """ Multiply-Add Q Format instruction:
        op = 0x43
        op2 = 0x04
        User Status Flags: V, SV, AV, SAV
        Accumulates the 16x16 upper-half-word product into D[d], writing the result
        to D[c] and updating the PSW flags.
    """
    name = 'RRR1_MADD.Q_43_04'
    op = "{0}{1}".format(bin(4)[2:].zfill(4), bin(3)[2:].zfill(4))
    op2_1 = "{0}".format(bin(0)[2:].zfill(2))
    op2_2 = "{0}".format(bin(4)[2:].zfill(4))
    # field layout: op | b (src) | a (src) | op2 | n (shift) | c (dst) | d (accumulator)
    bin_format = op + 'b'*4 + 'a'*4 + op2_1 + op2_2 + 'n'*2 + 'c'*4 + 'd'*4
    def parse(self, bitstrm):
        # decode bit-string fields into integer register indices / shift amount
        data = Instruction.parse(self, bitstrm)
        data = {"a": int(data['a'], 2),
                "b": int(data['b'], 2),
                "c": int(data['c'], 2),
                "d": int(data['d'], 2),
                "n": int(data['n'], 2)}
        log_this(self.name, data, hex(self.addr))
        return data
    def get_dst_reg(self):
        return "d{0}".format(self.data['c'])
    def get_psw(self):
        return self.get("psw", Type.int_32)
    def get_n(self):
        return self.constant(self.data['n'], Type.int_2)
    def get_d_d(self):
        return self.get("d{0}".format(self.data['d']), Type.int_32)
    def get_d_b(self):
        return self.get("d{0}".format(self.data['b']), Type.int_32)
    def get_d_a(self):
        return self.get("d{0}".format(self.data['a']), Type.int_32)
    def fetch_operands(self):
        return self.get_d_a(), self.get_d_b(), self.get_d_d(), self.get_n()
    def compute_result(self, *args):
        """NOTE(review): unlike the low-half variants, (d_a >> 16) is not masked to
        16 bits before the == 0x8000 comparison; if the shift is arithmetic the
        comparison may never hold -- confirm against the VEX shift semantics and manual.
        """
        d_a = args[0]
        d_b = args[1]
        d_d = args[2]
        n = args[3]
        # saturation mask: all-ones when both upper half-words are 0x8000 and n == 1
        sc = extend_to_16_bits(((d_a >> 16) == 0x8000) & ((d_b >> 16) == 0x8000) & (n == 1).cast_to(Type.int_32))
        # NOTE(review): masks look asymmetric (0x7fffffff vs sc^0xffff) -- confirm
        mul_res = (0x7fffffff & sc) | ((((d_a >> 16) * (d_b >> 16)) << n.value) & (sc^0xffff))
        result = d_d + mul_res
        # set flags
        c = 0
        v = overflow(result)
        av = advanced_overflow(result)
        psw = self.get_psw()
        cond_sv = (v == 0)
        cond_sav = (av == 0)
        # sticky bits: keep the old PSW bit when no overflow, otherwise force to 1
        sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
        sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
        psw = set_usb(psw, c, v, sv, av, sav)
        self.put(psw, "psw")
        return result
    def commit_result(self, res):
        self.put(res, self.get_dst_reg())
class RRR1_MADD_Q_43_1C_Inst(Instruction):
    """ Multiply-Add Q Format instruction:
        op = 0x43
        op2 = 0x1C
        User Status Flags: V, SV, AV, SAV
        Accumulates the 16x16 upper-half-word product into the register pair E[d],
        writing the result to the pair E[c] and updating the PSW flags.
    """
    name = 'RRR1_MADD.Q_43_1C'
    op = "{0}{1}".format(bin(4)[2:].zfill(4), bin(3)[2:].zfill(4))
    op2_1 = "{0}".format(bin(1)[2:].zfill(2))
    op2_2 = "{0}".format(bin(0xc)[2:].zfill(4))
    # field layout: op | b (src) | a (src) | op2 | n (shift) | c (dst pair) | d (accumulator pair)
    bin_format = op + 'b'*4 + 'a'*4 + op2_1 + op2_2 + 'n'*2 + 'c'*4 + 'd'*4
    def parse(self, bitstrm):
        # decode bit-string fields into integer register indices / shift amount
        data = Instruction.parse(self, bitstrm)
        data = {"a": int(data['a'], 2),
                "b": int(data['b'], 2),
                "c": int(data['c'], 2),
                "d": int(data['d'], 2),
                "n": int(data['n'], 2)}
        log_this(self.name, data, hex(self.addr))
        return data
    def get_dst_reg(self):
        # low word of the destination pair E[c]
        return "d{0}".format(self.data['c'])
    def get_psw(self):
        return self.get("psw", Type.int_32)
    def get_n(self):
        return self.constant(self.data['n'], Type.int_2)
    def get_d_b(self):
        return self.get("d{0}".format(self.data['b']), Type.int_32)
    def get_d_a(self):
        return self.get("d{0}".format(self.data['a']), Type.int_32)
    def fetch_operands(self):
        return self.get_d_a(), self.get_d_b(), self.get_n()
    def compute_result(self, *args):
        """Results are written directly to E[c] here (no commit_result); returns None.
        NOTE(review): (d_a >> 16) is not masked to 16 bits before the == 0x8000
        comparison, unlike the low-half variants -- confirm against the manual.
        """
        d_a = args[0]
        d_b = args[1]
        n = args[2]
        # saturation mask: all-ones when both upper half-words are 0x8000 and n == 1
        sc = extend_to_16_bits(((d_a >> 16) == 0x8000) & ((d_b >> 16) == 0x8000) & (n == 1).cast_to(Type.int_32))
        # NOTE(review): masks look asymmetric (0x7fffffff vs sc^0xffff) -- confirm
        mul_res = (0x7fffffff & sc) | ((((d_a >> 16) * (d_b >> 16)) << n.value) & (sc^0xffff))
        e_d_0 = self.get("d{0}".format(self.data['d']), Type.int_32) # E[d][31:0]
        e_d_1 = self.get("d{0}".format(self.data['d']+1), Type.int_32) # E[d][62:32]
        result_w0 = e_d_0 + (mul_res << 16)
        result_w1 = e_d_1
        # put results
        self.put(result_w0, "d{0}".format(self.data['c']))
        self.put(result_w1, "d{0}".format(self.data['c']+1))
        # prepare 64-bit object for setting flags
        result = result_w1
        result <<= 32
        result |= result_w0
        # set flags
        c = 0
        v = overflow_64(result).cast_to(Type.int_32)
        av = advanced_overflow_64(result).cast_to(Type.int_32)
        psw = self.get_psw()
        cond_sv = (v == 0)
        cond_sav = (av == 0)
        # sticky bits: keep the old PSW bit when no overflow, otherwise force to 1
        sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
        sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
        psw = set_usb(psw, c, v, sv, av, sav)
        self.put(psw, "psw")
class RRR1_MADDS_Q_43_22_Inst(Instruction):
    """ Multiply-Add Q Format, Saturated instruction:
        op = 0x43
        op2 = 0x22
        User Status Flags: V, SV, AV, SAV
    """
    name = 'RRR1_MADDS.Q_43_22'
    # Opcode bit patterns: op = 0x43; op2 split into 2-bit + 4-bit parts.
    op = "{0}{1}".format(bin(4)[2:].zfill(4), bin(3)[2:].zfill(4))
    op2_1 = "{0}".format(bin(2)[2:].zfill(2))
    op2_2 = "{0}".format(bin(2)[2:].zfill(4))
    # Field layout: opcode, b (4 bits), a (4), op2 parts, n (2), c (4), d (4).
    bin_format = op + 'b'*4 + 'a'*4 + op2_1 + op2_2 + 'n'*2 + 'c'*4 + 'd'*4

    def parse(self, bitstrm):
        """Decode register selectors a/b/c/d and shift amount n as ints."""
        data = Instruction.parse(self, bitstrm)
        data = {"a": int(data['a'], 2),
                "b": int(data['b'], 2),
                "c": int(data['c'], 2),
                "d": int(data['d'], 2),
                "n": int(data['n'], 2)}
        log_this(self.name, data, hex(self.addr))
        return data

    def get_dst_reg(self):
        """Name of the destination data register selected by field 'c'."""
        return "d{0}".format(self.data['c'])

    def get_psw(self):
        """Read the Program Status Word register as a 32-bit value."""
        return self.get("psw", Type.int_32)

    def get_n(self):
        """Shift amount n as a 2-bit constant."""
        return self.constant(self.data['n'], Type.int_2)

    def get_d_d(self):
        """Read accumulator register d[d] (32-bit)."""
        return self.get("d{0}".format(self.data['d']), Type.int_32)

    def get_d_b(self):
        """Read data register d[b] (32-bit)."""
        return self.get("d{0}".format(self.data['b']), Type.int_32)

    def get_d_a(self):
        """Read data register d[a] (32-bit)."""
        return self.get("d{0}".format(self.data['a']), Type.int_32)

    def fetch_operands(self):
        """Operands: d[a], d[b], accumulator d[d], and the shift amount n."""
        return self.get_d_a(), self.get_d_b(), self.get_d_d(), self.get_n()

    def compute_result(self, *args):
        """Multiply d[a] by d[b], shift by n, take the upper word, add to
        d[d] and saturate to 32-bit signed range; updates PSW flags
        V, SV, AV, SAV. Result is written to d[c] by commit_result.
        """
        d_a = args[0]
        d_b = args[1]
        d_d = args[2]
        n = args[3]
        result1 = d_d + (((d_a * d_b) << n.value) >> 32)
        # compute ssov32 (signed saturation to [INT32_MAX_NEG, INT32_MAX_POS])
        max_pos = self.constant(INT32_MAX_POS, Type.int_32)
        max_neg = self.constant(INT32_MAX_NEG, Type.int_32)
        result = ssov32(result1, max_pos, max_neg)
        # set flags
        c = 0  # carry is never produced by this instruction
        # NOTE(review): V/AV are derived from the saturated result — confirm
        # this matches the architecture manual's flag definition.
        v = overflow(result)
        av = advanced_overflow(result)
        psw = self.get_psw()
        cond_sv = (v == 0)
        cond_sav = (av == 0)
        # Sticky flags: preserve SV/SAV when no overflow, else set to 1.
        sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
        sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
        psw = set_usb(psw, c, v, sv, av, sav)
        self.put(psw, "psw")
        return result

    def commit_result(self, res):
        """Write the computed value to the destination register d[c]."""
        self.put(res, self.get_dst_reg())
class RRR1_MADDS_Q_43_3B_Inst(Instruction):
    """ Multiply-Add Q Format, Saturated instruction:
        op = 0x43
        op2 = 0x3B
        User Status Flags: V, SV, AV, SAV
    """
    name = 'RRR1_MADDS.Q_43_3B'
    # Opcode bit patterns: op = 0x43; op2 split into 2-bit + 4-bit parts.
    op = "{0}{1}".format(bin(4)[2:].zfill(4), bin(3)[2:].zfill(4))
    op2_1 = "{0}".format(bin(3)[2:].zfill(2))
    op2_2 = "{0}".format(bin(0xb)[2:].zfill(4))
    # Field layout: opcode, b (4 bits), a (4), op2 parts, n (2), c (4), d (4).
    bin_format = op + 'b'*4 + 'a'*4 + op2_1 + op2_2 + 'n'*2 + 'c'*4 + 'd'*4
def parse(self, bitstrm):
    """Decode register selectors a/b/c/d and shift amount n as ints."""
    data = Instruction.parse(self, bitstrm)
    data = {"a": int(data['a'], 2),
            "b": int(data['b'], 2),
            "c": int(data['c'], 2),
            "d": int(data['d'], 2),
            "n": int(data['n'], 2)}
    log_this(self.name, data, hex(self.addr))
    return data
def get_dst_reg(self):
    """Name of the destination data register selected by field 'c'."""
    return "d{0}".format(self.data['c'])
def get_psw(self):
    """Read the Program Status Word register as a 32-bit value."""
    return self.get("psw", Type.int_32)
def get_n(self):
    """Shift amount n as a 2-bit constant."""
    return self.constant(self.data['n'], Type.int_2)
def get_d_b(self):
    """Read data register d[b] (32-bit)."""
    return self.get("d{0}".format(self.data['b']), Type.int_32)
def get_d_a(self):
    """Read data register d[a] (32-bit)."""
    return self.get("d{0}".format(self.data['a']), Type.int_32)
def fetch_operands(self):
    """Operands: d[a], d[b] and the shift amount n."""
    return self.get_d_a(), self.get_d_b(), self.get_n()
def compute_result(self, *args):
d_a = args[0]
d_b = args[1]
n = args[2]
result_tmp = (d_a * d_b) << n.value
e_d_0 = self.get("d{0}".format(self.data['d']), Type.int_32) # E[d][31:0]
e_d_1 = self.get("d{0}".format(self.data['d']+1), Type.int_32) # E[d][62:32]
result_w0 = e_d_0 + result_tmp
result_w1 = e_d_1
# compute ssov32
max_pos = self.constant(INT32_MAX_POS, Type.int_32)
max_neg = self.constant(INT32_MAX_NEG, Type.int_32)
result_w0_ssov = ssov32(result_w0, max_pos, max_neg)
result_w1_ssov = ssov32(result_w1, max_pos, max_neg)
# put | |
None
def getDirty(self):
    """Return (saveDirty, updateDirty) boolean flags, derived from the
    respective dirty-since timestamps being set."""
    saveDirty = self.saveDirtySince is not None
    updateDirty = self.updateDirtySince is not None
    return (saveDirty, updateDirty)
def getDirtySince(self):
    """Return the raw (saveDirtySince, updateDirtySince) timestamps."""
    dirtyTimes = (self.saveDirtySince, self.updateDirtySince)
    return dirtyTimes
def setEditorText(self, text, dirty=True):
    """Set the editor text for this page.

    text -- new text, or None to drop the cached editor text
    dirty -- if True (and text is not None), mark the page dirty
    Silently ignored when the page is effectively read-only.
    """
    with self.textOperationLock:
        if self.isReadOnlyEffect():
            return

        super(DataCarryingPage, self).setEditorText(text)
        if text is None:
            if self.saveDirtySince is not None:
                """
                Editor text was removed although it wasn't in sync with
                database, so the self.liveTextPlaceHold must be updated,
                but self.saveDirtySince is set to None because
                self.editorText isn't valid anymore
                """
                self.saveDirtySince = None
                self.liveTextPlaceHold = object()
        else:
            if dirty:
                self.setDirty(True)
            # A fresh placeholder object invalidates anything cached
            # against the previous live text.
            self.liveTextPlaceHold = object()
def checkFileSignatureAndMarkDirty(self, fireEvent=True):
    # Base implementation: always report the file signature as valid.
    # NOTE(review): presumably overridden by subclasses that track an
    # on-disk file — confirm. fireEvent is unused here.
    return True
def markTextChanged(self):
    """
    Mark text as changed and cached pageAst as invalid.
    Mainly called when an external file change is detected.
    """
    # Replacing the placeholder object invalidates everything keyed on it.
    self.liveTextPlaceHold = object()
def getAttributeOrGlobal(self, attrkey, default=None):
    """
    Tries to find an attribute on this page and returns the first value.
    If it can't be found for page, it is searched for a global
    attribute with this name. If this also can't be found,
    default (normally None) is returned.
    """
    # Abstract in this base class; concrete page classes implement it.
    raise NotImplementedError #abstract
class AbstractWikiPage(DataCarryingPage):
"""
Abstract base for WikiPage and Versioning.WikiPageSnapshot
"""
def __init__(self, wikiDocument, wikiPageName):
    """Initialize caches and event sinks for a wiki page.

    wikiDocument -- owning wiki document
    wikiPageName -- real (unaliased) page name
    """
    DataCarryingPage.__init__(self, wikiDocument)

    self.livePageBasePlaceHold = None   # liveTextPlaceHold object on which
            # the livePageAst is based.
            # This is needed to check for changes when saving
    self.livePageBaseFormatDetails = None   # Cached format details on which the
            # page-ast bases

    # List of words unknown to spellchecker
    self.liveSpellCheckerUnknownWords = None
    # liveTextPlaceHold object on which the liveSpellCheckerUnknownWords is based.
    self.liveSpellCheckerUnknownWordsBasePlaceHold = None

    # Routes "modified spell checker session" events to our handler.
    self.__sinkWikiDocumentSpellSession = KeyFunctionSinkAR((
            ("modified spell checker session", self.onModifiedSpellCheckerSession),
    ))

    self.wikiPageName = wikiPageName
    self.childRelations = None       # cached child relations (None = not loaded)
    self.childRelationSet = set()
    self.todos = None                # cached todos (None = not loaded)
    self.attrs = None                # cached attributes (None = not loaded)
    self.modified, self.created, self.visited = None, None, None
    self.suggNewPageTitle = None  # Title to use for page if it is
            # newly created

#         if self.getWikiData().getMetaDataState(self.wikiPageName) != 1:
#             self.updateDirtySince = time.time()
def invalidate(self):
    """Invalidate the page and disconnect the spell-checker event sink."""
    super(AbstractWikiPage, self).invalidate()
    self.__sinkWikiDocumentSpellSession.setEventSource(None)
# TODO: Replace getWikiWord by getWikiPageName where appropriate
def getWikiWord(self):
    """
    Overwritten by AliasPage to return the alias name
    """
    return self.wikiPageName
def getWikiPageName(self):
    """
    This returns the real page name even for an AliasPage
    """
    return self.wikiPageName
def getTitle(self):
    """
    Return human readable title of the page.
    """
    # Same as the (possibly aliased) wiki word.
    return self.getWikiWord()
def getUnifiedPageName(self):
    """
    Return the unified name of this page: "wikipage/" followed by the
    wiki word (functional pages use their functional tag instead).
    """
    return u"".join((u"wikipage/", self.wikiPageName))
def getWikiDocument(self):
    """Return the wiki document this page belongs to."""
    return self.wikiDocument
def getWikiData(self):
    """Return the wiki data (database) object of the owning document."""
    return self.wikiDocument.getWikiData()
def getMetaDataState(self):
    """Return this page's meta-data state as stored in the wiki data."""
    return self.getWikiData().getMetaDataState(self.wikiPageName)
def addTxtEditor(self, txted):
    """
    Add txted to the list of editors (views) showing this page.
    """
    with self.txtEditorListLock:
        # First editor attaching: re-check the file signature and trigger
        # a meta-data update if it changed.
        if len(self.txtEditors) == 0:
            with self.textOperationLock:
                if not self.checkFileSignatureAndMarkDirty():
                    self.initiateUpdate()

        super(AbstractWikiPage, self).addTxtEditor(txted)

        # TODO Set text in editor here if first editor is created?
        # with self.txtEditorListLock:
        # NOTE(review): if the super call above already appended txted,
        # the branch below never runs — confirm intended control flow.
        if not txted in self.txtEditors:
            if len(self.txtEditors) == 0:
                with self.textOperationLock:
                    # We are assuming that editor content came from
                    # database
                    self.setEditorText(txted.GetText(), dirty=False)

            self.txtEditors.append(txted)
def getTimestamps(self):
    """
    Return tuple (<last mod. time>, <creation time>, <last visit time>)
    of this page.
    """
    # Lazily load timestamps from the wiki database on first access.
    if self.modified is None:
        self.modified, self.created, self.visited = \
                self.getWikiData().getTimestamps(self.wikiPageName)

    # Database had no timestamps: fall back to "now" for all three.
    if self.modified is None:
        ti = time.time()
        self.modified, self.created, self.visited = ti, ti, ti

    return self.modified, self.created, self.visited
def setTimestamps(self, timestamps):
    """Set (modified, created, visited) timestamps and persist them.

    Extra tuple items beyond the first three are discarded. Silently
    ignored when the page is effectively read-only.
    """
    if self.isReadOnlyEffect():
        return

    timestamps = timestamps[:3]
    self.modified, self.created, self.visited = timestamps

    self.getWikiData().setTimestamps(self.wikiPageName, timestamps)
def getSuggNewPageTitle(self):
    # Suggested title for the page if it is newly created (may be None).
    return self.suggNewPageTitle
def setSuggNewPageTitle(self, suggNewPageTitle):
    # Set the title to use if this page is newly created.
    self.suggNewPageTitle = suggNewPageTitle
def getParentRelationships(self):
    """Return the parent relations of this page from the wiki data."""
    return self.getWikiData().getParentRelationships(self.wikiPageName)
def getChildRelationships(self, existingonly=False, selfreference=True,
        withFields=(), excludeSet=frozenset(),
        includeSet=frozenset()):
    """
    get the child relations of this word
    existingonly -- List only existing wiki words
    selfreference -- List also wikiWord if it references itself
    withFields -- Seq. of names of fields which should be included in
        the output. If this is not empty, tuples are returned
        (relation, ...) with ... as further fields in the order mentioned
        in withfields.

        Possible field names:
            "firstcharpos": position of link in page (may be -1 to represent
                unknown)
            "modified": Modification date

    excludeSet -- set of words which should be excluded from the list
    includeSet -- wikiWords to include in the result

    Does not support caching
    """
    with self.textOperationLock:
        wikiData = self.getWikiData()
        wikiDocument = self.getWikiDocument()

        if withFields is None:
            withFields = ()

        relations = wikiData.getChildRelationships(self.wikiPageName,
                existingonly, selfreference, withFields=withFields)

        if len(excludeSet) > 0:
            # Filter out members of excludeSet
            # With fields the relation word is the first tuple item.
            if len(withFields) > 0:
                relations = [r for r in relations if not r[0] in excludeSet]
            else:
                relations = [r for r in relations if not r in excludeSet]

        if len(includeSet) > 0:
            # First unalias wiki pages and remove non-existing ones
            clearedIncSet = set()
            for w in includeSet:
                w = wikiDocument.getWikiPageNameForLinkTerm(w)
                if w is None:
                    continue

#                 if not wikiDocument.isDefinedWikiLinkTerm(w):
#                     continue

                clearedIncSet.add(w)

            # Then remove items already present in relations
            if len(clearedIncSet) > 0:
                if len(withFields) > 0:
                    for r in relations:
                        clearedIncSet.discard(r[0])
                else:
                    for r in relations:
                        clearedIncSet.discard(r)

            # Now collect info
            if len(clearedIncSet) > 0:
                relations += [wikiData.getExistingWikiWordInfo(r,
                        withFields=withFields) for r in clearedIncSet]

        return relations
def getAttributes(self):
    """Return dict {attrKey: [value, ...]} of this page's attributes,
    loaded from the wiki data on first access and cached afterwards."""
    with self.textOperationLock:
        if self.attrs is not None:
            return self.attrs

    data = self.getWikiData().getAttributesForWord(self.wikiPageName)

    # NOTE(review): the cache is filled outside the lock; the commented
    # re-check below suggests a known race — confirm if intended.
    # with self.textOperationLock:
    #     if self.attrs is not None:
    #         return self.attrs

    self.attrs = {}
    for (key, val) in data:
        self._addAttribute(key, val)

    return self.attrs

getProperties = getAttributes  # TODO remove "property"-compatibility
def getAttribute(self, attrkey, default=None):
    """Return the last value set for attribute *attrkey* on this page,
    or *default* if the page has no such attribute.
    """
    with self.textOperationLock:
        attrs = self.getAttributes()
        # dict.has_key() is Python-2-only (removed in Python 3); the
        # "in" operator is equivalent and works on both.
        if attrkey in attrs:
            return attrs[attrkey][-1]
        else:
            return default
def getAttributeOrGlobal(self, attrkey, default=None):
    """
    Tries to find an attribute on this page and returns the first value.
    If it can't be found for page, it is searched for a global
    attribute with this name. If this also can't be found,
    default (normally None) is returned.
    """
    with self.textOperationLock:
        attrs = self.getAttributes()
        # dict.has_key() is Python-2-only (removed in Python 3); the
        # "in" operator is equivalent and works on both.
        if attrkey in attrs:
            return attrs[attrkey][-1]

        # Fall back to the wiki-wide "global.<key>" attribute.
        globalAttrs = self.getWikiData().getGlobalAttributes()

        attrkey = u"global." + attrkey
        if attrkey in globalAttrs:
            return globalAttrs[attrkey]

        # Last resort: an application-level configured default, if one
        # is registered for this attribute.
        option = "attributeDefault_" + attrkey
        config = wx.GetApp().getGlobalConfig()
        if config.isOptionAllowed("main", option):
            return config.get("main", option, default)

        return default

getPropertyOrGlobal = getAttributeOrGlobal  # TODO remove "property"-compatibility
def _addAttribute(self, key, val):
values = self.attrs.get(key)
if not values:
values = []
self.attrs[key] = values
values.append(val)
# def getTodos(self):
# with self.textOperationLock:
# if self.todos is None:
# self.todos = self.getWikiData().getTodosForWord(self.wikiPageName)
#
# return self.todos
def getAnchors(self):
    """
    Return sequence of anchors in page
    """
    ast = self.getLivePageAst()
    return [anchorNode.anchorLink
            for anchorNode in ast.iterDeepByName("anchorDef")]
def getLiveTextNoTemplate(self):
    """
    Return None if page isn't existing instead of creating an automatic
    live text (e.g. by template).
    """
    with self.textOperationLock:
        if self.getTxtEditor() is not None:
            return self.getLiveText()
        if self.isDefined():
            return self.getContent()
        return None
def getFormatDetails(self):
    """
    According to currently stored settings, return a
    ParseUtilities.WikiPageFormatDetails object to describe
    formatting
    """
    with self.textOperationLock:
        # Page/global attributes drive the parser options.
        withCamelCase = strToBool(self.getAttributeOrGlobal(
                u"camelCaseWordsEnabled"), True)

#         footnotesAsWws = self.wikiDocument.getWikiConfig().getboolean(
#                 "main", "footnotes_as_wikiwords", False)

        autoLinkMode = self.getAttributeOrGlobal(u"auto_link", u"off").lower()

        paragraphMode = strToBool(self.getAttributeOrGlobal(
                u"paragraph_mode"), False)

        # Language helper for the wiki's default language supplies the
        # language-specific details.
        langHelper = wx.GetApp().createWikiLanguageHelper(
                self.wikiDocument.getWikiDefaultWikiLanguage())

        wikiLanguageDetails = langHelper.createWikiLanguageDetails(
                self.wikiDocument, self)

        return ParseUtilities.WikiPageFormatDetails(
                withCamelCase=withCamelCase,
                wikiDocument=self.wikiDocument,
                basePage=self,
                autoLinkMode=autoLinkMode,
                paragraphMode=paragraphMode,
                wikiLanguageDetails=wikiLanguageDetails)
def isDefined(self):
    """True iff this wiki word actually exists in the wiki document."""
    return self.getWikiDocument().isDefinedWikiPageName(self.getWikiWord())
@staticmethod
def extractAttributeNodesFromPageAst(pageAst):
    """
    Return an iterator of attribute nodes in pageAst. This does not return
    attributes inside of todo entries.
    """
    # Complicated version for compatibility with old language plugins
    # which emit "property" instead of "attribute" nodes.
    # TODO 2.4 remove "property"-compatibility
    return Utilities.iterMergesort((
            pageAst.iterUnselectedDeepByName("attribute",
            frozenset(("todoEntry",))),
            pageAst.iterUnselectedDeepByName("property",
            frozenset(("todoEntry",))) ),
            key=lambda n: n.pos)

    # Simple one for later
#         return pageAst.iterUnselectedDeepByName("attribute",
#                 frozenset(("todoEntry",)))
@staticmethod
def extractTodoNodesFromPageAst(pageAst):
    """
    Return an iterator of todo nodes in pageAst.
    """
    return pageAst.iterDeepByName("todoEntry")
def _save(self, text, fireEvent=True):
    """
    Saves the content of current doc page.
    """
    # No-op in this base implementation.
    pass
def setPresentation(self, data, startPos):
    """
    Set (a part of) the presentation tuple. This is silently ignored
    if the "write access failed" or "read access failed" flags are
    set in the wiki document.
    data -- tuple with new presentation data
    startPos -- start position in the presentation tuple which should be
        overwritten with data.
    """
    raise NotImplementedError  # abstract
def initiateUpdate(self, fireEvent=True):
    """
    Initiate update of page meta-data. This function may call update
    directly if can be done fast
    """
    # No-op in this base implementation.
    pass
def getLivePageAstIfAvailable(self):
"""
Return the current, up-to-date page AST if available, None otherwise
"""
with self.textOperationLock:
| |
# calculate_ICEO.py
"""
Notes
"""
# import modules
import numpy as np
import matplotlib.pyplot as plt
def calculate_ICEO(testSetup, testCol, plot_figs=False, savePath=None):
# write script to calculate and output all of the below terms using the testSetup class
"""
Required Inputs:
# physical constants
eps_fluid: permittivity of water (F/m2) CurlypivTestSetup.chip.material_fluid.permittivity
eps_dielectric: permittivity of sio2 () CurlypivTestSetup.chip.bpe.dielectric_coating.permittivity
T: temperature (K) CurlypivTestSetup.chip.material_fluid.temperature
# material properties
rho: density (kg/m3) depends on the instance
mu: dynamic viscosity (m2/s) CurlypivTestSetup.chip.material_fluid.viscosity
sigma: electrical conductivity (S/m) CurlypivTestSetup.chip.material_fluid.conductivity
zeta: zeta potential (V) depends on the instance
Ns: surface site density (#/nm2) CurlypivTestSetup.chip.material_fluid.reaction_site_density
Ka: reaction equilibrium constant () CurlypivTestSetup.chip.material_fluid.Ka
a_h: bulk concentration of protons (mmols) (I think this is just pH) CurlypivTestSetup.chip.material_fluid.pH
# geometries
l: characteristic length scale (m) CurlypivTestSetup.chip.channel.height
l_bpe: length of bpe (m) CurlypivTestSetup.chip.bpe.length
d: thickness of sio2 dielectric (m) CurlypivTestSetup.chip.bpe.dielectric_coating.thickness
# experimental inputs
x: location (m) * need to write * array of locations along BPE length for instanteous induced zeta calc.
t: time (s) * need to write * array of times in a periodic cycle for instanteous zeta calc.
f: frequency (1/s) * need to write * CurlypivTestCollection.locs.tests.test_id[1]
E: electric field strength (V/m) * need to write * CurlypivTestCollection.locs.tests.test_id[0]
# outputs
lamb: debye length (m)
Cd: capacitance of dielectric (F/m2) # needs to be scaled by BPE surface area
Cdl_linear: linear double layer capacitance (F/m2) # needs to be scaled by BPE surface area
Cdl_nonlinear: nonlinear double layer cap. (F/m2) # needs to be scaled by BPE surface area
Cbuff: buffer capacitance (F/m2) * = 0.024 from Squires * # needs to be scaled by BPE surface area
Ctotal: total capacitance (F/m2) # needs to be scaled by BPE surface area
U: characteristic velocity (m/s)
Re: Reynolds number ()
U_HS: Helmholtz Smoluchowski velocity (m/s)
U_slip: slip velocity (m/s)
tau: Double layer charging time (s)
zeta_qu_steady: quasi-steady induced zeta (V)
U_quasi_steady: quasi-steady slip velocity (m/s)
zeta_highfreq: high-frequency induced zeta (V)
U_highfreq: high-frequency slip velocity (m/s)
"""
# define variables here to simplify
# identities of components
dielectric_material = testSetup.chip.bpe.dielectric_coating.name
electrolyte_material = testSetup.chip.channel.material_fluid.name
# mechanical
mu = testSetup.chip.channel.material_fluid.viscosity
rho = testSetup.chip.channel.material_fluid.density
T = testSetup.chip.channel.material_fluid.temperature
# electro/chemical
eps_fluid = testSetup.chip.channel.material_fluid.permittivity
eps_dielectric = testSetup.chip.bpe.dielectric_coating.permittivity
reaction_site_density = testSetup.chip.bpe.dielectric_coating.reaction_site_density
Ka = testSetup.chip.bpe.dielectric_coating.Ka
Kb = testSetup.chip.bpe.dielectric_coating.Kb
pH = testSetup.chip.channel.material_fluid.pH
zeta_wall = testSetup.chip.channel.material_bottom_wall_surface.zeta
c = testSetup.chip.channel.material_fluid.concentration
sigma = testSetup.chip.channel.material_fluid.conductivity
# geometry
L = testSetup.chip.channel.length
L_bpe = testSetup.chip.bpe.length
x = testSetup.chip.bpe.linspace_x
channel_height = testSetup.chip.channel.height
dielectric_thickness = testSetup.chip.bpe.dielectric_coating.thickness
# PIV
img_acq_rate = testSetup.optics.microscope.ccd.img_acq_rate
dt = 1 / img_acq_rate # (s) time between images
p_d = testSetup.optics.fluorescent_particles.diameter
microns_to_pixels = 1/testSetup.optics.microscope.objective.pixel_to_micron
u_slip_error_scale = 0.3
# print PIV stats
dx_brownian = calc_brownian_displacement(dt, mu, p_d, T)
print("Brownian displacement: {} for {} particle diameter and {} time step".format(dx_brownian, p_d, dt))
print("Squires recommended: U_min_acceptable > {} um/s ({} pix/frame) or 20% of Brownian motion".format(np.round(dx_brownian*1e6/dt*0.2,2), np.round(microns_to_pixels*dx_brownian*1e6/(dt*img_acq_rate)*0.2,2)))
# extract the test collection test parameters
test_params = []
for key in testCol.locs:
loc = testCol.locs[key]
loc_tests = loc.tests
for ky in loc_tests:
test_keys = loc_tests[ky]
test_params.append((test_keys._E, test_keys._f))
# initialize output data arrays
electric_fields = []
frequencys = []
dielectrics = []
buffers = []
UbyUo = []
raw_uvel_max = []
raw_slope = []
betas = []
deltas = []
taus = []
d_eps = []
d_pKa = []
d_Ns = []
d_thick = []
b_conc = []
b_conduct = []
b_pH = []
b_viscosity = []
b_eps = []
b_debye = []
voltages = []
electrode_spacings = []
# Non-Squires terms
uvel_brownian_error_steady = []
uvel_brownian_error_quasisteady = []
uvel_brownian_error_highfreq = []
# iterate through test parameters
for i in range(len(test_params)):
# iterables
V_channel = test_params[i][0]
f = test_params[i][1]
# calculate intermediaries
E = V_channel/L
t = np.linspace(0, 1/f, num=100)
w = calc_w(f=f)
lamb = calc_lamb(eps_fluid=eps_fluid, c=c, T=T)
Cd = calc_dielectric_capacitance(eps=eps_dielectric, d=dielectric_thickness)
Cdl_linear = calc_linear_doublelayer_capacitance(eps=eps_fluid, lamb=lamb)
Cbuff = calc_buffer_capacitance(Cbuff_input=0.024)
C_bare_metal = Cdl_linear + Cbuff
total_capacitance, beta, delta = calc_total_capacitance(eps_fluid=eps_fluid, lamb=lamb, Cdl=Cdl_linear, Cd=Cd, Cbuff=Cbuff)
tau = calc_RC_via_bulk_time(capacitance=Cdl_linear, L=L_bpe, sigma=sigma)
# calculate background flow
u_HS = calc_U_HS(eps=eps_fluid, zeta=zeta_wall, E=E, mu=mu)
Re = calc_Re(rho=rho, U=u_HS, l=channel_height, mu=mu)
# calculate slip flow (DC)
zeta_induced = calc_zeta_induced(E=E, x=x)
u_slip = calc_U_slip(eps=eps_fluid, E=E, x=x, mu=mu)
slope_x = 40
u_slip_slope = u_slip[slope_x:len(u_slip)-slope_x]
# calculate the Brownian error for quasi-steady slip flow
error_brownian_steady = calc_brownian_error(U_estimated=u_slip, u_scale=u_slip_error_scale, dt=dt, viscosity=mu, particle_diameter=p_d, temperature=T)
# calculate slip flow (quasi-steady)
zeta_induced_quasisteady = calc_zeta_induced_quasisteady(E=E, x=x)
u_slip_quasisteady = calc_U_slip_quasisteady(eps=eps_fluid, E=E, x=x, mu=mu)
# calculate the Brownian error for quasi-steady slip flow
error_brownian_quasisteady = calc_brownian_error(U_estimated=u_slip_quasisteady, u_scale=u_slip_error_scale, dt=dt, viscosity=mu, particle_diameter=p_d, temperature=T)
# calculate slip flow (high-frequency)
zeta_induced_highfreq = calc_zeta_induced_highfreq(Re=Re, E=E, x=x, w=w, t=t, tau=tau)
u_slip_highfreq = calc_U_slip_highfreq(eps=eps_fluid, E=E, x=x, mu=mu, tau=tau, f=f)
# calculate the Brownian error for quasi-steady slip flow
error_brownian_highfreq = calc_brownian_error(U_estimated=u_slip_quasisteady, u_scale=0.1, dt=dt, viscosity=mu, particle_diameter=p_d, temperature=T)
# calculate induced zeta with linear zeta and dielectric coating
zeta_induced_Clamb_Cd_linear = calc_zeta_induced_Clamb_Cd(E=E, x=x, Cdl=Cdl_linear, Cd=Cd)
# calculate induced zeta with nonlinear zeta and dielectric coating
Cdl_nonlinear = calc_nonlinear_doublelayer_capacitance(eps_fluid, lamb=lamb, zeta=zeta_induced_Clamb_Cd_linear)
zeta_induced_Clamb_Cd_nonlinear = calc_zeta_induced_Clamb_Cd(E, x, Cdl=Cdl_nonlinear, Cd=Cd)
# calculate induced zeta with total capacitance
zeta_induced_total_capacitance = calc_zeta_induced_total_capacitance(E=E, x=x, Cdl=Cdl_linear, Cd=Cd, Cbuff=Cbuff)
u_ratio_slip_to_HS = U_ratio_slip_to_HS(Cdl=Cdl_linear, Cd=Cd, Cbuff=Cbuff)
# calculate some Squires specific data
slope_x = 40
u_slope = (u_slip_quasisteady[-slope_x]-u_slip_quasisteady[slope_x]) / (x[-slope_x]-x[slope_x])
u_UbyUo = -u_slope / np.max(u_slip_slope)
# plot important metrics
if plot_figs is True:
import matplotlib as mpl
from cycler import cycler
mpl.rc('lines', linewidth=4, linestyle='-')
mpl.rcParams['axes.prop_cycle'] = cycler(color=['r', 'g', 'b', 'y'])
fig, axes = plt.subplots(nrows=3, sharex=True, figsize=(13,10))
ax = axes.ravel()
ax[0].plot(x*1e6, zeta_induced*1e3, label=r'$steady$')
ax[0].plot(x*1e6, zeta_induced_quasisteady*1e3, label=r'$quasi-steady$')
ax[0].plot(x * 1e6, zeta_induced_highfreq * 1e3, label=r'$high-frequency$')
ax[0].plot(x * 1e6, zeta_induced_Clamb_Cd_linear * 1e3, label=r'$C_{\lambda}+C_d (linear)$')
ax[0].plot(x * 1e6, zeta_induced_Clamb_Cd_nonlinear * 1e3, label=r'$C_{\lambda}+C_d (non linear)$')
ax[0].plot(x * 1e6, zeta_induced_total_capacitance * 1e3, label=r'$C_{total}$')
ax[1].plot(x*1e6, u_slip*1e6, label=r'$steady$')
ax[1].plot(x*1e6, u_slip_quasisteady*1e6, label=r'$quasi-steady$')
ax[1].plot(x * 1e6, u_slip_highfreq * 1e6, label=r'$high frequency$')
ax[2].plot(x*1e6, error_brownian_steady, label=r'$error_{steady}$')
ax[2].plot(x*1e6, error_brownian_quasisteady, label=r'$error_{quasi-steady}$')
ax[2].plot(x*1e6, error_brownian_highfreq, label=r'$error_{high-frequency}$')
ax[2].axhline(y=-0.2, xmin=x[0]*1e6, xmax=x[-1]*1e6, color='gray', linestyle='dashed', linewidth=2, alpha=0.65, label=r'$error_{max-acceptable}$')
ax[2].axhline(y=0.2, xmin=x[0] * 1e6, xmax=x[-1] * 1e6, color='gray', linestyle='dashed', linewidth=2, alpha=0.65,)
ax[0].set_ylabel(r'$\zeta_{induced} (mV)$')
ax[0].legend(fancybox=True, loc="upper left", bbox_to_anchor=[1.01, 1])
ax[1].set_ylabel(r'$U_{slip, induced} (\mu m/s)$')
ax[1].legend(fancybox=True, loc="upper left", bbox_to_anchor=[1.01, 1])
ax[2].set_ylim(bottom=-0.5, top=0.5)
ax[2].set_ylabel(r'$\epsilon_{x} (\frac{\sigma_{x}}{\Delta x})$')
ax[2].set_xlabel(r'$x (\mu m)$')
ax[2].set_title((r'Relative Error $(\Delta x = $')+str(u_slip_error_scale*100)+(r'% of $\frac{U_{slip}}{\Delta t})$'))
ax[2].legend(fancybox=True, loc="upper left", bbox_to_anchor=[1.01, 1])
plt.suptitle('BPE-ICEO: E={} V/mm, f={} Hz'.format(E*1e-3, int(f)))
plt.tight_layout()
plt.show()
# compile into dictionary
iceo_stats_dict = {
'electric_field_strength': E,
'frequency': f,
'fluid': electrolyte_material,
'fluid_viscosity': mu,
'fluid_density': rho,
'fluid_temperature': T,
'fluid_pH': pH,
'fluid_concentration': c,
'fluid_conductivity': sigma,
'fluid_permittivity': eps_fluid,
'l_bpe': L_bpe,
'dielectric': dielectric_material,
'dielectric_thickness': dielectric_thickness,
'solid_permittivity': eps_dielectric,
'solid_reaction_site_density': reaction_site_density,
'solid_Ka': Ka,
'solid_zeta': zeta_wall,
'channel_height': channel_height,
'u_HS': u_HS,
'flow_Re': Re,
'capacitance_dielectric': Cd,
'capacitance_Cdl_linear': Cdl_linear,
#'capacitance_Cdl_nonlinear': Cdl_nonlinear, # should be plotted
'capacitance_Cbuff': Cbuff,
'capacitance_total': total_capacitance,
'beta': beta,
'delta': delta,
'tau': tau,
'debye_length': lamb,
'max_zeta_induced': np.max(zeta_induced),
'max_zeta_induced_quasisteady': np.max(zeta_induced_quasisteady), # should be plotted
'max_zeta_induced_highfreq': np.max(zeta_induced_highfreq), # should be plotted
'max_zeta_induced_Clamb_Cd_linear': np.max(zeta_induced_Clamb_Cd_linear), # should be plotted
'max_zeta_induced_Clamb_Cd_nonlinear': np.max(zeta_induced_Clamb_Cd_nonlinear), # should be plotted
'max_zeta_induced_total_capacitance': np.max(zeta_induced_total_capacitance), # should be plotted
'max_u_slip': np.max(u_slip), # should be plotted
'u_UbyUo': u_UbyUo,
'max_u_slip_quasisteady': np.max(u_slip_quasisteady), # should be plotted
'max_u_slip_highfreq': np.max(u_slip_highfreq), # should be plotted
'u_ratio_slip_to_HS': u_ratio_slip_to_HS
}
# append to storage list
electric_fields.append(E)
frequencys.append(f)
dielectrics.append(dielectric_material)
buffers.append(electrolyte_material)
UbyUo.append(u_UbyUo)
raw_uvel_max.append(np.max(u_slip))
uvel_brownian_error_quasisteady.append(error_brownian_quasisteady)
uvel_brownian_error_highfreq.append(error_brownian_highfreq)
raw_slope.append(u_slope)
betas.append(beta)
deltas.append(delta)
taus.append(tau)
d_eps.append(eps_dielectric)
d_pKa.append(Ka)
d_Ns.append(reaction_site_density)
d_thick.append(dielectric_thickness)
b_conc.append(c)
b_conduct.append(sigma)
b_pH.append(pH)
b_viscosity.append(mu)
b_eps.append(eps_fluid)
b_debye.append(lamb)
voltages.append(V_channel)
electrode_spacings.append(L)
# make numpy arrays of correct datatype
# append to storage list
electric_fields = np.array(electric_fields, dtype=float)
frequencys = np.array(frequencys, dtype=float)
dielectrics = np.array(dielectrics, dtype=str)
buffers = np.array(buffers, dtype=str)
UbyUo = np.array(UbyUo, dtype=float)
raw_uvel_max = np.array(raw_uvel_max, dtype=float)
uvel_brownian_error_quasisteady = np.array(uvel_brownian_error_quasisteady, dtype=float)
uvel_brownian_error_highfreq = np.array(uvel_brownian_error_highfreq, dtype=float)
raw_slope = np.array(raw_slope, dtype=float)
betas = np.array(betas, dtype=float)
deltas = np.array(deltas, dtype=float)
taus = np.array(taus, dtype=float)
d_eps = np.array(d_eps, dtype=float)
d_pKa = np.array(d_pKa, dtype=float)
d_Ns = np.array(d_Ns, dtype=float)
d_thick = np.array(d_thick, dtype=float)
b_conc = np.array(b_conc, dtype=float)
b_conduct = np.array(b_conduct, dtype=float)
b_pH = np.array(b_pH, dtype=float)
b_viscosity = np.array(b_viscosity, dtype=float)
b_eps = np.array(b_eps, dtype=float)
b_debye = np.array(b_debye, dtype=float)
voltages = np.array(voltages, dtype=float)
electrode_spacings = np.array(electrode_spacings, dtype=float)
iceo_stats = np.vstack((electric_fields, frequencys, dielectrics, buffers,
UbyUo, raw_uvel_max, raw_slope, betas, deltas, taus,
d_eps, d_pKa, d_Ns, d_thick,
b_conc, b_conduct, b_pH, b_viscosity, b_eps, b_debye,
voltages, electrode_spacings)).T
header = "electric_fields,frequencys,dielectrics,buffers,UbyUo,raw_uvel_max,raw_slope,beta,delta,tau,d_eps,d_pKa,d_Ns,d_thick,b_conc,b_conduct,b_pH,b_viscosity,b_eps,b_debye,voltages,electrode_spacings"
if savePath:
# Write to .csv file
np.savetxt(savePath, iceo_stats, | |
return new_offset+value, \
data_list[new_offset:new_offset+value].decode("utf-8")
elif code == 21:
dtime = OrderedDict()
new_offset, year = get_from_list(data_list, start_offset, 15) # USHORT
year = 1900 + year
dtime['Y'] = year
v1 = ord(data_list[new_offset:new_offset+1])
new_offset += 1
result = bin(v1)[2:].zfill(8)
tz = result[0:4]
m = result[4:8]
dtime['TZ'] = tz
dtime['M'] = m
new_offset, day = get_from_list(data_list, new_offset, 15) # USHORT
dtime['D'] = day
new_offset, hours = get_from_list(data_list, new_offset, 15) # USHORT
dtime['H'] = hours
new_offset, minutes = get_from_list(data_list, new_offset, 15) # USHORT
dtime['MN'] = minutes
new_offset, seconds = get_from_list(data_list, new_offset, 15) # USHORT
dtime['S'] = seconds
new_offset, milliseconds = get_from_list(data_list, new_offset, 16) # UNORM
dtime['MS'] = milliseconds
return new_offset, dtime
elif code == 23:
new_offset, O = get_from_list(data_list, start_offset, 22) # ORIGIN
new_offset, C = get_from_list(data_list, new_offset, 15) # USHORT
new_offset, I = get_from_list(data_list, new_offset, 19) # IDENT
return new_offset, (O, C, I)
# O = Origin Reference
# C = Copy Number
# I = Identifier
elif code == 24:
new_offset, T = get_from_list(data_list, start_offset, 19) # IDENT
new_offset, N = get_from_list(data_list, new_offset, 23) # OBNAME
objref = OrderedDict()
objref['T'] = T
objref['N'] = N
# T = obj type - N = obj name
return new_offset, objref
elif code == 25:
new_offset, T = get_from_list(data_list, start_offset, 19) # IDENT
new_offset, N = get_from_list(data_list, start_offset, 23) # OBNAME
new_offset, T = get_from_list(data_list, start_offset, 19) # IDENT
raise Exception()
# T = Object Type
# N = Object Name
# L = Attribute Label
elif code == 26:
new_offset, value = get_from_list(data_list, start_offset, 15) # USHORT
if value == 0:
return False
if value == 1:
return True
raise Exception()
elif code == 28:
v1 = ord(data_list[start_offset:start_offset+1])
result = bin(v1)[2:].zfill(8)
ret = []
for i in range(len(result)):
ret.append(int(result[i]))
return start_offset+1, ret
"""
0: Logical Record Structure
0 = Indirectly Formatted Logical Record
1 = Explicitly Formatted Logical Record
1: Predecessor
0 = This is the first segment of the Logical Record
1 = This is not the first segment of the Logical Record
2: Successor
0 = This is the last Segment of the Logical Record.
1 = This is not the last Segment of the Logical Record
3: Encryption
0 = No encryption.
1 = Logical Record is encrypted
4: Encryption Packet
0 = No Logical Record Segment Encryption Packet
1 = Logical Record Segment Encryption Packet is present
5: Checksum
0 = No checksum
1 = A checksum is present in the LRST
6: Trailing Length
0 = No Trailing Length
        1 = A copy of the LRS length is present in the LRST
7: Padding
0 = No record padding
1 = Pad bytes are present in LRST
"""
"""
Given a Explicitly Formatted Logical Record (EFLR) code, returns its type,
description and allowed set types.
"""
'''
def get_EFLR_for_code(EFLR_code):
if not isinstance(EFLR_code, int):
raise Exception('EFLR_code must be a int value.')
if EFLR_code < 0 or EFLR_code > 127:
raise Exception('EFLR code does not exist.')
if EFLR_code > 11:
raise Exception('Undefined or reserved EFLR code are not available at this time.')
ret = {}
if EFLR_code == 0:
ret['type'] = 'FHLR'
ret['desc'] = 'File Header'
ret['allow'] = ['FILE-HEADER']
elif EFLR_code == 1:
ret['type'] = 'OLR'
ret['desc'] = 'Origin'
ret['allow'] = ['ORIGIN', 'WELL-REFERENCE']
elif EFLR_code == 2:
ret['type'] = 'AXIS'
ret['desc'] = 'Coordinate Axis'
ret['allow'] = ['AXIS']
elif EFLR_code == 3:
ret['type'] = 'CHANNL'
ret['desc'] = 'Channel-related information'
ret['allow'] = ['CHANNEL']
elif EFLR_code == 4:
ret['type'] = 'FRAME'
ret['desc'] = 'Frame Data'
ret['allow'] = ['FRAME', 'PATH']
elif EFLR_code == 5:
ret['type'] = 'STATIC'
ret['desc'] = 'Static Data'
ret['allow'] = ['CALIBRATION', 'CALIBRATION-COEFFICIENT', \
'CALIBRATION-MEASUREMENT', 'COMPUTATION', 'EQUIPMENT', 'GROUP',\
'PARAMETER', 'PROCESS', 'SPICE', 'TOOL', 'ZONE']
elif EFLR_code == 6:
ret['type'] = 'SCRIPT'
ret['desc'] = 'Textual Data'
ret['allow'] = ['COMMENT']
elif EFLR_code == 7:
ret['type'] = 'UPDATE'
ret['desc'] = 'Update Data'
ret['allow'] = ['UPDATE']
elif EFLR_code == 8:
ret['type'] = 'UDI'
ret['desc'] = 'Unformatted Data Identifier'
ret['allow'] = ['NO-FORMAT']
elif EFLR_code == 9:
ret['type'] = 'LNAME'
ret['desc'] = 'Long Name'
ret['allow'] = ['LONG-NAME']
elif EFLR_code == 10:
ret['type'] = 'SPEC'
ret['desc'] = 'Specification'
ret['allow'] = ['ATTRIBUTE', 'CODE', 'EFLR', 'IFLR', 'OBJECT-TYPE',\
'REPRESENTATION-CODE', 'SPECIFICATION', 'UNIT-SYMBOL']
elif EFLR_code == 11:
ret['type'] = 'DICT'
ret['desc'] = 'Dictionary'
ret['allow'] = ['BASE-DICTIONARY', 'IDENTIFIER', 'LEXICON', 'OPTION']
return ret
'''
def get_objname_from_tuple(obj_name_tuple):
    """Build the full object name string (e.g. ``0&0&DEFINING_ORIGIN``)
    from an (Origin, Copy number, Identifier) tuple.
    """
    origin, copy_number, identifier = obj_name_tuple
    return '&'.join((str(origin), str(copy_number), identifier))
def get_actual_objname(full_object_name):
    """Given an object's full string name (e.g. ``0&0&DEFINING_ORIGIN``),
    return only its identifier part (e.g. ``DEFINING_ORIGIN``).
    """
    parts = full_object_name.split('&')
    return parts[2]
class RepresentationCodes(object):
    """Singleton holding the DLIS representation-code table, loaded from
    ``representation_codes.json`` located next to this module.
    """

    # Lazily-created singleton instance; populated by start().
    instance = None

    def __init__(self):
        # The JSON table lives in the same directory as this module.
        module_dir = os.path.dirname(os.path.abspath(__file__))
        table_path = os.path.join(module_dir, 'representation_codes.json')
        self.codes = app.app_utils.read_json_file(table_path)

    @classmethod
    def start(cls):
        """Create the singleton on first use; later calls are no-ops."""
        if cls.instance is None:
            cls.instance = RepresentationCodes()

    @classmethod
    def get_code(cls, code):
        """Return the table entry for the 1-based representation ``code``,
        or None when the singleton has not been started yet.
        """
        if not cls.instance:
            return None
        return cls.instance.codes[code - 1]
class DLISObjectPool(object):
    """Class-level registry of DLIS Logical Records and Objects.

    All state lives in class attributes, so the pool acts as a process-wide
    singleton; init_pool() resets it. Data is grouped per logical file,
    keyed by ``current_file_number``.
    """
    # Index of the logical file currently being read (-1 = none yet).
    current_file_number = -1
    # Index of the logical record currently being read within that file.
    current_lr = -1
    # file_number -> OrderedDict(lr_index -> set description dict).
    lrs = None
    # file_number -> OrderedDict(object full name -> values list).
    objects = None
    # file_number -> OrderedDict(LR type -> list of object full names).
    lr_to_object = None
    # file_number -> OrderedDict(object full name -> LR type).
    object_to_lr = None
    @classmethod
    def init_pool(cls):
        """Init DLISObjectPool attributes.
        """
        cls.current_file_number = -1
        cls.current_lr = -1
        cls.lrs = OrderedDict()
        cls.objects = OrderedDict()
        cls.lr_to_object = OrderedDict()
        cls.object_to_lr = OrderedDict()
    @classmethod
    def register_logical_record(cls, lr_structure_type, lr_type, lr_code):
        """Register a new Logical Record, with its structure type, LR type,
        LR code.

        ``lr_structure_type`` must be 0 (IFLR) or 1 (EFLR). A
        'FILE-HEADER' record starts a new logical file and resets the
        per-file record counter.
        """
        if lr_structure_type != 0 and lr_structure_type != 1:
            raise Exception('Logical Record Structure type invalid. ' +
                            'Valid types are 0 for IFLRs or 1 for EFLR.')
        # Starting a new logical file
        if lr_type == 'FILE-HEADER':
            # Lazily initialize the pool on the very first record.
            if cls.lrs is None:
                cls.init_pool()
            cls.current_file_number += 1
            cls.lrs[cls.current_file_number] = OrderedDict()
            cls.lr_to_object[cls.current_file_number] = OrderedDict()
            cls.object_to_lr[cls.current_file_number] = OrderedDict()
            cls.current_lr = 0
        else:
            cls.current_lr += 1
        # Describe the new set; 'template' collects attribute descriptors
        # until the first object arrives, at which point it is 'closed'.
        new_set = OrderedDict()
        new_set['type'] = lr_type
        new_set['code'] = lr_code
        new_set['structure_type'] = lr_structure_type
        new_set['template'] = []
        new_set['closed'] = False
        cls.lrs.get(cls.current_file_number)[cls.current_lr] = new_set
        cls.lr_to_object.get(cls.current_file_number)[lr_type] = []
    @classmethod
    def register_object(cls, object_name):
        """Register a new DLIS Object, with its name.

        The object is attached to the most recently registered Logical
        Record; registering the first object closes that record's template.
        """
        if not cls.get_logical_records()[-1].get('closed'):
            cls.get_logical_records()[-1]['closed'] = True
        if cls.objects.get(cls.current_file_number) is None:
            cls.objects[cls.current_file_number] = OrderedDict()
        cls.objects.get(cls.current_file_number)[object_name] = []
        current_lr = cls.get_logical_records()[-1]
        # Maintain both directions of the object <-> LR-type mapping.
        cls.object_to_lr.get(cls.current_file_number)[object_name] = current_lr.get('type')
        cls.lr_to_object.get(cls.current_file_number).get(current_lr.get('type')).append(object_name)
    @classmethod
    def get_logical_records(cls, file_number=None):
        # Return the registered LR sets of a file, latest file by default.
        if file_number is None:
            file_number = cls.current_file_number
        return list(cls.lrs.get(file_number).values())
    @classmethod
    def get_logical_record(cls, lr_type, file_number=None):
        # Linear search for the first LR set with the given type, or None.
        for lr in cls.get_logical_records(file_number):
            if lr.get('type') == lr_type:
                return lr
        return None
    @classmethod
    def get_objects_of_type(cls, lr_type, file_number=None):
        # Map object full name -> raw values list for all objects of lr_type.
        if file_number is None:
            file_number = cls.current_file_number
        obj_names = cls.lr_to_object.get(file_number).get(lr_type)
        ret_map = OrderedDict()
        if not obj_names:
            return ret_map
        for obj_name in obj_names:
            # NOTE(review): uses cls.current_file_number here even when a
            # different file_number was requested -- looks like a bug; confirm.
            ret_map[obj_name] = cls.objects.get(cls.current_file_number).get(obj_name)
        return ret_map
    @classmethod
    def get_objects_dict_of_type(cls, lr_type, file_number=None):
        # Like get_objects_of_type, but zips each values list with the
        # record's template attribute names, yielding per-object dicts.
        if file_number is None:
            file_number = cls.current_file_number
        ret_map = OrderedDict()
        objects = cls.get_objects_of_type(lr_type, file_number)
        if not objects:
            return ret_map
        template_list = cls.get_logical_record(lr_type, file_number).get('template')
        for obj_name, obj_values in objects.items():
            obj_map = OrderedDict()
            for idx, value in enumerate(obj_values):
                #print 'idx', idx, template_list[idx]
                obj_map[template_list[idx].get('name')] = value
            ret_map[obj_name] = obj_map
        return ret_map
    @classmethod
    def get_object_values_list(cls, object_name, file_number=None):
        """Given a object name (e.g 0&0&WN or 1&0&RHOB) return its values list.
        If file_number is not given, the latest one will be used.
        """
        if file_number is None:
            file_number = cls.current_file_number
        obj_values_list = cls.objects.get(file_number).get(object_name)
        return obj_values_list
    @classmethod
    def get_object_values_dict(cls, object_name, file_number=None):
        # Return the object's values keyed by its record's template names,
        # or None when the object is unknown. Stops early (returning the
        # partial dict) when the template is longer than the values list.
        if file_number is None:
            file_number = cls.current_file_number
        obj_values_list = cls.get_object_values_list(object_name, file_number)
        if obj_values_list is None:
            return None
        lr_type = cls.object_to_lr.get(file_number).get(object_name)
        ret_map = OrderedDict()
        for set_map in list(cls.lrs.get(file_number).values()):
            if set_map.get('type') == lr_type:
                for idx, template in enumerate(set_map.get('template')):
                    try:
                        ret_map[template.get('name')] = obj_values_list[idx]
                    except IndexError:
                        return ret_map
        return ret_map
def _get_SUL(data):
# Getting Storage Unit Label (SUL)
if len(data) != 80 and len(data) != 128:
raise Exception('Input data size not according excepted (Excepted 80 or 120 bytes).')
SUL = OrderedDict()
SUL['Storage unit sequence number'] = data[0:4].decode("utf-8").strip()
SUL['RP66 version and format edition'] = data[4:9].decode("utf-8").strip() | |
JavaScript code for interactivity. If the canvas contains animation, the
markup will include an HTML user interface to control playback.
Parameters
----------
canvas: :class:`toyplot.canvas.Canvas`
The canvas to be rendered.
style: dict, optional
Dictionary of CSS styles that will be applied to the top-level output <div>.
Returns
-------
html: str
HTML representation of `canvas` as a string.
Notes
-----
The output HTML is a fragment wrapped in a <div>, suitable for embedding in
a larger document. It is the caller's responsibility to supply the <html>,
<body> etc. if the result is intended as a standalone HTML document.
"""
return xml.tostring(render(canvas=canvas, style=style), encoding="unicode", method="html")
def _color_fixup(styles):
    """Convert CSS rgba() colors to rgb() plus fill-opacity/stroke-opacity.

    It turns-out that many applications and libraries (Inkscape, Adobe
    Illustrator, Qt) don't handle CSS rgba() colors correctly, so the alpha
    channel is carried in the corresponding *-opacity property instead.

    The `styles` dict is modified in place and also returned.
    """
    # fill and stroke share identical conversion logic; the original code
    # duplicated it verbatim for both keys.
    for color_key, opacity_key in (("fill", "fill-opacity"), ("stroke", "stroke-opacity")):
        if color_key not in styles:
            continue
        color = toyplot.color.css(styles[color_key])
        if color is None:
            # Not a parseable CSS color (e.g. "none") -- leave it untouched.
            continue
        opacity = float(styles.get(opacity_key, 1.0))
        styles[color_key] = "rgb(%.3g%%,%.3g%%,%.3g%%)" % (
            color["r"] * 100, color["g"] * 100, color["b"] * 100)
        # Fold the color's alpha into any pre-existing opacity.
        styles[opacity_key] = str(color["a"] * opacity)
    return styles
def _css_style(*styles):
    """Flatten one-or-more style dicts into a single CSS declaration string,
    with keys sorted for deterministic output."""
    combined = _color_fixup(toyplot.style.combine(*styles))
    declarations = ["%s:%s" % item for item in sorted(combined.items())]
    return ";".join(declarations)
def _css_attrib(*styles):
    """Build an XML attribute dict carrying a CSS "style" attribute.

    Returns an empty dict when the combined style is empty, so callers can
    pass the result straight to xml.SubElement without emitting an empty
    style="" attribute.
    """
    # Delegate to _css_style instead of duplicating the combine/fixup/join
    # serialization logic a second time.
    css = _css_style(*styles)
    attrib = {}
    if css:
        attrib["style"] = css
    return attrib
def _flat_contiguous(a):
    """Return a list of slices covering each contiguous run of truthy values
    in the flattened array `a`."""
    runs = []
    start = 0
    for value, group in itertools.groupby(a.ravel()):
        end = start + len(list(group))
        if value:
            runs.append(slice(start, end))
        start = end
    return runs
def _walk_tree(node):
    """Depth-first traversal of an XML tree, yielding ("start", tag, attrib),
    ("text", str) and ("end", tag) events in document order."""
    yield ("start", node.tag, node.attrib)
    if node.text:
        yield ("text", node.text)
    for child in node:
        yield from _walk_tree(child)
    yield ("end", node.tag)
    if node.tail:
        yield ("text", node.tail)
def _draw_text(
        root,
        text,
        x=0,
        y=0,
        style=None,
        angle=None,
        title=None,
        attributes=None,
    ):
    """Render `text` as an SVG <g> group appended to `root`.

    `text` may be a plain string or a pre-computed toyplot.text.Layout.
    The group is translated to (x, y) and rotated by -`angle` degrees.
    Optional debug overlays (red/green/blue boxes and baselines) are drawn
    when the corresponding -toyplot-text-layout-*-visibility style keys are
    set to "visible".
    """
    # Nothing to draw for empty text / empty layout argument.
    if not text:
        return
    style = toyplot.style.combine({"font-family": "helvetica"}, style)
    if attributes is None:
        attributes = {}
    fonts = toyplot.font.ReportlabLibrary()
    # Reuse an existing Layout if the caller already computed one.
    layout = text if isinstance(text, toyplot.text.Layout) else toyplot.text.layout(text, style, fonts)
    # Build the SVG transform; note the rotation is negated because SVG's
    # y axis points down.
    transform = ""
    if x or y:
        transform += "translate(%r,%r)" % (x, y)
    if angle:
        transform += "rotate(%r)" % (-angle,)  # pylint: disable=invalid-unary-operand-type
    group = xml.SubElement(
        root,
        "g",
        attrib=attributes,
        )
    if transform:
        group.set("transform", transform)
    if title is not None:
        xml.SubElement(group, "title").text = str(title)
    # Shared styling for the optional debug overlays below.
    layout_opacity = 0.5
    layout_stroke_width = 1
    # Debug overlay: whole-layout bounding box (red) plus origin marker.
    if layout.style.get("-toyplot-text-layout-visibility", None) == "visible": # pragma: no cover
        xml.SubElement(
            group,
            "rect",
            x=str(layout.left),
            y=str(layout.top),
            width=str(layout.width),
            height=str(layout.height),
            stroke="red",
            fill="none",
            opacity=str(layout_opacity),
            attrib={"stroke-width": str(layout_stroke_width)},
            )
        xml.SubElement(
            group,
            "circle",
            x="0",
            y="0",
            r="1.5",
            stroke="red",
            fill="none",
            opacity=str(layout_opacity),
            attrib={"stroke-width": str(layout_stroke_width)},
            )
    # Stack of <g> elements saved while a hyperlink <a> wrapper is open.
    hyperlink = []
    for line in layout.children:
        # Debug overlay: per-line bounding box and baseline (green).
        if line.style.get("-toyplot-text-layout-line-visibility", None) == "visible": # pragma: no cover
            xml.SubElement(
                group,
                "rect",
                x=str(line.left),
                y=str(line.top),
                width=str(line.width),
                height=str(line.height),
                stroke="green",
                fill="none",
                opacity=str(layout_opacity),
                attrib={"stroke-width": str(layout_stroke_width)},
                )
            xml.SubElement(
                group,
                "line",
                x1=str(line.left),
                y1=str(line.baseline),
                x2=str(line.right),
                y2=str(line.baseline),
                stroke="green",
                fill="none",
                opacity=str(layout_opacity),
                attrib={"stroke-width": str(layout_stroke_width)},
                )
        for box in line.children:
            if isinstance(box, toyplot.text.TextBox):
                # A run of styled text positioned at its baseline.
                xml.SubElement(
                    group,
                    "text",
                    x=str(box.left),
                    y=str(box.baseline),
                    style=toyplot.style.to_css(box.style),
                    ).text = box.text
                # Debug overlay: per-box bounds and baseline (blue).
                if box.style.get("-toyplot-text-layout-box-visibility", None) == "visible": # pragma: no cover
                    xml.SubElement(
                        group,
                        "rect",
                        x=str(box.left),
                        y=str(box.top),
                        width=str(box.width),
                        height=str(box.height),
                        stroke="blue",
                        fill="none",
                        opacity=str(layout_opacity),
                        attrib={"stroke-width": str(layout_stroke_width)},
                        )
                    xml.SubElement(
                        group,
                        "line",
                        x1=str(box.left),
                        y1=str(box.baseline),
                        x2=str(box.right),
                        y2=str(box.baseline),
                        stroke="blue",
                        fill="none",
                        opacity=str(layout_opacity),
                        attrib={"stroke-width": str(layout_stroke_width)},
                        )
            elif isinstance(box, toyplot.text.MarkerBox):
                # An inline marker glyph, centered within its box.
                if box.marker:
                    _draw_marker(
                        group,
                        cx=(box.left + box.right) * 0.5,
                        cy=(box.top + box.bottom) * 0.5,
                        marker=toyplot.marker.create(size=box.height) + box.marker,
                        )
                # Debug overlay: marker box bounds and baseline (blue).
                if box.style.get("-toyplot-text-layout-box-visibility", None) == "visible": # pragma: no cover
                    xml.SubElement(
                        group,
                        "rect",
                        x=str(box.left),
                        y=str(box.top),
                        width=str(box.width),
                        height=str(box.height),
                        stroke="blue",
                        fill="none",
                        opacity=str(layout_opacity),
                        attrib={"stroke-width": str(layout_stroke_width)},
                        )
                    xml.SubElement(
                        group,
                        "line",
                        x1=str(box.left),
                        y1=str(box.baseline),
                        x2=str(box.right),
                        y2=str(box.baseline),
                        stroke="blue",
                        fill="none",
                        opacity=str(layout_opacity),
                        attrib={"stroke-width": str(layout_stroke_width)},
                        )
            elif isinstance(box, toyplot.text.PushHyperlink):
                # Open an <a> wrapper; subsequent boxes nest inside it.
                hyperlink.append(group)
                group = xml.SubElement(
                    group,
                    "a",
                    style=toyplot.style.to_css(box.style),
                    )
                group.set("xlink:href", box.href)
                if box.target is not None:
                    group.set("target", box.target)
            elif isinstance(box, toyplot.text.PopHyperlink):
                # Close the innermost open hyperlink wrapper.
                group = hyperlink.pop()
def _draw_bar(parent_xml, size, angle=0):
    """Append a vertical line of `size` px centered on the origin,
    optionally rotated by `angle` degrees (SVG rotation is negated)."""
    half = size / 2
    bar = xml.SubElement(
        parent_xml,
        "line",
        y1=repr(-half),
        y2=repr(half),
    )
    if angle:
        bar.set("transform", "rotate(%r)" % (-angle,))
def _draw_rect(parent_xml, size, width=1, height=1, angle=0):
    """Append a rect of size*width by size*height centered on the origin,
    optionally rotated by `angle` degrees."""
    rect_width = size * width
    rect_height = size * height
    rect = xml.SubElement(
        parent_xml,
        "rect",
        x=repr(-rect_width / 2),
        y=repr(-rect_height / 2),
        width=repr(rect_width),
        height=repr(rect_height),
    )
    if angle:
        rect.set("transform", "rotate(%r)" % (-angle,))
def _draw_triangle(parent_xml, size, angle=0):
    """Append an upward-pointing triangle inscribed in a size-by-size box
    centered on the origin, optionally rotated by `angle` degrees."""
    half = size / 2
    vertices = [
        (-half, half),
        (0, -half),
        (half, half),
    ]
    markup = xml.SubElement(
        parent_xml,
        "polygon",
        points=" ".join("%r,%r" % vertex for vertex in vertices),
    )
    if angle:
        markup.set("transform", "rotate(%r)" % (-angle,))
def _draw_circle(parent_xml, size):
    """Append a circle of diameter `size` centered on the origin."""
    xml.SubElement(parent_xml, "circle", r=repr(size * 0.5))
def _draw_marker(
        root,
        marker,
        cx=None,
        cy=None,
        extra_class=None,
        title=None,
        transform=None,
    ):
    """Render a single marker as an SVG <g> group appended to `root`.

    The marker's shape string selects one (or a combination) of the
    primitive draw helpers; an optional label is rendered centered on the
    marker. Returns the created <g> element. When `transform` is None the
    marker is translated to (cx, cy); rotation comes from marker.angle.
    """
    attrib = _css_attrib(marker.mstyle)
    if extra_class is not None:
        attrib["class"] = extra_class
    marker_xml = xml.SubElement(root, "g", attrib=attrib)
    if title is not None:
        xml.SubElement(marker_xml, "title").text = str(title)
    if transform is None:
        transform = "translate(%r, %r)" % (cx, cy)
    if marker.angle:
        # Negated because SVG rotation is clockwise with y pointing down.
        transform += " rotate(%r)" % (-marker.angle,)
    marker_xml.set("transform", transform)
    # Dispatch on the marker's shape code; compound shapes ("o+", "ox", ...)
    # are drawn by layering the primitives.
    if marker.shape == "|":
        _draw_bar(marker_xml, marker.size)
    elif marker.shape == "/":
        _draw_bar(marker_xml, marker.size, angle=-45)
    elif marker.shape == "-":
        _draw_bar(marker_xml, marker.size, angle=90)
    elif marker.shape == "\\":
        _draw_bar(marker_xml, marker.size, angle=45)
    elif marker.shape == "+":
        _draw_bar(marker_xml, marker.size)
        _draw_bar(marker_xml, marker.size, angle=90)
    elif marker.shape == "x":
        _draw_bar(marker_xml, marker.size, angle=-45)
        _draw_bar(marker_xml, marker.size, angle=45)
    elif marker.shape == "*":
        _draw_bar(marker_xml, marker.size)
        _draw_bar(marker_xml, marker.size, angle=-60)
        _draw_bar(marker_xml, marker.size, angle=60)
    elif marker.shape == "^":
        _draw_triangle(marker_xml, marker.size)
    elif marker.shape == ">":
        _draw_triangle(marker_xml, marker.size, angle=-90)
    elif marker.shape == "v":
        _draw_triangle(marker_xml, marker.size, angle=180)
    elif marker.shape == "<":
        _draw_triangle(marker_xml, marker.size, angle=90)
    elif marker.shape == "s":
        _draw_rect(marker_xml, marker.size)
    elif marker.shape == "d":
        _draw_rect(marker_xml, marker.size, angle=45)
    elif marker.shape and marker.shape[0] == "r":
        # "r<width>x<height>" draws a rectangle with explicit proportions.
        width, height = marker.shape[1:].split("x")
        _draw_rect(marker_xml, marker.size, width=float(width), height=float(height))
    elif marker.shape == "o":
        _draw_circle(marker_xml, marker.size)
    elif marker.shape == "oo":
        _draw_circle(marker_xml, marker.size)
        _draw_circle(marker_xml, marker.size / 2)
    elif marker.shape == "o|":
        _draw_circle(marker_xml, marker.size)
        _draw_bar(marker_xml, marker.size)
    elif marker.shape == "o/":
        _draw_circle(marker_xml, marker.size)
        _draw_bar(marker_xml, marker.size, -45)
    elif marker.shape == "o-":
        _draw_circle(marker_xml, marker.size)
        _draw_bar(marker_xml, marker.size, 90)
    elif marker.shape == "o\\":
        _draw_circle(marker_xml, marker.size)
        _draw_bar(marker_xml, marker.size, 45)
    elif marker.shape == "o+":
        _draw_circle(marker_xml, marker.size)
        _draw_bar(marker_xml, marker.size)
        _draw_bar(marker_xml, marker.size, 90)
    elif marker.shape == "ox":
        _draw_circle(marker_xml, marker.size)
        _draw_bar(marker_xml, marker.size, -45)
        _draw_bar(marker_xml, marker.size, 45)
    elif marker.shape == "o*":
        _draw_circle(marker_xml, marker.size)
        _draw_bar(marker_xml, marker.size)
        _draw_bar(marker_xml, marker.size, -60)
        _draw_bar(marker_xml, marker.size, 60)
    if marker.label: # Never compute a text layout unless we have to.
        _draw_text(
            root=marker_xml,
            text=marker.label,
            style=toyplot.style.combine(
                {
                    "-toyplot-vertical-align": "middle",
                    "fill": toyplot.color.black,
                    "font-size": "%rpx" % (marker.size * 0.75),
                    "stroke": "none",
                    "text-anchor": "middle",
                },
                marker.lstyle),
            )
    return marker_xml
def _axis_transform(x1, y1, x2, y2, offset, return_length=False):
    """Compute the SVG transform that maps the X axis onto the segment
    (x1, y1) -> (x2, y2), optionally shifted `offset` units perpendicular
    to the segment.

    Parameters
    ----------
    x1, y1, x2, y2: numbers
        Endpoints of the segment.
    offset: number
        Perpendicular offset applied after rotation.
    return_length: bool, optional
        When True, also return the segment length.

    Returns
    -------
    transform: str, or (transform, length) when `return_length` is True.
    """
    # numpy.row_stack is a deprecated alias of numpy.vstack (emits a
    # DeprecationWarning and is slated for removal in NumPy 2.x), so build
    # the 2x2 endpoint matrix with vstack directly -- identical semantics.
    p = numpy.vstack(((x1, y1), (x2, y2)))
    basis = p[1] - p[0]
    length = numpy.linalg.norm(basis)
    theta = numpy.rad2deg(numpy.arctan2(basis[1], basis[0]))
    transform = ""
    if p[0][0] or p[0][1]:
        transform += "translate(%s,%s)" % (p[0][0], p[0][1])
    if theta:
        transform += "rotate(%s)" % theta
    if offset:
        transform += "translate(0,%s)" % offset
    if return_length:
        return transform, length
    return transform
@dispatch(toyplot.canvas.Canvas, RenderContext)
def _render(canvas, context):
# Optionally apply a hyperlink to the entire canvas.
parent_xml = context.parent
if canvas._hyperlink:
hyperlink_xml = xml.SubElement(parent_xml, "a", attrib={"href": canvas._hyperlink})
parent_xml = hyperlink_xml
# Create the root SVG element.
svg_xml = xml.SubElement(
parent_xml,
"svg",
xmlns="http://www.w3.org/2000/svg",
attrib={
"class": "toyplot-canvas-Canvas",
"xmlns:toyplot": "http://www.sandia.gov/toyplot",
"xmlns:xlink": "http://www.w3.org/1999/xlink",
},
width="%rpx" % canvas.width,
height="%rpx" % canvas.height,
viewBox="0 0 %r %r" % (canvas.width, canvas.height),
preserveAspectRatio="xMidYMid meet",
style=_css_style(canvas._style),
id=context.get_id(canvas))
# Render everything on the canvas.
for child in canvas._children:
_render(canvas, child._finalize(), context.copy(parent=svg_xml))
# Create a container for any Javascript code.
javascript_xml = xml.SubElement(
context.parent,
"div",
attrib={"class": "toyplot-behavior"},
)
# Register a Javascript module to keep track of the canvas id.
context.define("toyplot/canvas/id", value=context.get_id(canvas))
# Register a Javascript module to keep track of the canvas.
context.define("toyplot/canvas", ["toyplot/canvas/id"], factory="""function(canvas_id)
{
return document.querySelector("#" + canvas_id);
}""")
# Register a Javascript module for storing table data.
context.define("toyplot/tables", factory="""function()
{
var tables = [];
var module = {};
module.set = function(owner, key, names, columns)
{
tables.push({owner: owner, key: key, names: names, columns: columns});
}
module.get = function(owner, key)
{
for(var i = 0; i != tables.length; ++i)
{
var table = tables[i];
if(table.owner != owner)
continue;
if(table.key != key)
continue;
return {names: table.names, columns: table.columns};
}
}
module.get_csv = function(owner, key)
{
var table = module.get(owner, key);
if(table != undefined)
{
var csv = "";
csv += table.names.join(",") + "\\n";
for(var i = 0; i != table.columns[0].length; ++i)
{
for(var j = 0; j != | |
<gh_stars>1-10
# Impacket - Collection of Python classes for working with network protocols.
#
# SECUREAUTH LABS. Copyright (C) 2021 SecureAuth Corporation. All rights reserved.
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Description:
# [MS-DCOM] Interface implementation
#
# Best way to learn how to use these calls is to grab the protocol standard
# so you understand what the call does, and then read the test case located
# at https://github.com/SecureAuthCorp/impacket/tree/master/tests/SMB_RPC
#
# Some calls have helper functions, which makes it even easier to use.
# They are located at the end of this file.
# Helper functions start with "h"<name of the call>.
# There are test cases for them too.
#
# Author:
# <NAME> (@agsolino)
#
# ToDo:
# [X] Use the same DCE connection for all the calls. Right now is connecting to the remote machine
# for each call, making it slower.
# [X] Implement a ping mechanism, otherwise the garbage collector at the server shuts down the objects if
# not used, returning RPC_E_DISCONNECTED
#
from __future__ import division
from __future__ import print_function
import socket
from struct import pack
from threading import Timer, current_thread
from impacket.dcerpc.v5.ndr import NDRCALL, NDRSTRUCT, NDRPOINTER, NDRUniConformantArray, NDRTLSTRUCT, UNKNOWNDATA
from impacket.dcerpc.v5.dtypes import LPWSTR, ULONGLONG, HRESULT, GUID, USHORT, WSTR, DWORD, LPLONG, LONG, PGUID, ULONG, \
UUID, WIDESTR, NULL
from impacket import hresult_errors, LOG
from impacket.uuid import string_to_bin, uuidtup_to_bin, generate
from impacket.dcerpc.v5.rpcrt import TypeSerialization1, RPC_C_AUTHN_LEVEL_PKT_INTEGRITY, RPC_C_AUTHN_LEVEL_NONE, \
RPC_C_AUTHN_LEVEL_PKT_PRIVACY, RPC_C_AUTHN_GSS_NEGOTIATE, RPC_C_AUTHN_WINNT, DCERPCException
from impacket.dcerpc.v5 import transport
CLSID_ActivationContextInfo = string_to_bin('000001a5-0000-0000-c000-000000000046')
CLSID_ActivationPropertiesIn = string_to_bin('00000338-0000-0000-c000-000000000046')
CLSID_ActivationPropertiesOut = string_to_bin('00000339-0000-0000-c000-000000000046')
CLSID_CONTEXT_EXTENSION = string_to_bin('00000334-0000-0000-c000-000000000046')
CLSID_ContextMarshaler = string_to_bin('0000033b-0000-0000-c000-000000000046')
CLSID_ERROR_EXTENSION = string_to_bin('0000031c-0000-0000-c000-000000000046')
CLSID_ErrorObject = string_to_bin('0000031b-0000-0000-c000-000000000046')
CLSID_InstanceInfo = string_to_bin('000001ad-0000-0000-c000-000000000046')
CLSID_InstantiationInfo = string_to_bin('000001ab-0000-0000-c000-000000000046')
CLSID_PropsOutInfo = string_to_bin('00000339-0000-0000-c000-000000000046')
CLSID_ScmReplyInfo = string_to_bin('000001b6-0000-0000-c000-000000000046')
CLSID_ScmRequestInfo = string_to_bin('000001aa-0000-0000-c000-000000000046')
CLSID_SecurityInfo = string_to_bin('000001a6-0000-0000-c000-000000000046')
CLSID_ServerLocationInfo = string_to_bin('000001a4-0000-0000-c000-000000000046')
CLSID_SpecialSystemProperties = string_to_bin('000001b9-0000-0000-c000-000000000046')
IID_IActivation = uuidtup_to_bin(('4d9f4ab8-7d1c-11cf-861e-0020af6e7c57','0.0'))
IID_IActivationPropertiesIn = uuidtup_to_bin(('000001A2-0000-0000-C000-000000000046','0.0'))
IID_IActivationPropertiesOut = uuidtup_to_bin(('000001A3-0000-0000-C000-000000000046','0.0'))
IID_IContext = uuidtup_to_bin(('000001c0-0000-0000-C000-000000000046','0.0'))
IID_IObjectExporter = uuidtup_to_bin(('99fcfec4-5260-101b-bbcb-00aa0021347a','0.0'))
IID_IRemoteSCMActivator = uuidtup_to_bin(('000001A0-0000-0000-C000-000000000046','0.0'))
IID_IRemUnknown = uuidtup_to_bin(('00000131-0000-0000-C000-000000000046','0.0'))
IID_IRemUnknown2 = uuidtup_to_bin(('00000143-0000-0000-C000-000000000046','0.0'))
IID_IUnknown = uuidtup_to_bin(('00000000-0000-0000-C000-000000000046','0.0'))
IID_IClassFactory = uuidtup_to_bin(('00000001-0000-0000-C000-000000000046','0.0'))
class DCERPCSessionError(DCERPCException):
    """DCE/RPC exception that renders known DCOM HRESULT codes with their
    short and verbose descriptions."""
    def __init__(self, error_string=None, error_code=None, packet=None):
        DCERPCException.__init__(self, error_string, error_code, packet)

    def __str__(self):
        details = hresult_errors.ERROR_MESSAGES.get(self.error_code)
        if details is None:
            return 'DCOM SessionError: unknown error code: 0x%x' % self.error_code
        short_msg, verbose_msg = details
        return 'DCOM SessionError: code: 0x%x - %s - %s' % (self.error_code, short_msg, verbose_msg)
################################################################################
# CONSTANTS
################################################################################
# Section numbers below refer to the [MS-DCOM] specification.
# 2.2.1 OID
OID = ULONGLONG
class OID_ARRAY(NDRUniConformantArray):
    # Conformant array of OIDs.
    item = OID
class POID_ARRAY(NDRPOINTER):
    # Pointer to an OID_ARRAY.
    referent = (
        ('Data', OID_ARRAY),
    )
# 2.2.2 SETID
SETID = ULONGLONG
# 2.2.4 error_status_t
error_status_t = ULONG
# 2.2.6 CID
CID = GUID
# 2.2.7 CLSID
CLSID = GUID
# 2.2.8 IID
IID = GUID
PIID = PGUID
# 2.2.9 IPID
IPID = GUID
# 2.2.10 OXID
OXID = ULONGLONG
# 2.2.18 OBJREF
# Marshaled object-reference flavors.
FLAGS_OBJREF_STANDARD = 0x00000001
FLAGS_OBJREF_HANDLER = 0x00000002
FLAGS_OBJREF_CUSTOM = 0x00000004
FLAGS_OBJREF_EXTENDED = 0x00000008
# 2.2.18.1 STDOBJREF
SORF_NOPING = 0x00001000
# 2.2.20 Context
CTXMSHLFLAGS_BYVAL = 0x00000002
# 2.2.20.1 PROPMARSHALHEADER
CPFLAG_PROPAGATE = 0x00000001
CPFLAG_EXPOSE = 0x00000002
CPFLAG_ENVOY = 0x00000004
# 2.2.22.2.1 InstantiationInfoData
ACTVFLAGS_DISABLE_AAA = 0x00000002
ACTVFLAGS_ACTIVATE_32_BIT_SERVER = 0x00000004
ACTVFLAGS_ACTIVATE_64_BIT_SERVER = 0x00000008
ACTVFLAGS_NO_FAILURE_LOG = 0x00000020
# 2.2.22.2.2 SpecialPropertiesData
SPD_FLAG_USE_CONSOLE_SESSION = 0x00000001
# 2.2.28.1 IDL Range Constants
# NOTE(review): the section number above was garbled into an IP address by
# an automated rewrite; restored from the [MS-DCOM] layout -- verify.
MAX_REQUESTED_INTERFACES = 0x8000
MAX_REQUESTED_PROTSEQS = 0x8000
MIN_ACTPROP_LIMIT = 1
MAX_ACTPROP_LIMIT = 10
class handle_t(NDRSTRUCT):
    """RPC context handle: attributes plus a UUID; all-zero UUID is null."""
    structure = (
        ('context_handle_attributes',ULONG),
        ('context_handle_uuid',UUID),
    )
    def __init__(self, data=None, isNDR64=False):
        NDRSTRUCT.__init__(self, data, isNDR64)
        # NOTE(review): the UUID is zeroed even when unmarshaling from
        # `data` -- confirm this is intended.
        self['context_handle_uuid'] = b'\x00'*16
    def isNull(self):
        # A context handle is null when its UUID is all zeros.
        return self['context_handle_uuid'] == b'\x00'*16
# 2.2.11 COMVERSION
class COMVERSION(NDRSTRUCT):
    """DCOM protocol version ([MS-DCOM] 2.2.11).

    Newly built (not unmarshaled) instances are stamped with the
    class-level defaults, which can be changed globally via
    set_default_version().
    """
    default_major_version = 5
    default_minor_version = 7
    structure = (
        ('MajorVersion',USHORT),
        ('MinorVersion',USHORT),
    )
    @classmethod
    def set_default_version(cls, major_version=None, minor_version=None):
        # Set default dcom version for all new COMVERSION objects.
        if major_version is not None:
            cls.default_major_version = major_version
        if minor_version is not None:
            cls.default_minor_version = minor_version
    def __init__(self, data = None,isNDR64 = False):
        NDRSTRUCT.__init__(self, data, isNDR64)
        if data is None:
            # Only stamp defaults when not unmarshaling from raw data.
            self['MajorVersion'] = self.default_major_version
            self['MinorVersion'] = self.default_minor_version
class PCOMVERSION(NDRPOINTER):
    # Pointer to COMVERSION.
    referent = (
        ('Data', COMVERSION),
    )
# 2.2.13.1 ORPC_EXTENT
# This MUST contain an array of bytes that form the extent data.
# The array size MUST be a multiple of 8 for alignment reasons.
class BYTE_ARRAY(NDRUniConformantArray):
    # Conformant array of raw bytes.
    item = 'c'
class ORPC_EXTENT(NDRSTRUCT):
    # One extension blob: identifying GUID, byte count, opaque payload.
    structure = (
        ('id',GUID),
        ('size',ULONG),
        ('data',BYTE_ARRAY),
    )
# 2.2.13.2 ORPC_EXTENT_ARRAY
# This MUST be an array of ORPC_EXTENTs. The array size MUST be a
# multiple of 2 for alignment reasons.
class PORPC_EXTENT(NDRPOINTER):
    # Pointer to ORPC_EXTENT.
    referent = (
        ('Data', ORPC_EXTENT),
    )
class EXTENT_ARRAY(NDRUniConformantArray):
    # Conformant array of ORPC_EXTENT pointers.
    item = PORPC_EXTENT
class PEXTENT_ARRAY(NDRPOINTER):
    # Pointer to EXTENT_ARRAY.
    referent = (
        ('Data', EXTENT_ARRAY),
    )
class ORPC_EXTENT_ARRAY(NDRSTRUCT):
    # Counted (plus reserved field) array of extent pointers.
    structure = (
        ('size',ULONG),
        ('reserved',ULONG),
        ('extent',PEXTENT_ARRAY),
    )
class PORPC_EXTENT_ARRAY(NDRPOINTER):
    # Pointer to ORPC_EXTENT_ARRAY.
    referent = (
        ('Data', ORPC_EXTENT_ARRAY),
    )
# 2.2.13.3 ORPCTHIS
class ORPCTHIS(NDRSTRUCT):
    # Implicit first argument of every ORPC request: protocol version,
    # flags, causality id and optional extensions.
    structure = (
        ('version',COMVERSION),
        ('flags',ULONG),
        ('reserved1',ULONG),
        ('cid',CID),
        ('extensions',PORPC_EXTENT_ARRAY),
    )
# 2.2.13.4 ORPCTHAT
class ORPCTHAT(NDRSTRUCT):
    # Implicit first field of every ORPC response.
    structure = (
        ('flags',ULONG),
        ('extensions',PORPC_EXTENT_ARRAY),
    )
# 2.2.14 MInterfacePointer
class MInterfacePointer(NDRSTRUCT):
    # Counted blob holding a marshaled OBJREF.
    structure = (
        ('ulCntData',ULONG),
        ('abData',BYTE_ARRAY),
    )
# 2.2.15 PMInterfacePointerInternal
class PMInterfacePointerInternal(NDRPOINTER):
    # Pointer to MInterfacePointer.
    referent = (
        ('Data', MInterfacePointer),
    )
# 2.2.16 PMInterfacePointer
class PMInterfacePointer(NDRPOINTER):
    # Pointer to MInterfacePointer.
    referent = (
        ('Data', MInterfacePointer),
    )
class PPMInterfacePointer(NDRPOINTER):
    # Pointer to PMInterfacePointer (double indirection).
    referent = (
        ('Data', PMInterfacePointer),
    )
# 2.2.18 OBJREF
class OBJREF(NDRSTRUCT):
    """Common header shared by all marshaled object-reference flavors;
    `flags` selects the concrete subclass layout."""
    commonHdr = (
        ('signature',ULONG),
        ('flags',ULONG),
        ('iid',GUID),
    )
    def __init__(self, data = None,isNDR64 = False):
        NDRSTRUCT.__init__(self, data, isNDR64)
        if data is None:
            # 0x574F454D is the mandatory OBJREF signature ('MEOW').
            self['signature'] = 0x574F454D
# 2.2.18.1 STDOBJREF
class STDOBJREF(NDRSTRUCT):
    # Standard object reference: marshal flags, reference count, and the
    # OXID/OID/IPID triple that locates the object exporter and interface.
    structure = (
        ('flags',ULONG),
        ('cPublicRefs',ULONG),
        ('oxid',OXID),
        ('oid',OID),
        ('ipid',IPID),
    )
# 2.2.18.4 OBJREF_STANDARD
class OBJREF_STANDARD(OBJREF):
    # STDOBJREF followed by the raw DUALSTRINGARRAY resolver address.
    structure = (
        ('std',STDOBJREF),
        ('saResAddr',':'),
    )
    def __init__(self, data = None,isNDR64 = False):
        OBJREF.__init__(self, data, isNDR64)
        if data is None:
            self['flags'] = FLAGS_OBJREF_STANDARD
    # 2.2.18.5 OBJREF_HANDLER
class OBJREF_HANDLER(OBJREF):
    # Like OBJREF_STANDARD plus the CLSID of the handler to instantiate.
    structure = (
        ('std',STDOBJREF),
        ('clsid',CLSID),
        ('saResAddr',':'),
    )
    def __init__(self, data = None,isNDR64 = False):
        OBJREF.__init__(self, data, isNDR64)
        if data is None:
            self['flags'] = FLAGS_OBJREF_HANDLER
# 2.2.18.6 OBJREF_CUSTOM
class OBJREF_CUSTOM(OBJREF):
    # Custom-marshaled reference: unmarshaler CLSID plus opaque object data.
    structure = (
        ('clsid',CLSID),
        ('cbExtension',ULONG),
        ('ObjectReferenceSize',ULONG),
        ('pObjectData',':'),
    )
    def __init__(self, data = None,isNDR64 = False):
        OBJREF.__init__(self, data, isNDR64)
        if data is None:
            self['flags'] = FLAGS_OBJREF_CUSTOM
# 2.2.18.8 DATAELEMENT
class DATAELEMENT(NDRSTRUCT):
    # Envoy-context element: GUID id, payload size and 8-byte-rounded size.
    structure = (
        ('dataID',GUID),
        ('cbSize',ULONG),
        ('cbRounded',ULONG),
        ('Data',':'),
    )
class DUALSTRINGARRAYPACKED(NDRSTRUCT):
    # Packed (non-NDR-pointer) form of DUALSTRINGARRAY; entry counts are in
    # 16-bit units, hence the *2 when computing the raw data length.
    structure = (
        ('wNumEntries',USHORT),
        ('wSecurityOffset',USHORT),
        ('aStringArray',':'),
    )
    def getDataLen(self, data, offset=0):
        return self['wNumEntries']*2
# 2.2.18.7 OBJREF_EXTENDED
class OBJREF_EXTENDED(OBJREF):
    """Extended object reference carrying an envoy context
    ([MS-DCOM] 2.2.18.7)."""
    structure = (
        ('std',STDOBJREF),
        ('Signature1',ULONG),
        ('saResAddr',DUALSTRINGARRAYPACKED),
        ('nElms',ULONG),
        ('Signature2',ULONG),
        ('ElmArray',DATAELEMENT),
    )
    def __init__(self, data = None, isNDR64 = False):
        OBJREF.__init__(self, data, isNDR64)
        if data is None:
            self['flags'] = FLAGS_OBJREF_EXTENDED
            # Per [MS-DCOM] 2.2.18.7 both signature fields MUST be
            # 0x4E535956. The previous code assigned Signature1 twice,
            # leaving Signature2 unset, and stored the signature value in
            # nElms, which MUST be 1 (exactly one DATAELEMENT).
            self['Signature1'] = 0x4E535956
            self['Signature2'] = 0x4E535956
            self['nElms'] = 1
# 2.2.19 DUALSTRINGARRAY
class USHORT_ARRAY(NDRUniConformantArray):
    # Conformant array of little-endian unsigned shorts.
    item = '<H'
class PUSHORT_ARRAY(NDRPOINTER):
    # Pointer to USHORT_ARRAY.
    referent = (
        ('Data', USHORT_ARRAY),
    )
class DUALSTRINGARRAY(NDRSTRUCT):
    # String/security bindings; wSecurityOffset marks where the security
    # bindings start within aStringArray (counts in 16-bit units).
    structure = (
        ('wNumEntries',USHORT),
        ('wSecurityOffset',USHORT),
        ('aStringArray',USHORT_ARRAY),
    )
class PDUALSTRINGARRAY(NDRPOINTER):
    # Pointer to DUALSTRINGARRAY.
    referent = (
        ('Data',DUALSTRINGARRAY),
    )
# 2.2.19.3 STRINGBINDING
class STRINGBINDING(NDRSTRUCT):
    # One network binding: protocol tower id plus the network address.
    structure = (
        ('wTowerId',USHORT),
        ('aNetworkAddr',WIDESTR),
    )
# 2.2.19.4 SECURITYBINDING
class SECURITYBINDING(NDRSTRUCT):
    # One security binding: authentication service and principal name.
    structure = (
        ('wAuthnSvc',USHORT),
        ('Reserved',USHORT),
        ('aPrincName',WIDESTR),
    )
# 2.2.20.1 PROPMARSHALHEADER
class PROPMARSHALHEADER(NDRSTRUCT):
    # Header for one marshaled context property.
    structure = (
        ('clsid',CLSID),
        ('policyId',GUID),
        ('flags',ULONG),
        ('cb',ULONG),
        ('ctxProperty',':'),
    )
class PROPMARSHALHEADER_ARRAY(NDRUniConformantArray):
    # Conformant array of PROPMARSHALHEADERs.
    item = PROPMARSHALHEADER
# 2.2.20 Context
class Context(NDRSTRUCT):
structure = (
('MajorVersion',USHORT),
('MinVersion',USHORT),
('ContextId',GUID),
('Flags',ULONG),
('Reserved',ULONG),
('dwNumExtents',ULONG),
('cbExtents',ULONG),
('MshlFlags',ULONG),
('Count',ULONG),
('Frozen',ULONG),
('PropMarshalHeader',PROPMARSHALHEADER_ARRAY),
)
# 2.2.21.3 ErrorInfoString
class ErrorInfoString(NDRSTRUCT):
structure = (
('dwMax',ULONG),
('dwOffSet',ULONG),
('dwActual',IID),
('Name',WSTR),
)
# 2.2.21.2 Custom-Marshaled Error Information Format
class ORPC_ERROR_INFORMATION(NDRSTRUCT):
structure = (
('dwVersion',ULONG),
('dwHelpContext',ULONG),
('iid',IID),
('dwSourceSignature',ULONG),
('Source',ErrorInfoString),
('dwDescriptionSignature',ULONG),
('Description',ErrorInfoString),
('dwHelpFileSignature',ULONG),
('HelpFile',ErrorInfoString),
)
# 2.2.21.5 EntryHeader
class EntryHeader(NDRSTRUCT):
    # Header of one policy-data entry in the context ORPC extension
    # (MS-DCOM 2.2.21.5).
    structure = (
        ('Signature',ULONG),
        ('cbEHBuffer',ULONG),
        ('cbSize',ULONG),
        ('reserved',ULONG),
        ('policyID',GUID),
    )
class EntryHeader_ARRAY(NDRUniConformantArray):
    # Conformant array of EntryHeader entries.
    item = EntryHeader
# 2.2.21.4 Context ORPC Extension
class ORPC_CONTEXT(NDRSTRUCT):
    # Context ORPC extension payload (MS-DCOM 2.2.21.4).
    structure = (
        ('SignatureVersion',ULONG),
        ('Version',ULONG),
        ('cPolicies',ULONG),
        ('cbBuffer',ULONG),
        ('cbSize',ULONG),
        ('hr',ULONG),
        ('hrServer',ULONG),
        ('reserved',ULONG),
        ('EntryHeader',EntryHeader_ARRAY),
        ('PolicyData',':'),
    )
    def __init__(self, data = None, isNDR64 = False):
        # Fresh instances get the mandatory signature magic 0x414E554B ('KUNA').
        NDRSTRUCT.__init__(self, data, isNDR64)
        if data is None:
            self['SignatureVersion'] = 0x414E554B
# 2.2.22.1 CustomHeader
class CLSID_ARRAY(NDRUniConformantArray):
    # Conformant array of CLSIDs.
    item = CLSID
class PCLSID_ARRAY(NDRPOINTER):
    # NDR pointer to a CLSID_ARRAY.
    referent = (
        ('Data', CLSID_ARRAY),
    )
class DWORD_ARRAY(NDRUniConformantArray):
    # Conformant array of DWORDs.
    item = DWORD
class PDWORD_ARRAY(NDRPOINTER):
    # NDR pointer to a DWORD_ARRAY.
    referent = (
        ('Data', DWORD_ARRAY),
    )
class CustomHeader(TypeSerialization1):
    # Header of the activation-properties BLOB (MS-DCOM 2.2.22.1); pclsid and
    # pSizes describe the serialized property interfaces that follow.
    structure = (
        ('totalSize',DWORD),
        ('headerSize',DWORD),
        ('dwReserved',DWORD),
        ('destCtx',DWORD),
        ('cIfs',DWORD),
        ('classInfoClsid',CLSID),
        ('pclsid',PCLSID_ARRAY),
        ('pSizes',PDWORD_ARRAY),
        ('pdwReserved',LPLONG),
        #('pdwReserved',LONG),
    )
    def getData(self, soFar = 0):
        # Recompute the self-describing size/count fields before serializing.
        # headerSize covers both the flat data and the pointer referents.
        self['headerSize'] = len(TypeSerialization1.getData(self, soFar)) + len(
            TypeSerialization1.getDataReferents(self, soFar))
        self['cIfs'] = len(self['pclsid'])
        return TypeSerialization1.getData(self, soFar)
# 2.2.22 Activation Properties BLOB
class ACTIVATION_BLOB(NDRTLSTRUCT):
    # Activation Properties BLOB (MS-DCOM 2.2.22): custom header followed by
    # the serialized property bytes.
    structure = (
        ('dwSize',ULONG),
        ('dwReserved',ULONG),
        ('CustomHeader',CustomHeader),
        ('Property',UNKNOWNDATA),
    )
    def getData(self, soFar = 0):
        # dwSize spans the serialized CustomHeader (data + referents) plus the
        # raw Property bytes; keep the header's totalSize in sync with it.
        self['dwSize'] = len(self['CustomHeader'].getData(soFar)) + len(
            self['CustomHeader'].getDataReferents(soFar)) + len(self['Property'])
        self['CustomHeader']['totalSize'] = self['dwSize']
        return NDRTLSTRUCT.getData(self)
# 2.2.22.2.1 InstantiationInfoData
class IID_ARRAY(NDRUniConformantArray):
    # Conformant array of interface IDs.
    item = IID
class PIID_ARRAY(NDRPOINTER):
    # NDR pointer to an IID_ARRAY.
    referent = (
        ('Data', IID_ARRAY),
    )
class InstantiationInfoData(TypeSerialization1):
    # Activation property (MS-DCOM 2.2.22.2.1): which class to instantiate,
    # with which flags, and which interfaces (pIID, cIID) are requested.
    structure = (
        ('classId',CLSID),
        ('classCtx',DWORD),
        ('actvflags',DWORD),
        ('fIsSurrogate',LONG),
        ('cIID',DWORD),
        ('instFlag',DWORD),
        ('pIID',PIID_ARRAY),
        ('thisSize',DWORD),
        ('clientCOMVersion',COMVERSION),
    )
# 2.2.22.2.2 SpecialPropertiesData
class SpecialPropertiesData(TypeSerialization1):
    # Activation property (MS-DCOM 2.2.22.2.2): session/partition flags.
    # NOTE(review): 'Reserved0' appears twice below (duplicate field name);
    # harmless for reserved padding but confirm it matches the upstream
    # definition rather than being a copy/paste slip.
    structure = (
        ('dwSessionId',ULONG),
        ('fRemoteThisSessionId',LONG),
        ('fClientImpersonating',LONG),
        ('fPartitionIDPresent',LONG),
        ('dwDefaultAuthnLvl',DWORD),
        ('guidPartition',GUID),
        ('dwPRTFlags',DWORD),
        ('dwOrigClsctx',DWORD),
        ('dwFlags',DWORD),
        ('Reserved0',DWORD),
        ('Reserved0',DWORD),
        ('Reserved', '32s=""'),
        #('Reserved1',DWORD),
        #('Reserved2',ULONGLONG),
        #('Reserved3_1',DWORD),
        #('Reserved3_2',DWORD),
        #('Reserved3_3',DWORD),
        #('Reserved3_4',DWORD),
        #('Reserved3_5',DWORD),
    )
# 2.2.22.2.3 InstanceInfoData
class InstanceInfoData(TypeSerialization1):
    # Activation property (MS-DCOM 2.2.22.2.3): persistent-instance info
    # (file name / storage interface) for the object being activated.
    structure = (
        ('fileName',LPWSTR),
        ('mode',DWORD),
        ('ifdROT',PMInterfacePointer),
        ('ifdStg',PMInterfacePointer),
    )
# 2.2.22.2.4.1 customREMOTE_REQUEST_SCM_INFO
class customREMOTE_REQUEST_SCM_INFO(NDRSTRUCT):
    # Client's requested protocol sequences for the SCM (MS-DCOM 2.2.22.2.4.1).
    structure = (
        ('ClientImpLevel',DWORD),
        ('cRequestedProtseqs',USHORT),
        ('pRequestedProtseqs',PUSHORT_ARRAY),
    )
class PcustomREMOTE_REQUEST_SCM_INFO(NDRPOINTER):
    # NDR pointer to customREMOTE_REQUEST_SCM_INFO.
    referent = (
        ('Data', customREMOTE_REQUEST_SCM_INFO),
    )
# 2.2.22.2.4 ScmRequestInfoData
class ScmRequestInfoData(TypeSerialization1):
    # Activation property (MS-DCOM 2.2.22.2.4) wrapping the SCM request info.
    structure = (
        ('pdwReserved',LPLONG),
        ('remoteRequest',PcustomREMOTE_REQUEST_SCM_INFO),
    )
# 2.2.22.2.5 ActivationContextInfoData
class ActivationContextInfoData(TypeSerialization1):
    # Activation property (MS-DCOM 2.2.22.2.5): marshaled client/prototype
    # context interface pointers.
    structure = (
        ('clientOK',LONG),
        ('bReserved1',LONG),
        ('dwReserved1',DWORD),
        ('dwReserved2',DWORD),
        ('pIFDClientCtx',PMInterfacePointer),
        ('pIFDPrototypeCtx',PMInterfacePointer),
    )
# 2.2.22.2.6 LocationInfoData
class LocationInfoData(TypeSerialization1):
    # Activation property (MS-DCOM 2.2.22.2.6): machine/process/apartment
    # location hints for the activation.
    structure = (
        ('machineName',LPWSTR),
        ('processId',DWORD),
        ('apartmentId',DWORD),
        ('contextId',DWORD),
    )
# 2.2.22.2.7.1 COSERVERINFO
class COSERVERINFO(NDRSTRUCT):
    # Target server name for remote activation (MS-DCOM 2.2.22.2.7.1).
    structure = (
        ('dwReserved1',DWORD),
        ('pwszName',LPWSTR),
        ('pdwReserved',LPLONG),
        ('dwReserved2',DWORD),
    )
class PCOSERVERINFO(NDRPOINTER):
    # NDR pointer to COSERVERINFO.
    referent = (
        ('Data', COSERVERINFO),
    )
# 2.2.22.2.7 SecurityInfoData
class SecurityInfoData(TypeSerialization1):
    # Activation property (MS-DCOM 2.2.22.2.7): authentication flags and the
    # optional COSERVERINFO of the target server.
    structure = (
        ('dwAuthnFlags',DWORD),
        ('pServerInfo',PCOSERVERINFO),
        ('pdwReserved',LPLONG),
    )
# 2.2.22.2.8.1 customREMOTE_REPLY_SCM_INFO
class customREMOTE_REPLY_SCM_INFO(NDRSTRUCT):
    # SCM's reply to the activation request (MS-DCOM 2.2.22.2.8.1): the OXID,
    # its string bindings and the IPID of the IRemUnknown proxy.
    structure = (
        ('Oxid',OXID),
        ('pdsaOxidBindings',PDUALSTRINGARRAY),
        ('ipidRemUnknown',IPID),
        ('authnHint',DWORD),
        ('serverVersion',COMVERSION),
    )
class PcustomREMOTE_REPLY_SCM_INFO(NDRPOINTER):
    # NDR pointer to customREMOTE_REPLY_SCM_INFO.
    referent = (
        ('Data', customREMOTE_REPLY_SCM_INFO),
    )
# 2.2.22.2.8 ScmReplyInfoData
class ScmReplyInfoData(TypeSerialization1):
    # Activation property (MS-DCOM 2.2.22.2.8) wrapping the SCM reply info.
    # NOTE(review): pdwReserved is DWORD here while the request side uses
    # LPLONG -- confirm this asymmetry matches the upstream definition.
    structure = (
        ('pdwReserved',DWORD),
        ('remoteReply',PcustomREMOTE_REPLY_SCM_INFO),
    )
# 2.2.22.2.9 PropsOutInfo
class HRESULT_ARRAY(NDRUniConformantArray):
    # Conformant array of HRESULT status codes.
    item = HRESULT
class PHRESULT_ARRAY(NDRPOINTER):
    # NDR pointer to an HRESULT_ARRAY.
    referent = (
        ('Data', HRESULT_ARRAY),
    )
class MInterfacePointer_ARRAY(NDRUniConformantArray):
    # Conformant array of marshaled interface pointers.
    item = MInterfacePointer
class PMInterfacePointer_ARRAY(NDRUniConformantArray):
    # Conformant array of pointers to marshaled interface pointers.
    item = PMInterfacePointer
class PPMInterfacePointer_ARRAY(NDRPOINTER):
referent | |
# Source: jtemporal/auth0-python (token verifier module)
import json
import time
import jwt
import requests
from auth0.v3.exceptions import TokenValidationError
class SignatureVerifier(object):
    """Abstract class that will verify a given JSON web token's signature
    using the key fetched internally given its key id.

    Subclasses must implement :meth:`_fetch_key`.

    Args:
        algorithm (str): The expected signing algorithm (e.g. RS256).
    """

    # jwt.decode options: verify ONLY the signature here; expiry / audience /
    # issuer claims are validated separately (see TokenVerifier below).
    DISABLE_JWT_CHECKS = {
        "verify_signature": True,
        "verify_exp": False,
        "verify_nbf": False,
        "verify_iat": False,
        "verify_aud": False,
        "verify_iss": False,
        "require_exp": False,
        "require_iat": False,
        "require_nbf": False,
    }

    def __init__(self, algorithm):
        # Fix: use isinstance instead of the exact-type comparison
        # 'type(algorithm) != str' (idiomatic; also accepts str subclasses).
        if not algorithm or not isinstance(algorithm, str):
            raise ValueError("algorithm must be specified.")
        self._algorithm = algorithm

    def _fetch_key(self, key_id=None):
        """Obtain the key associated to the given key id.

        Must be implemented by subclasses.

        Args:
            key_id (str, optional): The id of the key to fetch.

        Returns:
            the key to use for verifying a cryptographic signature
        """
        raise NotImplementedError

    def verify_signature(self, token):
        """Verify the signature of the given JSON web token.

        Args:
            token (str): The JWT to get its signature verified.

        Raises:
            TokenValidationError: if the token cannot be decoded, the algorithm is invalid
            or the token's signature doesn't match the calculated one.

        Returns:
            the decoded payload (dict).
        """
        try:
            header = jwt.get_unverified_header(token)
        except jwt.exceptions.DecodeError:
            raise TokenValidationError("ID token could not be decoded.")
        alg = header.get('alg', None)
        if alg != self._algorithm:
            raise TokenValidationError(
                'Signature algorithm of "{}" is not supported. Expected the ID token '
                'to be signed with "{}"'.format(alg, self._algorithm))
        kid = header.get('kid', None)
        secret_or_certificate = self._fetch_key(key_id=kid)
        try:
            decoded = jwt.decode(jwt=token, key=secret_or_certificate,
                                 algorithms=[self._algorithm], options=self.DISABLE_JWT_CHECKS)
        except jwt.exceptions.InvalidSignatureError:
            raise TokenValidationError("Invalid token signature.")
        return decoded
class SymmetricSignatureVerifier(SignatureVerifier):
    """Verifier for HMAC signatures, which rely on shared secrets.

    Args:
        shared_secret (str): The shared secret used to decode the token.
        algorithm (str, optional): The expected signing algorithm. Defaults to "HS256".
    """

    def __init__(self, shared_secret, algorithm="HS256"):
        super(SymmetricSignatureVerifier, self).__init__(algorithm)
        self._shared_secret = shared_secret

    def _fetch_key(self, key_id=None):
        # HMAC verification uses the shared secret regardless of the key id.
        return self._shared_secret
class AsymmetricSignatureVerifier(SignatureVerifier):
    """Verifier for RSA signatures, which rely on public key certificates.

    Args:
        jwks_url (str): The url where the JWK set is located.
        algorithm (str, optional): The expected signing algorithm. Defaults to "RS256".
    """

    def __init__(self, jwks_url, algorithm="RS256"):
        super(AsymmetricSignatureVerifier, self).__init__(algorithm)
        # Delegate key retrieval (and caching) to a JWKS fetcher.
        self._fetcher = JwksFetcher(jwks_url)

    def _fetch_key(self, key_id=None):
        return self._fetcher.get_key(key_id)
class JwksFetcher(object):
    """Fetches and holds a JSON web key set.

    This class makes use of an in-memory cache. For it to work properly,
    define this instance once and re-use it.

    Args:
        jwks_url (str): The url where the JWK set is located.
        cache_ttl (int, optional): The lifetime of the JWK set cache in seconds.
            Defaults to 600 seconds.
    """

    CACHE_TTL = 600  # 10 min cache lifetime

    def __init__(self, jwks_url, cache_ttl=CACHE_TTL):
        self._jwks_url = jwks_url
        self._init_cache(cache_ttl)
        # Fix: removed a stray bare 'return' that previously ended __init__.

    def _init_cache(self, cache_ttl):
        # An epoch date of 0 guarantees the first get_key() performs a fetch.
        self._cache_value = {}
        self._cache_date = 0
        self._cache_ttl = cache_ttl
        self._cache_is_fresh = False

    def _fetch_jwks(self, force=False):
        """Return the JWK set from the cache while it is valid, otherwise
        perform a network request to the jwks_url and update the cache.

        Args:
            force (bool, optional): whether to ignore the cache and force a
                network request or not. Defaults to False.
        """
        has_expired = self._cache_date + self._cache_ttl < time.time()
        if not force and not has_expired:
            # Return from cache. Marking it not-fresh lets get_key() retry
            # once with force=True when a key id is missing.
            self._cache_is_fresh = False
            return self._cache_value
        # Invalidate cache and fetch fresh data
        self._cache_value = {}
        response = requests.get(self._jwks_url)
        if response.ok:
            # Update cache
            jwks = response.json()
            self._cache_value = self._parse_jwks(jwks)
            self._cache_is_fresh = True
            self._cache_date = time.time()
        return self._cache_value

    @staticmethod
    def _parse_jwks(jwks):
        """Convert each JWK string representation into an RSA public key
        object, keyed by its 'kid'."""
        keys = {}
        for key in jwks['keys']:
            # noinspection PyUnresolvedReferences
            # requirement already includes cryptography -> pyjwt[crypto]
            rsa_key = jwt.algorithms.RSAAlgorithm.from_jwk(json.dumps(key))
            keys[key["kid"]] = rsa_key
        return keys

    def get_key(self, key_id):
        """Obtain the JWK associated with the given key id.

        Args:
            key_id (str): The id of the key to fetch.

        Returns:
            the JWK associated with the given key id.

        Raises:
            TokenValidationError: when a key with that id cannot be found
        """
        keys = self._fetch_jwks()
        if keys and key_id in keys:
            return keys[key_id]
        # Cache miss on a possibly-stale cache: retry once with a forced fetch.
        if not self._cache_is_fresh:
            keys = self._fetch_jwks(force=True)
            if keys and key_id in keys:
                return keys[key_id]
        raise TokenValidationError('RSA Public Key with ID "{}" was not found.'.format(key_id))
class TokenVerifier():
"""Class that verifies ID tokens following the steps defined in the OpenID Connect spec.
An OpenID Connect ID token is not meant to be consumed until it's verified.
Args:
signature_verifier (SignatureVerifier): The instance that knows how to verify the signature.
issuer (str): The expected issuer claim value.
audience (str): The expected audience claim value.
leeway (int, optional): The clock skew to accept when verifying date related claims in seconds.
Defaults to 60 seconds.
"""
    def __init__(self, signature_verifier, issuer, audience, leeway=0):
        # NOTE(review): the class-level docs claim leeway "Defaults to 60
        # seconds" but the actual default here is 0 -- confirm the intent.
        if not signature_verifier or not isinstance(signature_verifier, SignatureVerifier):
            raise TypeError("signature_verifier must be an instance of SignatureVerifier.")
        self.iss = issuer
        self.aud = audience
        self.leeway = leeway
        self._sv = signature_verifier
        self._clock = None  # visible for testing
"""Attempts to verify the given ID token, following the steps defined in the OpenID Connect spec.
Args:
token (str): The JWT to verify.
nonce (str, optional): The nonce value sent during authentication.
max_age (int, optional): The max_age value sent during authentication.
organization (str, optional): The expected organization ID (org_id) claim value. This should be specified
when logging in to an organization.
Raises:
TokenValidationError: when the token cannot be decoded, the token signing algorithm is not the expected one,
the token signature is invalid or the token has a claim missing or with unexpected value.
"""
def verify(self, token, nonce=None, max_age=None, organization=None):
# Verify token presence
if not token or not isinstance(token, str):
raise TokenValidationError("ID token is required but missing.")
# Verify algorithm and signature
payload = self._sv.verify_signature(token)
# Verify claims
self._verify_payload(payload, nonce, max_age, organization)
def _verify_payload(self, payload, nonce=None, max_age=None, organization=None):
try:
# on Python 2.7, 'str' keys as parsed as 'unicode'
# But 'unicode' was removed on Python 3.7
# noinspection PyUnresolvedReferences
ustr = unicode
except NameError:
ustr = str
# Issuer
if 'iss' not in payload or not isinstance(payload['iss'], (str, ustr)):
raise TokenValidationError('Issuer (iss) claim must be a string present in the ID token')
if payload['iss'] != self.iss:
raise TokenValidationError(
'Issuer (iss) claim mismatch in the ID token; expected "{}", '
'found "{}"'.format(self.iss, payload['iss']))
# Subject
if 'sub' not in payload or not isinstance(payload['sub'], (str, ustr)):
raise TokenValidationError('Subject (sub) claim must be a string present in the ID token')
# Audience
if 'aud' not in payload or not (isinstance(payload['aud'], (str, ustr)) or isinstance(payload['aud'], list)):
raise TokenValidationError(
'Audience (aud) claim must be a string or array of strings present in the ID token')
if isinstance(payload['aud'], list) and not self.aud in payload['aud']:
payload_audiences = ", ".join(payload['aud'])
raise TokenValidationError(
'Audience (aud) claim mismatch in the ID token; expected "{}" but was '
'not one of "{}"'.format(self.aud, payload_audiences))
elif isinstance(payload['aud'], (str, ustr)) and payload['aud'] != self.aud:
raise TokenValidationError(
'Audience (aud) claim mismatch in the ID token; expected "{}" '
'but found "{}"'.format(self.aud, payload['aud']))
# --Time validation (epoch)--
now = self._clock or time.time()
leeway = self.leeway
# Expires at
if 'exp' not in payload or not isinstance(payload['exp'], int):
raise TokenValidationError('Expiration Time (exp) claim must be a number present in the ID token')
exp_time = payload['exp'] + leeway
if now > exp_time:
raise TokenValidationError(
'Expiration Time (exp) claim error in the ID token; current time ({}) is '
'after expiration time ({})'.format(now, exp_time))
# Issued at
if 'iat' not in payload or not isinstance(payload['iat'], int):
raise TokenValidationError('Issued At (iat) claim must be a number present in the ID token')
# Nonce
if nonce:
if 'nonce' not in payload or not isinstance(payload['nonce'], (str, ustr)):
raise TokenValidationError('Nonce (nonce) claim must be a string present in the ID token')
if payload['nonce'] != nonce:
raise TokenValidationError(
'Nonce (nonce) claim mismatch in the ID token; expected "{}", '
'found "{}"'.format(nonce, payload['nonce']))
# Organization
if organization:
if 'org_id' not in payload or not isinstance(payload['org_id'], (str, ustr)):
raise TokenValidationError('Organization (org_id) claim must be a string present in the ID token')
if payload['org_id'] != organization:
raise TokenValidationError(
'Organization (org_id) claim mismatch in the ID token; expected "{}", '
| |
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Contents loader functions for DataSource.
'''
from scipy.misc import imresize, imread
from shutil import rmtree
from six import StringIO
from six.moves.urllib.parse import urljoin
from tqdm import tqdm
import contextlib
import csv
import h5py
import numpy
import os
import six.moves.urllib.request as request
import six
import tempfile
from nnabla.logger import logger
pypng_available = False
try:
import png
pypng_available = True
except ImportError:
pass
cv2_available = False
try:
import cv2
# TODO: Currently cv2 image reader doesn't work.
# cv2_available = True
except ImportError:
pass
class FileReader:
    '''FileReader

    Read dataset from several data sources.

    Supported data sources are,

    * Local file (file or directory name)
    * HTTP/HTTPS (URI)
    * S3 (URI with s3:// prefix)

    Currently HTTP/HTTPS source does not support CACHE input because
    there is no standard way to get directory entry with
    HTTP/HTTPS/protocol.

    To access S3 data, you must specify credentials with environment
    variable.

    For example,

    ::

        $ export AWS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE
        $ export AWS_SECRET_ACCESS_KEY=<KEY>

    Or, you can specify PROFILE with following.

    ::

        $ export AWS_DEFAULT_PROFILE=my_profile
    '''

    def __init__(self, base_uri):
        self._base_uri = base_uri
        if base_uri[0:5].lower() == 's3://':
            self._file_type = 's3'
            uri_header, uri_body = self._base_uri.split('://', 1)
            us = uri_body.split('/')
            bucketname = us.pop(0)
            self._s3_base_key = '/'.join(us)
            logger.info('Creating session for S3 bucket {}'.format(bucketname))
            import boto3
            self._s3_bucket = boto3.session.Session().resource('s3').Bucket(bucketname)
        elif base_uri[0:7].lower() == 'http://' or base_uri[0:8].lower() == 'https://':
            self._file_type = 'http'
        else:
            self._file_type = 'file'

    @contextlib.contextmanager
    def open(self, filename=None):
        '''Open `filename` (or the base URI itself) and yield a file object.

        Fix: the file object is now closed in a finally clause so it cannot
        leak when the caller's `with` body raises.
        '''
        if filename is None:
            filename = self._base_uri
        else:
            if self._file_type == 's3':
                filename = urljoin(self._base_uri.replace(
                    's3://', 'http://'), filename.replace('\\', '/')).replace('http://', 's3://')
            elif self._file_type == 'http':
                filename = urljoin(self._base_uri, filename.replace('\\', '/'))
            else:
                filename = os.path.abspath(os.path.join(os.path.dirname(
                    self._base_uri.replace('\\', '/')), filename.replace('\\', '/')))
        f = None
        if self._file_type == 's3':
            uri_header, uri_body = filename.split('://', 1)
            us = uri_body.split('/')
            bucketname = us.pop(0)
            key = '/'.join(us)
            logger.info('Opening {}'.format(key))
            f = StringIO(self._s3_bucket.Object(key).get()['Body'].read())
        elif self._file_type == 'http':
            f = request.urlopen(filename)
        else:
            f = open(filename, 'rb')
        try:
            yield f
        finally:
            f.close()

    @contextlib.contextmanager
    def open_cache(self, cache_name):
        '''Open an HDF5 cache file and yield the h5py File object.

        Fix: for the S3 case the temporary directory is now removed in a
        finally clause so it cannot leak when the caller's body raises.
        '''
        if self._file_type == 's3':
            tmpdir = tempfile.mkdtemp()
            try:
                filename = urljoin((self._base_uri + '/').replace('s3://', 'http://'),
                                   cache_name.replace('\\', '/')).replace('http://', 's3://')
                key = '/'.join(filename.split('/')[3:])
                fn = '{}/{}'.format(tmpdir, os.path.basename(filename))
                with open(fn, 'wb') as f:
                    f.write(self._s3_bucket.Object(key).get()['Body'].read())
                with h5py.File(fn, 'r') as h5:
                    yield h5
            finally:
                rmtree(tmpdir, ignore_errors=True)
        elif self._file_type == 'http':
            # Cache input over HTTP is unsupported (no directory listing).
            pass
        else:
            filename = os.path.abspath(os.path.join(os.path.dirname(
                (self._base_uri + '/').replace('\\', '/')), cache_name.replace('\\', '/')))
            with h5py.File(filename, 'r') as h5:
                yield h5

    def listdir(self):
        '''Return the sorted directory entries of the base URI (None for HTTP).'''
        if self._file_type == 's3':
            entries = []  # renamed from 'list' to avoid shadowing the builtin
            for fn in self._s3_bucket.objects.filter(Prefix=self._s3_base_key + '/', Delimiter='/'):
                entries.append(os.path.basename(fn.key))
            return sorted(entries)
        elif self._file_type == 'http':
            return None
        return sorted(os.listdir(self._base_uri))
def load_image_imread(file, shape=None, max_range=1.0):
    '''
    Load image from file like object.

    :param file: Image contents
    :type file: file like object.
    :param shape: shape of output array
        e.g. (3, 128, 192) : n_color, height, width.
    :type shape: tuple of int
    :param float max_range: the value of return array ranges from 0 to `max_range`.
        A negative value (or exactly 255.0) returns the raw 0..255 data.

    :return: numpy array shaped (n_color, height, width)

    NOTE(review): scipy.misc.imread/imresize were removed in SciPy >= 1.2;
    this code requires an old SciPy (or PIL via scipy.misc) -- confirm.
    '''
    img255 = imread(
        file)  # return value is from zero to 255 (even if the image has 16-bitdepth.)
    if len(img255.shape) == 2:  # gray image
        height, width = img255.shape
        if shape is None:
            out_height, out_width, out_n_color = height, width, 1
        else:
            out_n_color, out_height, out_width = shape
        assert(out_n_color == 1)
        if out_height != height or out_width != width:
            # imresize returns 0 to 255 image.
            img255 = imresize(img255, (out_height, out_width))
        # Gray images gain a leading singleton channel axis.
        img255 = img255.reshape((out_n_color, out_height, out_width))
    elif len(img255.shape) == 3:  # RGB image
        height, width, n_color = img255.shape
        if shape is None:
            out_height, out_width, out_n_color = height, width, n_color
        else:
            out_n_color, out_height, out_width = shape
        assert(out_n_color == n_color)
        if out_height != height or out_width != width or out_n_color != n_color:
            # imresize returns 0 to 255 image.
            img255 = imresize(img255, (out_height, out_width, out_n_color))
        # HWC -> CHW layout expected by the rest of the pipeline.
        img255 = img255.transpose(2, 0, 1)
    if max_range < 0 or max_range == 255.0:
        # Caller asked for the raw 0..255 values.
        return img255
    else:
        return img255 * (max_range / 255.0)
def load_image_pypng(file, shape=None, max_range=1.0):
    '''Load a PNG (including >8-bit depth) with pypng and return a CHW array
    scaled to [0, max_range]; alpha channels are composited onto the
    background color (or white when no background chunk is present).'''
    import png
    r = png.Reader(file=file)
    width, height, pixels, metadata = r.read()
    # Scale factor that maps raw samples onto [0, 1] for this bit depth.
    bitscale = 2 ** metadata['bitdepth'] - 1
    img = numpy.array(list(pixels), dtype=numpy.float32).reshape(
        (height, width, -1)) / bitscale  # (height, width, n_channel)
    if metadata['alpha'] and metadata['planes'] == 4:  # RGBA
        # TODO: this case is note tested well
        try:
            bg = numpy.array(metadata['background']) / bitscale
        except KeyError:
            bg = numpy.array([1.0, 1.0, 1.0])
        rgb = img[:, :, :3]
        alpha = img[:, :, 3]
        imshp = alpha.shape
        # Standard alpha compositing: (1 - a) * background + a * foreground.
        img = numpy.outer((1 - alpha), bg).reshape(imshp + (3,)) +\
            numpy.tile(alpha.reshape(imshp + (1,)), (1, 1, 3)) * rgb
        out_n_color = 3
    elif metadata['alpha'] and metadata['planes'] == 2:  # (gray, alpha)
        # TODO: this case is note tested well
        try:
            bg = numpy.array(metadata['background']) / bitscale
        except KeyError:
            bg = numpy.array([1.0])
        rgb = img[:, :, :1]
        alpha = img[:, :, 1]
        imshp = alpha.shape
        img = numpy.outer((1 - alpha), bg).reshape(imshp + (1,)
                                                   ) + alpha.reshape(imshp + (1,)) * rgb
        out_n_color = 1
    else:  # RGB or Gray
        out_n_color = metadata['planes']
    # Reshape image
    if max_range < 0:
        # Negative means "raw range": emit 0..255 like the other loaders.
        max_range = 255
    if shape is None:
        return img.transpose(2, 0, 1) * max_range
    else:
        out_n_color, out_height, out_width = shape
        # NOTE(review): imresize converts to uint8 0..255 internally, hence
        # the final / 255.0 rescale -- confirm against old scipy behavior.
        return imresize(img, (out_height, out_width, out_n_color)).transpose((2, 0, 1)) * max_range / 255.0
def load_image_cv2(file, shape=None, max_range=1.0):
    '''Load an image with OpenCV from a file-like object and return a CHW
    array; channel order is converted from OpenCV's BGR(A) to RGB(A).

    NOTE(review): unlike the other loaders, `shape` is accepted but never
    used here (no resize is performed) -- confirm whether that is intended.
    '''
    img = cv2.imdecode(numpy.asarray(bytearray(file.read()),
                                     dtype=numpy.uint8), cv2.IMREAD_UNCHANGED)
    if len(img.shape) == 2:  # gray image
        height, width = img.shape
        img = img.reshape(1, height, width)
    elif len(img.shape) == 3:  # rgb image
        if img.shape[2] == 3:
            img = img[:, :, ::-1].copy()  # BGR to RGB
            img = img.transpose(2, 0, 1)
        elif img.shape[2] == 4:
            img = img.transpose(2, 0, 1)  # BGRA to RGBA
            img = numpy.array([img[2], img[1], img[0], img[3]])
    # Range conversion: negative max_range keeps the raw integer data;
    # 255/65535 convert between 8- and 16-bit; anything else yields floats.
    if max_range < 0:
        pass
    elif max_range == 255:
        if img.dtype == numpy.uint8:
            pass
        elif img.dtype == numpy.uint16:
            img = numpy.uint8(img / 256)
    elif max_range == 65535:
        if img.dtype == numpy.uint8:
            img = numpy.uint16(img * 256)
        elif img.dtype == numpy.uint16:
            pass
    else:
        if img.dtype == numpy.uint8:
            img = numpy.float32(img) * max_range / 255.0
        elif img.dtype == numpy.uint16:
            img = numpy.float32(img) * max_range / 65535.0
    return img
def load_image(file, shape=None, normalize=False):
    '''Dispatch image loading to the available backend: cv2 when enabled,
    pypng for high-bitdepth PNGs, otherwise scipy imread.

    :param file: file-like object with the image contents.
    :param shape: optional (n_color, height, width) output shape.
    :param normalize: when True, scale values into [0, 1]; otherwise keep
        the raw integer range.
    '''
    max_range = 1.0 if normalize else -1
    if cv2_available:
        return load_image_cv2(file, shape, max_range)
    try:
        ext = os.path.splitext(file.name)[1].lower()
    except:
        # Not every file-like object exposes a .name attribute.
        ext = None
    if ext == '.png' and pypng_available:
        reader = png.Reader(file=file)
        _width, _height, _pixels, metadata = reader.read()
        file.seek(0)
        if metadata['bitdepth'] > 8:  # only pypng handles >8-bit PNGs
            return load_image_pypng(file, shape, max_range)
    return load_image_imread(file, shape, max_range)
def load_csv(file, shape=None, normalize=False):
    """
    Load CSV file.

    :param file: CSV file.
    :type file: file like object
    :param shape : data array is reshape to this shape.
    :type shape: tuple of int

    :return: numpy array of floats

    Fix: the original appended ``map(float, row)`` objects; on Python 3 a
    map is a lazy iterator, so numpy.array produced an object-dtype array
    of map objects instead of numbers. Materialize each row as a list.
    """
    value_list = []
    for row in csv.reader(file):
        value_list.append([float(v) for v in row])
    if shape is None:
        return numpy.array(value_list)
    else:
        return numpy.array(value_list).reshape(shape)
def load_npy(path, shape=None, normalize=False):
    """Load a .npy file, optionally reshaped; `normalize` is accepted for
    loader-interface compatibility but ignored."""
    data = numpy.load(path)
    return data if shape is None else data.reshape(shape)
# Default mapping from file extension to loader function; extend it at
# runtime via register_load_function().
_load_functions = {
    '.bmp': load_image,
    '.jpg': load_image,
    '.jpeg': load_image,
    '.png': load_image,
    '.gif': load_image,
    '.tif': load_image,
    '.csv': load_csv,
    '.npy': load_npy}
def register_load_function(ext, function):
    """Register (or override) the loader used for a file extension, e.g. '.bmp'."""
    _load_functions[ext] = function
def load(ext):
    """Return the loader registered for `ext`, or None when unknown."""
    return _load_functions.get(ext)
def _download_hook(t):
last_b = [0]
def inner(b=1, bsize=1, tsize=None):
if tsize is not None:
t.total = tsize
t.update((b - last_b[0]) * bsize)
last_b[0] = b
return inner
def get_data_home():
    """Return the nnabla download cache directory (~/nnabla_data), creating it
    when it does not exist yet."""
    data_home = os.path.expanduser("~/nnabla_data")
    if not os.path.isdir(data_home):
        os.makedirs(data_home)
    return data_home
def download(url):
filename = url.split('/')[-1]
cache = os.path.join(get_data_home(), filename)
if os.path.exists(cache):
logger.info("> {} in cache.".format(cache))
logger.info("> If you have any issue when using this file, ")
logger.info("> manually remove the file and try download again.")
else:
r = request.urlopen(url)
try:
if six.PY2:
content_length = int(r.info().dict['content-length'])
elif six.PY3:
content_length = int(r.info()['Content-Length'])
except:
content_length = 0
unit = 1000000
content = b''
with tqdm(total=content_length, desc=filename) as t:
while True:
data = r.read(unit)
l = len(data)
t.update(l)
if l == 0:
break
content += data
with open(cache, 'wb') as | |
= RobotModelArtist(self.process.robot_model, self.robot_layer)
self._robot_artist.scale(1000)
return self._robot_artist
else:
print("Error: Attempy to create new RobotModelArtist but process.robot_model is None")
self._robot_artist = None
return None
#######################################
# Functions to handle the guid records
#######################################
def beam_guids(self, beam_id):
# type: (str) -> dict[str, list[guid]]
if beam_id not in self._beam_guids:
self._beam_guids[beam_id] = {}
return self._beam_guids[beam_id]
def beam_guids_at_position(self, beam_id, position_id):
# type: (str, str) -> list[guid]
if position_id not in self.beam_guids(beam_id):
self.beam_guids(beam_id)[position_id] = []
return self.beam_guids(beam_id)[position_id]
def gripper_guids(self, beam_id):
# type: (str) -> dict[str, list[guid]]
if beam_id not in self._gripper_guids:
self._gripper_guids[beam_id] = {}
return self._gripper_guids[beam_id]
def gripper_guids_at_position(self, beam_id, position_id):
# type: (str, str) -> list[guid]
if position_id not in self.gripper_guids(beam_id):
self.gripper_guids(beam_id)[position_id] = []
return self.gripper_guids(beam_id)[position_id]
def asstool_guids(self, joint_id):
# type: (tuple(str, str)) -> dict[str, list[guid]]
if joint_id not in self._asstool_guids:
self._asstool_guids[joint_id] = {}
return self._asstool_guids[joint_id]
def asstool_guids_at_position(self, joint_id, position_id):
# type: (str, str) -> list[guid]
if position_id not in self.asstool_guids(joint_id):
self.asstool_guids(joint_id)[position_id] = []
return self.asstool_guids(joint_id)[position_id]
def interactive_guids(self, beam_id):
# type: (tuple(str, str)) -> dict[str, list[guid]]
if beam_id not in self._interactive_guids:
self._interactive_guids[beam_id] = {}
return self._interactive_guids[beam_id]
def interactive_guids_at_layer(self, beam_id, layer_name):
# type: (str, str) -> list[guid]
if layer_name not in self.interactive_guids(beam_id):
self.interactive_guids(beam_id)[layer_name] = []
return self.interactive_guids(beam_id)[layer_name]
def state_visualization_guids(self, object_id):
# type: (str) -> list[guid]
if object_id not in self._state_visualization_guids:
self._state_visualization_guids[object_id] = []
return self._state_visualization_guids[object_id]
def tools_in_storage_guids(self, tool_id):
# type: (str) -> list[guid]
if tool_id not in self._tools_in_storage_guids:
self._tools_in_storage_guids[tool_id] = []
return self._tools_in_storage_guids[tool_id]
def env_mesh_guids(self, env_id):
# type: (str) -> list[guid]
if env_id not in self._env_mesh_guids:
self._env_mesh_guids[env_id] = []
return self._env_mesh_guids[env_id]
###########################################################
# Functions to keep track of user selected interactive beam
###########################################################
@property
def selected_beam_id(self):
return self._selected_beam_id
    @selected_beam_id.setter
    def selected_beam_id(self, beam_id):
        # type: (str) -> None
        """Set the selected beam; resets the key-position tracker to the beam's final position."""
        # Do not change anything if the id is the same
        if beam_id == self._selected_beam_id:
            return
        self._selected_beam_id = beam_id
        # update selected_key_position object
        if beam_id is not None:
            self.selected_key_position = ProcessKeyPosition(self.process, self.selected_beam_id, 0)
            self.selected_key_position.final_position()
def select_next_beam(self):
# type: () -> str
""" Increment self.selected_beam_id based on its seq_num """
assembly = self.process.assembly
seq_num = assembly.get_beam_sequence(self.selected_beam_id) + 1
seq_num = min(seq_num, len(assembly.sequence) - 1) # seq_num not more than len(assembly.sequence) - 1
self.selected_beam_id = assembly.sequence[seq_num]
    def select_previous_beam(self):
        # type: () -> None
        """ Decrement self.selected_beam_id based on its seq_num (clamped at 0).

        (Docstring fixed: it previously said "Increment", copied from
        select_next_beam; the return annotation also said str but nothing
        is returned.)
        """
        assembly = self.process.assembly
        seq_num = assembly.get_beam_sequence(self.selected_beam_id) - 1
        seq_num = max(seq_num, 0)  # seq_num not less than 0
        self.selected_beam_id = assembly.sequence[seq_num]
#############################
# Beam in Interactive Layers
#############################
def draw_beam_seqtag(self, beam_id, faces=[1, 3], padding_factor=0.2, size_factor=0.6, redraw=True):
assembly = self.process.assembly
seq_num = assembly.sequence.index(beam_id)
rs.EnableRedraw(False)
for face_id in faces:
# Get Face Frame
beam = assembly.beam(beam_id)
face_frame = beam.reference_side_wcf(face_id)
# Move Frame orgin for padding
beam_size_min = min(beam.height, beam.width)
padding = beam_size_min * padding_factor
padded_location_origin = face_frame.to_world_coordinates([padding, padding, beam_size_min * 0.01])
face_frame.point = padded_location_origin
# Test and Size
tag_text = ".%s." % seq_num
tag_height = beam_size_min * size_factor
# Create Tag
layer = 'itj::interactive::beams_seqtag'
guid = AddAnnotationText(face_frame, tag_text, tag_height, layer, redraw=redraw)
self.interactive_guids_at_layer(beam_id, layer).append(guid)
if redraw:
rs.EnableRedraw(True)
def draw_beam_mesh(self, beam_id, update_cache=False, redraw=True):
# type:(str, bool, bool) -> None
assembly = self.process.assembly
if beam_id not in assembly.beam_ids():
raise KeyError("Beam %i not in Assembly" % beam_id)
beam_mesh = assembly.get_beam_mesh_in_wcf(beam_id, not update_cache)
# Layer
layer = 'itj::interactive::beams_mesh'
rs.CurrentLayer(layer)
# Draw Mesh
guids = self.draw_meshes_get_guids([beam_mesh], beam_id)
self.interactive_guids_at_layer(beam_id, layer).extend(guids)
# Redraw
if redraw:
rs.EnableRedraw(True)
def draw_beam_brep(self, beam_id, delete_old_brep=True, update_mesh_cache=False, redraw=True):
# type: (str, bool, bool, bool) -> List[guid]
assembly = self.process.assembly
if beam_id not in assembly.beam_ids():
raise KeyError("Beam %i not in Assembly" % beam_id)
if update_mesh_cache:
assembly.beam(beam_id).remove_cached_mesh()
assembly.get_beam_mesh_in_wcf(beam_id, False)
# Layer
layer = 'itj::interactive::beams_brep'
rs.CurrentLayer(layer)
# Obtain tool features on Beam from Process
other_feature_shapes = self.process.get_tool_features_on_beam(beam_id)
# * Call assembly artist with all the extra feature shapes
guids = self.assembly_artist.draw_beam(beam_id=beam_id, delete_old=delete_old_brep, redraw=False, other_feature_shapes=other_feature_shapes)
self.interactive_guids_at_layer(beam_id, layer).extend(guids)
# Redraw
if redraw:
rs.EnableRedraw(True)
def redraw_interactive_beam(self, beam_id, force_update=True, draw_mesh=False, draw_nurbs=True, draw_tag=True, redraw=True):
    # type: (str, bool, bool, bool, bool, bool) -> None
    ''' Redraw beam visualizations.
    Redraws interactive beam mesh and sequence tag.

    force_update : delete the existing visualization first and force
        cache updates in the redraw calls below.
    draw_mesh / draw_nurbs / draw_tag : select which representations to draw.
    redraw : if True, the Rhino viewport is redrawn once at the end.
    '''
    # Suppress intermediate redraws; a single redraw happens at the end.
    rs.EnableRedraw(False)
    if force_update:
        self.delete_interactive_beam_visualization(beam_id, redraw=False)
    if draw_mesh:
        self.draw_beam_mesh(beam_id, update_cache=force_update, redraw=False)
    if draw_nurbs:
        self.draw_beam_brep(beam_id, delete_old_brep=force_update, update_mesh_cache=False, redraw=False)
    if draw_tag:
        self.draw_beam_seqtag(beam_id, redraw=False)
    if redraw:
        rs.EnableRedraw(True)
def interactive_beam_guid(self, beam_id, layer='itj::interactive::beams_brep'):
    # type:(str, str) -> list[guid]
    '''Return the guid(s) of the interactive representation of a beam.

    Typically a one-element list holding the object that represents the beam
    on the given layer.
    '''
    guids = self.interactive_guids_at_layer(beam_id, layer)
    return guids
######################
# Beam in Interactive Layers
# Show Hide Color
######################
def show_interactive_beam(self, beam_id):
    """Unhide the interactive geometry of `beam_id` on every interactive layer."""
    for layer_name in self.interactive_layers:
        guids = self.interactive_guids_at_layer(beam_id, layer_name)
        rs.ShowObject(guids)
def hide_interactive_beam(self, beam_id):
    """ Hide the beam of the beam_id.
    (Docstring fixed: this method hides, it does not show.)
    """
    for layer in self.interactive_layers:
        rs.HideObject(self.interactive_guids_at_layer(beam_id, layer))
def show_interactive_beam_until(self, beam_id):
    """Show only the beams up to and including `beam_id`; hide the rest.

    If `beam_id` is None, every beam is hidden.
    If `beam_id` is not in the assembly sequence, every beam is shown.
    """
    # Visibility starts on (unless beam_id is None) and is switched off
    # after the loop passes beam_id.
    visible = beam_id is not None
    for current_id in self.process.assembly.sequence:
        if visible:
            self.show_interactive_beam(current_id)
        else:
            self.hide_interactive_beam(current_id)
        if current_id == beam_id:
            visible = False
def change_interactive_beam_colour(self, beam_id, meaning, layer='itj::interactive::beams_brep'):
    # type: (str, str, str) -> None
    """ Change the beam brep and mesh color to a given colour string.
    Colour string refers to the color_meaning dict; unknown meanings
    fall back to black (0, 0, 0).
    """
    for guid in self.interactive_guids_at_layer(beam_id, layer):
        rs.ObjectColor(guid, self.color_meaning.get(meaning, (0, 0, 0)))
#############################
# Beam in different positions
#############################
def draw_beam_all_positions(self, beam_id, delete_old=False, verbose=False, redraw=True):
    # type: (str, bool, bool, bool) -> None
    """ Delete old beam geometry if delete_old is True.
    Redraw them in Rhino in different layers.
    The resulting Rhino guids are kept in self.beam_guids_at_position(beam_id, position)
    This applies to all positions where the attribute is set in beam attributes.
    """
    rs.EnableRedraw(False)
    for beam_position in ProcessKeyPosition().possible_beam_positions:
        # NOTE(review): layer_name is computed but never used below — confirm
        # whether a rs.CurrentLayer(layer_name) call was intended.
        layer_name = 'itj::beam::' + beam_position
        # If not delete_old, and there are already items drawn, we preserve them.
        if len(self.beam_guids_at_position(beam_id, beam_position)) > 0 and not delete_old:
            continue
        # Delete old geometry
        self.delete_beam_at_position(beam_id, beam_position, redraw=False)
        # Skip the rest of code if the position does not exist.
        if self.process.assembly.get_beam_attribute(beam_id, beam_position) is None:
            if verbose:
                print("Skipping Beam (%s) position: %s" % (beam_id, beam_position))
            continue
        if verbose:
            print("Drawing Beam(%s) in position: %s" % (beam_id, beam_position))
        # Transform the beam mesh to the key position and draw it.
        T = self.process.assembly.get_beam_transformaion_to(beam_id, beam_position)
        beam_mesh = self.process.assembly.get_beam_mesh_in_wcf(beam_id).transformed(T)  # type: Mesh
        guids = self.draw_meshes_get_guids([beam_mesh], beam_id, redraw=False)
        self.beam_guids_at_position(beam_id, beam_position).extend(guids)
    if redraw:
        rs.EnableRedraw(True)
def delete_beam_all_positions(self, beam_id, redraw=True):
    """Delete the Rhino geometry of a beam at every key position."""
    rs.EnableRedraw(False)
    for position_name in ProcessKeyPosition().possible_beam_positions:
        # Redraw is suppressed per call; one redraw happens at the end.
        self.delete_beam_at_position(beam_id, position_name, redraw=False)
    if redraw:
        rs.EnableRedraw(True)
def delete_beam_at_position(self, beam_id, beam_position, redraw=True):
    # type:(str, str, bool) -> None
    """Delete all Rhino geometry associated to a beam at the specified position.

    No change is made if no guids are tracked for the beam_id / beam_position.
    """
    # Single lookup; the original fetched the list twice and length-checked
    # it twice.
    guids = self.beam_guids_at_position(beam_id, beam_position)
    if not guids:
        return
    purge_objects(guids, redraw)
    # Clear in place so other references to the tracked list stay valid.
    del guids[:]
def show_beam_at_one_position(self, beam_id, position=None):
    """Show the beam only at `position`; hide it at every other key position.

    If `position` is None, the currently selected key position is used.
    """
    if position is None:
        position = self.selected_key_position.current_beam_pos
    for candidate in ProcessKeyPosition().possible_beam_positions:
        guids = self.beam_guids_at_position(beam_id, candidate)
        if candidate == position:
            rs.ShowObject(guids)
        else:
            rs.HideObject(guids)
def hide_beam_all_positions(self, beam_id):
    """ Hide the beam geometry at all key positions.

    Implemented by showing the beam at a non-existent position (''),
    which hides every real position.
    (Docstring fixed: the original mentioned "gripper instances", but this
    method operates on beam geometry.)
    """
    self.show_beam_at_one_position(beam_id, '')
def delete_interactive_beam_visualization(self, beam_id, redraw=True):
    # type:(str, bool) -> None
    """ Delete visualization geometry (brep, mesh, tag etc) related to a beam.
    Tools are not affected.
    Stored guid reference is also removed.
    If beam_id is not yet tracked in self.guid, the new entry will be created.
    """
    rs.EnableRedraw(False)
    if beam_id in self.process.assembly.beam_ids():
        for layer in self.interactive_layers:
            if len(self.interactive_guids_at_layer(beam_id, layer)) > 0:
                # Purge without per-call redraw; a single redraw happens below.
                purge_objects(self.interactive_guids_at_layer(beam_id, layer), redraw=False)
            # Clear the tracked guid list in place.
            del self.interactive_guids_at_layer(beam_id, layer)[:]
        # Also clear the guids tracked by the assembly artist.
        del self.assembly_artist.beam_guids(beam_id)[:]
    if redraw:
        rs.EnableRedraw(True)
@property
def all_layer_names(self):
    """Generator over every Rhino layer name managed by this artist,
    in a fixed order: interactive layers, gripper/tool/beam key-position
    layers, then the miscellaneous single layers."""
    for interactive_layer in self.interactive_layers:
        yield interactive_layer
    for position_name in ProcessKeyPosition().possible_gripper_positions:
        yield 'itj::gripper::' + position_name
    for position_name in ProcessKeyPosition().possible_tool_positions:
        yield 'itj::tool::' + position_name
    for position_name in ProcessKeyPosition().possible_beam_positions:
        yield 'itj::beam::' + position_name
    yield self.state_visualization_layer
    yield self.tools_in_storage_layer
    yield self.env_mesh_layer
    yield self.robot_layer
def empty_layers(self):
# type:() -> None
"""Clear the default | |
<filename>edge-bootstrap/python/edgectl/utils/certutil.py<gh_stars>0
""" Module implements utility class EdgeCertUtil for generating a X.509 certificate chain"""
import logging
import os
from datetime import datetime
from shutil import copy2
from OpenSSL import crypto
from edgectl.config import EdgeConstants as EC
import edgectl.errors
from edgectl.utils.edgeutils import EdgeUtils
class EdgeCertUtil(object):
""" Class EdgeCertUtil implements APIs for generating X.509 certificate chains.
Clients are expected to begin either by calling create_root_ca_cert() or
set_root_ca_cert() to establish a root CA certificate in the chain.
Thereafter, clients can call API create_intermediate_ca_cert() to create
any number of intermediate CA certs. To terminate a chain clients can
call create_server_cert(). To export the certificate chain clients should
call APIs export_cert_artifacts_to_dir() export_pfx_cert().
"""
# Public key type selector (maps to pyOpenSSL via _type_dict below).
TYPE_RSA = 0
# Certificate validity window limits (days).
MIN_VALIDITY_DAYS = 1
MAX_VALIDITY_DAYS = 1095  # 3 years
# Private key passphrase length limits.
MIN_PASSPHRASE_LENGTH = 4
MAX_PASSPHRASE_LENGTH = 1023
# RSA key lengths (bits) for root CA, intermediate CA and server certs.
CA_KEY_LEN = 4096
CA_INT_KEY_LEN = 4096
SERVER_KEY_LEN = 2048
# X.509 common-name length limits.
MIN_COMMON_NAME_LEN = 1
MAX_COMMON_NAME_LEN = 64
# Digest algorithm used when signing certificates.
DIGEST = 'sha256'
# Maps the key-type selector above to the pyOpenSSL crypto constant.
_type_dict = {TYPE_RSA: crypto.TYPE_RSA}
# Per-field {MIN, MAX} length constraints for certificate subject fields.
_subject_validation_dict = {
    EC.SUBJECT_COUNTRY_KEY: {'MIN': 2, 'MAX': 2},
    EC.SUBJECT_STATE_KEY: {'MIN': 0, 'MAX': 128},
    EC.SUBJECT_LOCALITY_KEY: {'MIN': 0, 'MAX': 128},
    EC.SUBJECT_ORGANIZATION_KEY: {'MIN': 0, 'MAX': 64},
    EC.SUBJECT_ORGANIZATION_UNIT_KEY: {'MIN': 0, 'MAX': 64},
    EC.SUBJECT_COMMON_NAME_KEY: {'MIN': MIN_COMMON_NAME_LEN, 'MAX': MAX_COMMON_NAME_LEN},
}
def __init__(self, serial_num=1000):
    """Initialize an empty certificate chain.

    Args:
        serial_num (int): First serial number assigned to generated certificates.
    """
    # Maps a user supplied id string to a dict describing the certificate
    # (key pair, csr, cert, issuer id, passphrase / file paths).
    self._cert_chain = {}
    self._serial_number = serial_num
@staticmethod
def is_valid_certificate_subject(subject_dict):
    """
    Utility API to validate if the certificate subject fields are valid.
    Validates if all the required keys listed below are present and have valid
    values per the description.

    Args:
        subject_dict (dict): Certificate subject dict with both key and values as strings
            edgectl.edgeconstants.SUBJECT_COUNTRY_KEY: Country code (2 chars)
            edgectl.edgeconstants.SUBJECT_STATE_KEY: State (128 chars)
            edgectl.edgeconstants.SUBJECT_LOCALITY_KEY: Locality/city (128 chars)
            edgectl.edgeconstants.SUBJECT_ORGANIZATION_KEY: organization (64 chars)
            edgectl.edgeconstants.SUBJECT_ORGANIZATION_UNIT_KEY: organization unit (64 chars)
            edgectl.edgeconstants.SUBJECT_COMMON_NAME_KEY: device CA common name (64 chars)

    Returns:
        True if the subject field is valid, False otherwise.
    """
    # Validation stops at the first failing field, like the original
    # break-on-error loop.
    for key, limits in EdgeCertUtil._subject_validation_dict.items():
        if key not in subject_dict:
            logging.error('Missing key in X.509 certificate subject: %s', key)
            return False
        field = subject_dict[key]
        if field is None:
            logging.error('Value for field: %s cannot be None', key)
            return False
        if not limits['MIN'] <= len(field) <= limits['MAX']:
            logging.error('Length of X.509 cert subject field: %s is invalid', key)
            return False
    return True
def create_root_ca_cert(self, id_str, **kwargs):
    """
    API to create the root certificate in the certificate chain. This implies that the
    CA certificate will be self signed.

    Args:
        id_str (str): A user defined unique id string to identify the certificate
        kwargs:
            validity_days_from_now (int): Number of days for certificate validity starting
                                          from the time the API was invoked. Optional,
                                          if validity is not provided default is 365 days.
                                          Validity days min: EdgeCertUtil.MIN_VALIDITY,
                                          max: EdgeCertUtil.MAX_VALIDITY.
            subject_dict (dict): Certificate subject dict set per specifications of API
                                 validate_certificate_subject(). Required.
            passphrase (str): Private key passphrase. Optional.
                              Passphrase length min: EdgeCertUtil.MIN_PASSPHRASE_LENGTH,
                              max: EdgeCertUtil.MAX_PASSPHRASE_LENGTH.

    Raises:
        edgectl.errors.EdgeValueError - Any input found to be invalid
    """
    if id_str in list(self._cert_chain.keys()):
        msg = 'Duplicate root CA certificate ID: {0}'.format(id_str)
        logging.error(msg)
        raise edgectl.errors.EdgeValueError(msg)
    validity_days_from_now = _get_kwargs_validity('validity_days_from_now',
                                                  self.MIN_VALIDITY_DAYS,
                                                  self.MAX_VALIDITY_DAYS, **kwargs)
    # Guard clause: a subject dictionary is mandatory.
    if 'subject_dict' not in kwargs:
        msg = 'Certificate subject dictionary is required.'
        logging.error(msg)
        raise edgectl.errors.EdgeValueError(msg)
    subj_dict = kwargs['subject_dict']
    if self.is_valid_certificate_subject(subj_dict) is False:
        msg = 'Certificate subject dictionary is invalid.'
        logging.error(msg)
        raise edgectl.errors.EdgeValueError(msg)
    passphrase = _get_kwargs_passphrase('passphrase', self.MIN_PASSPHRASE_LENGTH,
                                        self.MAX_PASSPHRASE_LENGTH, **kwargs)
    key_obj = self._create_key_pair(self.TYPE_RSA, self.CA_KEY_LEN)
    csr_obj = self._create_csr(key_obj,
                               C=subj_dict[EC.SUBJECT_COUNTRY_KEY],
                               ST=subj_dict[EC.SUBJECT_STATE_KEY],
                               L=subj_dict[EC.SUBJECT_LOCALITY_KEY],
                               O=subj_dict[EC.SUBJECT_ORGANIZATION_KEY],
                               # BUGFIX: OU previously reused the organization
                               # key; use the organization *unit* key.
                               OU=subj_dict[EC.SUBJECT_ORGANIZATION_UNIT_KEY],
                               CN=subj_dict[EC.SUBJECT_COMMON_NAME_KEY])
    validity_secs_from_now = validity_days_from_now * 24 * 60 * 60
    # Self-signed: the subject CSR doubles as the issuer CSR.
    cert_obj = self._create_ca_cert(csr_obj,
                                    csr_obj,
                                    key_obj,
                                    self._serial_number,
                                    (0, validity_secs_from_now),
                                    False)
    self._serial_number += 1
    cert_dict = {}
    cert_dict['key_pair'] = key_obj
    cert_dict['csr'] = csr_obj
    cert_dict['cert'] = cert_obj
    cert_dict['issuer_id'] = id_str
    # Restored: the source had a redacted placeholder ('<PASSWORD>') here;
    # the validated passphrase kwarg is what belongs in the chain entry.
    cert_dict['passphrase'] = passphrase
    self._cert_chain[id_str] = cert_dict
def set_ca_cert(self, id_str, **kwargs):
    """
    API to set a CA certificate in the certificate chain. This certificate may be
    an intermediate CA certificate or a root CA certificate.

    Args:
        id_str (str): A user defined unique id string to identify the certificate
        kwargs:
            ca_cert_file_path (str): File path to the CA certificate
            ca_private_key_file_path (str): File path to the CA certificate's private key
            ca_root_cert_file_path (str): File path to the CA certificate's root
                                          certificate if any. If this is a self
                                          signed root certificate set this to the same
                                          value as kwarg 'ca_cert_file_path'.
            ca_root_chain_cert_file_path (str): File path to the CA certificate's root chain
                                                certificate if any. If this is a self
                                                signed root certificate set this to the same
                                                value as kwarg 'ca_cert_file_path'.
            passphrase (str): Private key passphrase. Optional.
                              Passphrase length min: EdgeCertUtil.MIN_PASSPHRASE_LENGTH,
                              max: EdgeCertUtil.MAX_PASSPHRASE_LENGTH.

    Raises:
        edgectl.errors.EdgeValueError - Any input found to be invalid
        edgectl.errors.EdgeFileAccessError - If any of the files cannot be read or
                                             are in an invalid format.
    """
    if id_str in list(self._cert_chain.keys()):
        msg = 'Duplicate CA certificate ID: {0}'.format(id_str)
        logging.error(msg)
        raise edgectl.errors.EdgeValueError(msg)
    ca_cert_file_path = _get_kwargs_files('ca_cert_file_path', 'CA certificate', **kwargs)
    ca_private_key_file_path = _get_kwargs_files('ca_private_key_file_path',
                                                 'CA private key', **kwargs)
    # NOTE(review): 'CA''s is adjacent string-literal concatenation and yields
    # "CAs ..." (missing apostrophe); left unchanged in this doc-only edit.
    ca_root_cert_file_path = _get_kwargs_files('ca_root_cert_file_path',
                                               'CA''s root certificate', **kwargs)
    ca_root_chain_cert_file_path = _get_kwargs_files('ca_root_chain_cert_file_path',
                                                     'CA''s chain certificate', **kwargs)
    passphrase = _get_kwargs_passphrase('passphrase', self.MIN_PASSPHRASE_LENGTH,
                                        self.MAX_PASSPHRASE_LENGTH, **kwargs)
    logging.debug('Setting Root CA for id: %s\n' \
                  ' CA Cert File: %s\n' \
                  ' CA Root Cert File: %s\n' \
                  ' CA Root Chain Cert File: %s\n' \
                  ' CA Private Key File: %s',
                  id_str, ca_cert_file_path,
                  ca_root_cert_file_path, ca_root_chain_cert_file_path,
                  ca_private_key_file_path)
    try:
        # read the CA private key
        with open(ca_private_key_file_path, 'rb') as key_file:
            pk_data = key_file.read()
        ca_key_obj = crypto.load_privatekey(crypto.FILETYPE_PEM,
                                            pk_data,
                                            passphrase=passphrase)
        # check() verifies the key's internal consistency; raises crypto.Error
        # or TypeError for unsupported key types.
        ca_key_obj.check()
    except crypto.Error as ex_crypto:
        msg = 'Cryptographic error when reading private key file: {0}.' \
              ' Error: {1}'.format(ca_private_key_file_path, str(ex_crypto))
        logging.error(msg)
        raise edgectl.errors.EdgeValueError(msg)
    except TypeError as ex_type:
        logging.error('%s', str(ex_type))
        msg = 'Unsupported private key type. Currently RSA is only supported.'
        logging.error(msg)
        raise edgectl.errors.EdgeValueError(msg)
    except IOError as ex:
        msg = 'Could not read private key file: {0}.' \
              ' Errno: {1} Error: {2}'.format(ca_private_key_file_path,
                                              str(ex.errno), ex.strerror)
        logging.error(msg)
        raise edgectl.errors.EdgeFileAccessError(msg, ca_private_key_file_path)
    try:
        # read the CA certificate
        with open(ca_cert_file_path, 'rb') as cert_file:
            cert_data = cert_file.read()
        ca_cert_obj = crypto.load_certificate(crypto.FILETYPE_PEM, cert_data)
        if ca_cert_obj.has_expired():
            # EdgeValueError is not a crypto.Error, so it propagates out of
            # this try block as intended.
            msg = 'Expired CA certificate provided: {0}'.format(ca_cert_file_path)
            logging.error(msg)
            raise edgectl.errors.EdgeValueError(msg)
        cert_dict = {}
        cert_dict['key_pair'] = ca_key_obj
        cert_dict['cert'] = ca_cert_obj
        cert_dict['issuer_id'] = id_str
        cert_dict['key_file'] = ca_private_key_file_path
        cert_dict['ca_chain'] = ca_root_chain_cert_file_path
        cert_dict['ca_root'] = ca_root_cert_file_path
        self._cert_chain[id_str] = cert_dict
    except crypto.Error as ex_crypto:
        msg = 'Crypto Error: {0}'.format(str(ex_crypto))
        logging.error(msg)
        raise edgectl.errors.EdgeValueError(msg)
    except IOError as ex:
        msg = 'Could not read certificate file: {0}.' \
              ' Errno: {1} Error: {2}'.format(ca_cert_file_path,
                                              str(ex.errno), ex.strerror)
        logging.error(msg)
        raise edgectl.errors.EdgeFileAccessError(msg, ca_cert_file_path)
@staticmethod
def _get_maximum_validity_days(not_after_ts_asn1, validity_days_from_now):
    """Return the allowed validity period in days for a new certificate.

    This is the smaller of the days remaining until `not_after_ts_asn1`
    (an ASN.1 GENERALIZEDTIME byte string such as b'20301231235959Z')
    and the requested `validity_days_from_now`. Returns 0 if the
    timestamp is already in the past.

    Raises:
        edgectl.errors.EdgeValueError - if the timestamp cannot be parsed.
    """
    try:
        expiration_date = datetime.strptime(not_after_ts_asn1.decode('utf-8'), "%Y%m%d%H%M%SZ")
    # Catch only parse-related failures. The original bare `except:` also
    # swallowed KeyboardInterrupt/SystemExit and any error raised *after*
    # a successful parse, masking them as a format error.
    except (AttributeError, ValueError, UnicodeDecodeError):
        msg = 'Certificate date format incompatible {0}'.format(not_after_ts_asn1)
        logging.error(msg)
        raise edgectl.errors.EdgeValueError(msg)
    result = 0
    expires_in = expiration_date - datetime.now()
    if expires_in.days > 0:
        result = min(expires_in.days, validity_days_from_now)
    logging.debug('Max validity days: %s,' \
                  ' Certificate expiration timestamp: %s,' \
                  ' Certificate expiration days: %s,' \
                  ' Requested expiration days: %s',
                  str(result), not_after_ts_asn1, str(expires_in.days),
                  str(validity_days_from_now))
    return result
def create_intermediate_ca_cert(self, id_str, issuer_id_str, **kwargs):
"""
API to create an intermediate CA certificate issued by another CA in the certificate chain.
Args:
id_str (str): A user defined unique id string to identify the certificate
issuer_id_str (str): A unique id string to identify the issuing CA
kwargs:
validity_days_from_now (int): Number of days for certificate validity starting
from the time the API was invoked. Optional,
if validity is not provided default is 365 days.
Validity days min: EdgeCertUtil.MIN_VALIDITY,
max: EdgeCertUtil.MAX_VALIDITY.
common_name (str): Common name for the CA certificate. Required.
Common name length min: EdgeCertUtil.MIN_COMMON_NAME_LEN,
max: EdgeCertUtil.MAX_COMMON_NAME_LEN.
set_terminal_ca (bool): If set to True, it sets path len to zero which
implies that this CA cannot issue other CA certificates.
This CA can however issue non CA certificates. Optional.
Default is True.
passphrase (str): Private key passphrase. Optional.
Passphrase length min: EdgeCertUtil.MIN_PASSPHRASE_LENGTH,
max: EdgeCertUtil.MAX_PASSPHRASE_LENGTH.
Raises:
edgectl.errors.EdgeValueError - Any input found to be invalid
edgectl.errors.EdgeFileAccessError - If any of the files cannot be read or
are in an invalid format.
"""
if id_str in list(self._cert_chain.keys()):
msg = 'Duplicate intermediate CA certificate ID: {0}'.format(id_str)
logging.error(msg)
raise edgectl.errors.EdgeValueError(msg)
if issuer_id_str not in list(self._cert_chain.keys()):
msg = 'Invalid issuer certificate ID: {0}'.format(issuer_id_str)
raise edgectl.errors.EdgeValueError(msg)
validity_days_from_now = _get_kwargs_validity('validity_days_from_now',
self.MIN_VALIDITY_DAYS,
self.MAX_VALIDITY_DAYS, **kwargs)
passphrase = | |
<gh_stars>0
import json
import locale
import os
import pprint
import requests
import time
from collections import Counter
import matplotlib.pyplot as plt
from tqdm import tqdm
import xml.etree.ElementTree as et
# Root directory containing the bill .txt files.
data_path = "../data_lab1/"
# Enable Polish-locale alphabetical sorting (collation).
locale.setlocale(locale.LC_COLLATE, "pl_PL.UTF-8")
# Sort bills according to their size and take top 50 (largest) bills.
def find_largest_files():
    """Return the set of file names of the 50 largest bills in `data_path`.

    A bill's size is the character count of its whitespace-normalised text.
    The winning (name, size) pairs are also dumped to 'largest_bills.json',
    sorted by size, largest first.
    """
    sizes = []
    with tqdm(total=1179) as pbar:
        for filename in os.listdir(data_path):
            if filename.endswith(".txt"):
                filepath = os.path.join(data_path, filename)
                # Close the file promptly (the original leaked the handle).
                with open(filepath, 'r', encoding='utf-8') as bill_file:
                    content = " ".join(bill_file.read().split())
                sizes.append((filename, len(content)))
            pbar.update(1)
    # Rank every file and keep the 50 largest. The original's incremental
    # "min size" threshold tracked the most recently added file instead of
    # the 50th largest, so genuinely large files could be skipped.
    largest_bills_sorted = sorted(sizes, key=lambda kv: kv[1], reverse=True)[:50]
    with open("largest_bills.json", "w") as file:
        json.dump(largest_bills_sorted, file)
    return set(map(lambda kv: kv[0], largest_bills_sorted))
top_50_bills = find_largest_files()
# Use the lemmatized and sentence split documents (from ex. 5) to identify the expressions that consist of
# consecutive words starting with a capital letter
# (you will have to look at the inflected form of the word to check its capitalization)
# that do not occupy the first position in a sentence.
# CLARIN-PL account id and the NLP REST API base endpoint.
user = "mojadresemail2"
url = "http://ws.clarin-pl.eu/nlprest2/base"
# Code based on the example at http://nlp.pwr.wroc.pl/redmine/projects/nlprest2/wiki/Python
def upload(file):
    """Upload a local file to the CLARIN-PL service.

    Returns the HTTP response; eval_files() decodes its body and uses it
    as the uploaded file's id.
    """
    with open(file, "rb") as myfile:
        doc = myfile.read()
    return requests.post(url + '/upload/', data=doc, headers={'Content-Type': 'binary/octet-stream'})
def process(data):
    """Start an NLP task on the CLARIN-PL service and poll it to completion.

    Returns the task's result value, or None if the service reports an error.
    """
    doc = json.dumps(data)
    taskid = requests.post(url + '/startTask/', data=doc, headers={'Content-Type': 'application/json'}) \
        .content.decode('utf-8')
    time.sleep(0.2)
    resp = requests.get(url + '/getStatus/' + taskid).json()
    # Poll while the task is queued or still being processed.
    while resp["status"] == "QUEUE" or resp["status"] == "PROCESSING":
        time.sleep(0.5)
        resp = requests.get(url + '/getStatus/' + taskid).json()
    if resp["status"] == "ERROR":
        print("Error " + data["value"])
        return None
    return resp["value"]
def eval_files(bills, lpmn="any2txt|wcrft2", out_path='out/'):
    """Run the CLARIN-PL pipeline `lpmn` on every bill named in `bills`.

    Each matching .txt file in `data_path` is uploaded, processed,
    downloaded and saved as '<name>.ccl' under `out_path`.
    """
    with tqdm(total=1179) as pbar:
        for filename in os.listdir(data_path):
            if filename.endswith(".txt") and filename in bills:
                filepath = os.path.join(data_path, filename)
                fileid = upload(filepath).content.decode("utf-8")
                # print("Processing: " + filename)
                data = {'lpmn': lpmn, 'user': user, 'file': fileid}
                data = process(data)
                if data is None:
                    # Service reported an error for this file; skip it.
                    continue
                data = data[0]["fileID"]
                content = requests.get(url + '/download' + data).content.decode('utf-8')
                with open(out_path + os.path.basename(filename) + '.ccl', "w", encoding='utf-8') as outfile:
                    outfile.write(content)
            pbar.update(1)
eval_files(top_50_bills)
# Compute the frequency of each identified expression and print 50 results with the largest number of occurrences.
def find_capitalised_expressions():
    """Count expressions of consecutive capitalised words in the out/ .ccl files.

    Words that start a sentence are ignored. Prints and returns the 50 most
    common expressions as (expression, count) pairs.
    """
    counted_expressions = Counter()
    out_path = 'out/'
    with tqdm(total=50) as pbar:
        for filename in os.listdir(out_path):
            if filename.endswith(".ccl"):
                filepath = os.path.join(out_path, filename)
                ccl_file = et.parse(filepath).getroot()
                for chunk in ccl_file:
                    for sentence in chunk:
                        word_in_sentence_no = 0
                        is_last_upper = False
                        expressions = []
                        for token in sentence:
                            if token.tag == 'tok':
                                # Only the inflected form is needed; the
                                # original also read token[1][1]/token[1][0]
                                # into unused locals, which could fail on
                                # tokens with fewer children.
                                real_text = token[0].text
                                if word_in_sentence_no > 0 and real_text[0].isalpha() \
                                        and real_text[0].upper() == real_text[0]:
                                    # Extend the running expression or start a new one.
                                    if is_last_upper:
                                        expressions[-1] += " " + real_text
                                    else:
                                        expressions.append(real_text)
                                    is_last_upper = True
                                else:
                                    is_last_upper = False
                                word_in_sentence_no += 1
                        # In-place update instead of rebuilding the Counter
                        # per sentence (`counter + Counter(...)` copies all keys).
                        counted_expressions.update(expressions)
                pbar.update(1)
    common_expressions = counted_expressions.most_common(50)
    pprint.pprint(common_expressions)
    return common_expressions
find_capitalised_expressions()
# [('Nr', 1266),
# ('Art', 497),
# ('U', 477),
# ('Dz', 431),
# ('Policji', 253),
# ('Kodeksu', 217),
# ('<NAME>', 127),
# ('<NAME>', 107),
# ('<NAME>', 103),
# ('Zakładu', 101),
# ('<NAME>', 100),
# ('Zakład', 99),
# ('Ministrów', 79),
# ('<NAME>', 74),
# ('Prawo', 57),
# ('<NAME>', 57),
# ('Minister <NAME>', 53),
# ('<NAME>', 51),
# ('<NAME>', 51),
# ('I', 49),
# ('Sądu', 48),
# ('<NAME>', 46),
# ('<NAME>', 45),
# ('Państwa', 45),
# ('<NAME>', 44),
# ('Rady Ministrów', 44),
# ('<NAME>', 44),
# ('Przepisy', 43),
# ('<NAME>', 39),
# ('Pracy', 37),
# ('Minister', 37),
# ('Polsce', 36),
# ('Kodeks', 36),
# ('<NAME>', 35),
# ('Zakładu Ubezpieczeń Społecznych', 34),
# ('Agencji', 34),
# ('Finansów', 33),
# ('II', 31),
# ('Ministrem Finansów', 31),
# ('Patentowy', 30),
# ('Prezes Rady Ministrów', 30),
# ('BGŻ SA', 30),
# ('Pracodawca', 30),
# ('FRD', 30),
# ('Leśnictwa', 29),
# ('Głównego Urzędu Statystycznego', 28),
# ('ECU', 26),
# ('Zasobów Naturalnych', 26),
# ('FUS', 26),
# ('Rady', 25)]
# Apply the NER algorithm to identify the named entities in the same set of documents (not lemmatized) using the n82 model.
# eval_files(top_50_bills, lpmn='any2txt|wcrft2|liner2({"model":"n82"})', out_path='out_n82/')
# Plot the frequency (histogram) of the coares-grained classes (e.g. nam_adj, nam_eve, nam_fac`).
def find_expressions_classes():
    """Collect NER annotations from the .ccl files in out_n82/.

    Returns a dict mapping (fine-grained entity type, expression) -> count.
    """
    out_path = 'out_n82/'
    result = dict()
    with tqdm(total=50) as pbar:
        for filename in os.listdir(out_path):
            if filename.endswith(".ccl"):
                filepath = os.path.join(out_path, filename)
                ccl_file = et.parse(filepath).getroot()
                for chunk in ccl_file:
                    for sentence in chunk:
                        sentence_stats = {}
                        expression = ""
                        for token in sentence:
                            if token.tag == 'tok':
                                for ann in token.findall("ann"):
                                    if ann.text != "0":
                                        # Group words by (channel, annotation id)
                                        # to rebuild multi-word expressions.
                                        ann_key = (ann.get("chan"), ann.text)
                                        word = token.find("orth").text
                                        values = sentence_stats.get(ann_key, [])
                                        values.append(word)
                                        sentence_stats[ann_key] = values
                        # print(sentence_stats)
                        for ann, expr in sentence_stats.items():
                            expression = " ".join(expr)
                            entity_name = ann[0]
                            val = result.get((entity_name, expression), 0)
                            result[(entity_name, expression)] = val + 1
                # print(result)
                pbar.update(1)
    return result
result = find_expressions_classes()
def plot_histogram(result):
    """Aggregate fine-grained NER counts into coarse-grained classes and
    plot them as a bar chart saved to 'histogram.png'."""
    coares_grained_classes = dict()
    for key, val in result.items():
        # Coarse class = first two underscore-separated parts, e.g. nam_loc.
        coares_grained_class = "_".join(key[0].split("_")[:2])
        recent_val = coares_grained_classes.get(coares_grained_class, 0)
        coares_grained_classes[coares_grained_class] = recent_val + val
    print(coares_grained_classes)
    # {'nam_pro': 911,
    #  'nam_org': 2868,
    #  'nam_loc': 346,
    #  'nam_oth': 138,
    #  'nam_adj': 60,
    #  'nam_liv': 163,
    #  'nam_eve': 8,
    #  'nam_fac': 32}
    plt.figure(figsize=(10, 6))
    plt.title("Coarse-grained classes histogram")
    plt.bar(x=list(coares_grained_classes.keys()), height=list(coares_grained_classes.values()))
    plt.savefig("histogram.png")
    plt.show()
plot_histogram(result)
# Display 10 most frequent Named Entities for each coarse-grained type.
print(result)
def show_top_10_in_coarse_grained_classes(result):
    """Print the 10 most frequent named entities for every coarse-grained class.

    `result` maps (fine-grained entity type, expression) -> count.
    """
    by_class = {}
    for (entity_type, expression), count in result.items():
        # Coarse class = first two underscore-separated parts, e.g. nam_loc.
        coarse = "_".join(entity_type.split("_")[:2])
        by_class.setdefault(coarse, {})[expression] = count
    for key, expressions in by_class.items():
        print(f"------------- Top 10 for {key} -------------")
        ranked = sorted(expressions.items(), key=lambda kv: kv[1], reverse=True)
        pprint.pprint(ranked[:10])
# show_top_10_in_coarse_grained_classes(result)
# ------------- Top 10 for nam_pro -------------
# [('Dz . U .', 477),
# ('Kodeksu postępowania administracyjnego', 19),
# ('Kodeksu rodzinnego', 17),
# ('<NAME>', 16),
# ('Kodeksu karnego', 16),
# ('Kodeksu postępowania karnego', 15),
# ('Ordynacja podatkowa', 12),
# ('Kodeksu karnego wykonawczego', 11),
# ('Kodeksu postępowania cywilnego', 10),
# ('Kodeksu cywilnego', 7)]
# ------------- Top 10 for nam_org -------------
# [('<NAME>', 134),
# ('<NAME>', 104),
# ('Rada Ministrów', 93),
# ('Minister Spraw Wewnętrznych', 83),
# ('Prezes Rady Ministrów', 61),
# ('<NAME>', 54),
# ('<NAME>', 44),
# ('<NAME>', 41),
# ('Urzędzie Patentowym', 36),
# ('Minister Finansów', 35)]
# ------------- Top 10 for nam_loc -------------
# [('<NAME>', 143),
# ('Polsce', 36),
# ('Warszawie', 12),
# ('Warszawy', 11),
# ('Warszawa', 11),
# ('Polski', 8),
# ('<NAME>', 7),
# ('Rzeczpospolita Polska', 6),
# ('Poznaniu', 6),
# ('Wrocławiu', 6)]
# ------------- Top 10 for nam_oth -------------
# [('złotych', 63),
# ('zł', 31),
# ('ECU', 13),
# ('Minister Edukacji Narodowej', 11),
# ('Minister <NAME>', 5),
# ('PESEL', 3),
# ('Ă - - - - - Ĺ - - - - - - - - - - - - - - - - - - - Ĺ - - - - - - - - - - - - - - - - - - - - - - - -', 2),
# ('FUS', 2),
# ('É - - - - - Â - - - - - - - - - - - - - - - - - - - Â - - - - - - - - - - - - - - - - - - - - - - - -', 1),
# ('Č - - - - - Á - - - - - - - - - - - - - - - - - - - Á - - - - - - - - - - - - - - - - - - - - - - - -', 1)]
# ------------- Top 10 for nam_adj -------------
# [('polski', 22),
# ('polskich', 9),
# ('polskiej', 4),
# ('polskim', 4),
# ('polskiego', 3),
# ('Polskiej', 2),
# ('polską', 2),
# ('polscy', 2),
# ('polskimi', 2),
# ('Wojewódzki', 2)]
# ------------- Top 10 for nam_liv -------------
# [('<NAME>', 39),
# ('<NAME>', 11),
# ('Głównego Inspektora', 10),
# ('Art', 8),
# ('Głównym Inspektorem', 5),
# ('Kartograficznym', 4),
# ('III', 4),
# ('<NAME>', 4),
# ('<NAME>', 3),
# ('Najwyższego', 3)]
# ------------- Top 10 for nam_eve -------------
# [('BGŻ SA', 2),
# ('Monitorze Sądowym', 2),
# ('Narodowego Spisu Powszechnego', 1),
# ('Świętem Straży Granicznej', 1),
# ('Generalny Konserwator Zabytków', 1),
# ('<NAME>', 1)]
# ------------- Top 10 for nam_fac -------------
# [('<NAME>', 16),
# ('Str<NAME>', 5),
# ('NIP', 2),
# ('<NAME>', 2),
# ('<NAME> - Reformowanego', 1),
# ('<NAME>', 1),
# ('REGON', 1),
# ('Zakładu', 1),
# ('<NAME>', 1),
# ('<NAME> - Metodystycznego', 1)]
# Display 50 most frequent Named Entities including their count and fine-grained type.
# Sort the ((fine-grained type, expression), count) pairs by descending count.
sorted_results = sorted(result.items(), key=lambda kv: -kv[1])
pprint.pprint(sorted_results[:50])
# [(('nam_pro_media_periodic', 'Dz . U .'), 477),
# (('nam_loc_gpe_country', 'Rzeczypospolitej Polskiej'), 143),
# (('nam_org_institution', 'Skarbu Państwa'), 134),
# (('nam_org_institution', 'Urząd Patentowy'), 104),
# (('nam_org_organization', 'Państwowej Straży Pożarnej'), 96),
# (('nam_org_institution', 'Rada Ministrów'), 93),
# (('nam_org_institution', 'Minister Spraw Wewnętrznych'), 83),
# (('nam_oth_currency', 'złotych'), 63),
# (('nam_org_institution', 'Prezes Rady Ministrów'), 61),
# (('nam_org_institution', 'Funduszu Pracy'), 54),
# (('nam_org_institution', 'Skarb Państwa'), 44),
# (('nam_org_institution', 'Urzędu Patentowego'), 41),
# (('nam_liv_person', '<NAME>'), 39),
# (('nam_org_institution', 'Urzędzie Patentowym'), 36),
# (('nam_loc_gpe_country', 'Polsce'), 36),
# (('nam_org_institution', 'Minister Finansów'), 35),
# (('nam_org_institution', 'Zakładu Ubezpieczeń Społecznych'), 35),
# (('nam_org_institution', 'Minister Pracy i Polityki Socjalnej'), 33),
# (('nam_org_institution', 'Fundusz Pracy'), 33),
# (('nam_oth_currency', 'zł'), 31),
# (('nam_org_institution', 'Zakład Ubezpieczeń Społecznych'), 30),
# | |
# -*- coding: utf-8 -*-
from django.test import TestCase
from prediction.models import Element, Prediction, PredictionLinear, PredictionLinearFunction
from product_card.models import ProductCard
from offer.models import Offer, ChequeOffer
from company.models import Company, Employee
from cheque.models import FNSCheque, FNSChequeElement, QRCodeReader, ImportProverkachekaComFormatLikeFNS, ShowcasesCategory
from datetime import datetime
from telegram.models import Telegram
#from prediction.tests.Base import list_buy_milk
from prediction.tests import Base as BasePrediction
#print Base.list_buy_milk()
import json
import time
import urllib
import urllib2, base64
from loader.cheque.proverkacheka_com import generate_cheque_qr_codes
class Base(TestCase):
def setUp(self):
    # Show full diffs on assertion failures.
    self.maxDiff = None
def test_1(self):
    """
    The user uses the service for the first time:
    create a user from the client-supplied data,
    then create a company for that user.
    (Docstring translated from Russian.)
    """
    pety_employee = Employee(title='Pety', key='123zxc')
    pety_employee.save()
    company_family = Company(title='family')
    company_family.save()
    company_family.employees.add(pety_employee)
    company_family.title = 'test'
    self.assertEqual('test', company_family.title)
    # Reloading from the DB discards the unsaved title change.
    company_family = Company.objects.get(employees__in=[pety_employee])
    self.assertEqual('family', company_family.title)
def test_2(self):
    """
    The user wants to upload receipt info:
    authenticate, obtain a session,
    send the text from the QR code
    (the default company is selected for saving),
    then fetch the saved receipt from the database by its parameters.
    (Docstring translated from Russian.)
    """
    # TODO: initialise this way for now
    self.test_1()
    # in
    key = '123zxc'
    qr_text = 't=20201107T2058&s=63.00&fn=9288000100192401&i=439&fp=2880362760&n=1'
    self.assertEqual(0, FNSCheque.objects.all().count())
    # calc
    pety_employee = Employee.objects.get(key=key)
    self.assertEqual('Pety', pety_employee.title)
    company_family = Company.objects.get(employees__in=[pety_employee])
    FNSCheque.import_from_proverkacheka_com_format_like_fns(qr_text, company_family)
    self.assertEqual(1, FNSCheque.objects.all().count())
    cheque_p = QRCodeReader.qr_text_to_params(qr_text)
    cheque = FNSCheque.objects.get(fns_fiscalDocumentNumber=cheque_p['FD'], fns_fiscalDriveNumber=cheque_p['FN'], fns_fiscalSign=cheque_p['FDP'])
    cheque.company = Company.objects.get(employees__in=[pety_employee])
    cheque.save()
    # Re-importing the same receipt (or an unknown one) must not create
    # duplicate rows.
    FNSCheque.import_from_proverkacheka_com_format_like_fns(qr_text, company_family)
    FNSCheque.import_from_proverkacheka_com_format_like_fns('t=11&s=22&fn=33&i=44&fp=55&n=1', company_family)
    self.assertEqual(1, FNSCheque.objects.all().count())
def test_3(self):
    """Pety wants to load a few more cheques.

    Load all available cheques and check what the auto-generated
    milk-consumption function predicts.  Placeholder -- not implemented.
    """
    pass
def test_4(self):
    """Import from https://proverkacheka.com/check&p=2.

    Plan: find the last cheque loaded by Robo1; fetch pages from
    proverkacheka.com one by one until that cheque appears on a page
    (limit 100 pages, 7000 on the very first run); store the cheques
    under Robo1 and feed the resulting QR-code list to the regular
    per-user cheque-saving machinery.  Later the data may be used to
    show similar goods in other shops and to recommend prices.
    """
    robo1_employee = Employee(title='robo1', key='<KEY>')
    robo1_employee.save()
    company_family = Company(title='family')
    company_family.save()
    company_family.employees.add(robo1_employee)
    last_fns_cheques = FNSCheque.objects.filter(company=company_family).order_by('-id')
    has_last_fns_cheques = False
    if last_fns_cheques:
        has_last_fns_cheques = True
        last_fns_cheque = last_fns_cheques[0]
    # TODO: the remainder is deliberately commented out (it would break the
    # test) to keep a reminder that the loader still needs fixing.
    # cheque_params = generate_cheque_qr_codes(has_last_fns_cheques, last_fns_cheques)
    #
    # self.assertEqual(0, FNSCheque.objects.all().count())
    # self.assertEqual(0, FNSChequeElement.objects.all().count())
    # for s in cheque_params:
    #     print s
    #     # TODO: pass the company the cheques are saved for
    #     FNSCheque.import_from_proverkacheka_com_format_like_fns(s, company_family)
    #     # print 'sleep'
    #     # time.sleep(10)
    # self.assertEqual(50, FNSCheque.objects.all().count())
    # self.assertTrue(99 < FNSChequeElement.objects.all().count())
def test_5(self):
    """Visit the resource and see which showcases have offers at all.

    1. goods can be searched with a text string
    2. there is a category tree of goods
    """
    company_family = Company(title='family')
    company_family.save()
    # Build cheques and their elements from the canned milk/cheese fixtures.
    for i in BasePrediction.list_buy_milk():
        fns_cheque = FNSCheque(is_manual=False, company=company_family, fns_dateTime=i['dateTime'], json={'data': {'json': i}})
        fns_cheque.save()
        for j in i['items']:
            e = FNSChequeElement(fns_cheque=fns_cheque, name=j['name'], quantity=j['quantity'], volume=j['volume'], sum=j['sum'], price=j['price'])
            e.save()
    for i in BasePrediction.list_buy_cheese():
        fns_cheque = FNSCheque(is_manual=False, company=company_family, fns_dateTime=i['dateTime'], json={'data': {'json': i}})
        fns_cheque.save()
        for j in i['items']:
            e = FNSChequeElement(fns_cheque=fns_cheque, name=j['name'], quantity=j['quantity'], volume=j['volume'], sum=j['sum'], price=j['price'])
            e.save()
    # Build offers from the entities created above.
    text = u'СЫР'
    offers = ChequeOffer.find(text)
    self.assertEqual([
        {u'datetime': {u'update': u'2020-05-28T22:51:00'},
         u'price': {u'one': 13500, u'per_one_gram': 61363.64},
         u'product': {u'title': u'4607004891694 \u0421\u042b\u0420 \u0421\u041b\u0418\u0412\u041e\u0427\u041d\u042b\u0419 HOCHLA'},
         u'showcase': {u'address': u'107076, \u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}},
        {u'datetime': {u'update': u'2020-05-28T22:51:00'},
         u'price': {u'one': 13990, u'per_one_gram': 55960.0},
         u'product': {u'title': u'8600742011658 \u0421\u042b\u0420 \u0421\u0415\u0420\u0411\u0421\u041a\u0410\u042f \u0411\u0420\u042b\u041d\u0417\u0410'},
         u'showcase': {u'address': u'107076, \u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}},
        {u'datetime': {u'update': u'2020-05-15T20:45:00'},
         u'price': {u'one': 49990, u'per_one_gram': 49990.0},
         u'product': {u'title': u'2364939000004 \u0421\u042b\u0420 \u041a\u041e\u0420\u041e\u041b\u0415\u0412\u0421\u041a\u0418\u0419 51%'},
         u'showcase': {u'address': u'\u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}},
        {u'datetime': {u'update': u'2020-05-10T21:08:00'},
         u'price': {u'one': 59990, u'per_one_gram': 59990.0},
         u'product': {u'title': u'2316971000009 \u0421\u042b\u0420 \u041c\u0410\u0410\u0421\u0414\u0410\u041c 45% \u0418\u0427\u0410\u041b'},
         u'showcase': {u'address': u'\u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}},
        {u'datetime': {u'update': u'2020-04-21T14:04:00'},
         u'price': {u'one': 50306, u'per_one_gram': 50306.0},
         u'product': {u'title': u'2372240000002 \u0421\u042b\u0420 \u0413\u0420\u0410\u041d\u0414 SPAR 45%'},
         u'showcase': {u'address': u'\u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}},
        {u'datetime': {u'update': u'2020-04-21T14:04:00'},
         u'price': {u'one': 37670, u'per_one_gram': 37670.0},
         u'product': {u'title': u'2364178000001 \u0421\u042b\u0420 \u041c\u0410\u0410\u0421\u0414\u0410\u041c 45% \u041f\u0420\u0415'},
         u'showcase': {u'address': u'\u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}},
        {u'datetime': {u'update': u'2020-05-23T21:58:00'},
         u'price': {u'one': 49900, u'per_one_gram': 49900.0},
         u'product': {u'title': u'\u0421\u042b\u0420 \u0420\u041e\u0421\u0421\u0418\u0419\u0421\u041a\u0418\u0419 45%'},
         u'showcase': {u'address': u''}},
        {u'datetime': {u'update': u'2020-05-23T21:58:00'},
         u'price': {u'one': 18900, u'per_one_gram': 75600.0},
         u'product': {u'title': u'\u0421\u042b\u0420 \u041c\u0410\u0421\u041a\u0410\u0420.80% 250\u0413\u0420'},
         u'showcase': {u'address': u''}},
        {u'datetime': {u'update': u'2020-05-23T21:58:00'},
         u'price': {u'one': 3399, u'per_one_gram': 37766.67},
         u'product': {u'title': u'\u0421\u042b\u0420 \u041f\u041b \u0421 \u041b\u0423\u041a\u041e\u041c 90\u0413'},
         u'showcase': {u'address': u''}},
        {u'datetime': {u'update': u'2020-05-23T21:58:00'},
         u'price': {u'one': 3399, u'per_one_gram': 37766.67},
         u'product': {u'title': u'\u0421\u042b\u0420 \u041f\u041b \u0412\u041e\u041b\u041d\u0410 45% 90\u0413'},
         u'showcase': {u'address': u''}},
        {u'datetime': {u'update': u'2020-05-02T20:56:00'},
         u'price': {u'one': 3899, u'per_one_gram': 48737.5},
         u'product': {u'title': u'\u041f\u041b.\u0421\u042b\u0420 \u042f\u041d\u0422\u0410\u0420\u042c \u0424\u041e\u041b80\u0413'},
         u'showcase': {u'address': u''}},
        {u'datetime': {u'update': u'2020-05-02T20:56:00'},
         u'price': {u'one': 15900, u'per_one_gram': 39750.0},
         u'product': {u'title': u'\u0421\u042b\u0420 \u041f\u041b \u0424\u0415\u0422\u0410\u041a\u0421\u0410 400\u0413'},
         u'showcase': {u'address': u''}},
        {u'datetime': {u'update': u'2020-05-02T20:56:00'},
         u'price': {u'one': 3399, u'per_one_gram': 37766.67},
         u'product': {u'title': u'\u0421\u042b\u0420 \u041f\u041b \u0412\u041e\u041b\u041d\u0410 45% 90\u0413'},
         u'showcase': {u'address': u''}},
        {u'datetime': {u'update': u'2020-05-02T20:56:00'},
         u'price': {u'one': 3399, u'per_one_gram': 37766.67},
         u'product': {u'title': u'\u0421\u042b\u0420 \u041f\u041b \u0412\u041e\u041b\u041d\u0410 45% 90\u0413'},
         u'showcase': {u'address': u''}},
        {u'datetime': {u'update': u'2020-05-02T20:56:00'},
         u'price': {u'one': 3899, u'per_one_gram': 48737.5},
         u'product': {u'title': u'\u041f\u041b.\u0421\u042b\u0420 \u042f\u041d\u0422\u0410\u0420\u042c \u0424\u041e\u041b80\u0413'},
         u'showcase': {u'address': u''}},
        {u'datetime': {u'update': u'2020-06-03T14:50:00'},
         u'price': {u'one': 59899, u'per_one_gram': 59899.0},
         u'product': {u'title': u'2364178000001 \u0421\u042b\u0420 \u041c\u0410\u0410\u0421\u0414\u0410\u041c 45% \u041f\u0420\u0415'},
         u'showcase': {u'address': u'107076, \u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}},
        {u'datetime': {u'update': u'2020-06-03T14:50:00'},
         u'price': {u'one': 20000, u'per_one_gram': 50000.0},
         u'product': {u'title': u'4607004892677 \u0421\u042b\u0420 HOCHLAND \u041c\u042f\u0413\u041a\u0418\u0419'},
         u'showcase': {u'address': u'107076, \u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}}
    ], offers)
def test_6(self):
    """Per-product price analytics.

    Get the latest offer by date plus the minimum and maximum over the
    last week/month/quarter/year/all time, for a given address.
    """
    company_family = Company(title='family')
    company_family.save()
    # Build cheques and their elements from the canned milk/cheese fixtures.
    for i in BasePrediction.list_buy_milk():
        fns_cheque = FNSCheque(is_manual=False, company=company_family, fns_dateTime=i['dateTime'], json={'data': {'json': i}})
        fns_cheque.save()
        for j in i['items']:
            e = FNSChequeElement(fns_cheque=fns_cheque, name=j['name'], quantity=j['quantity'], volume=j['volume'], sum=j['sum'], price=j['price'])
            e.save()
    for i in BasePrediction.list_buy_cheese():
        fns_cheque = FNSCheque(is_manual=False, company=company_family, fns_dateTime=i['dateTime'], json={'data': {'json': i}})
        fns_cheque.save()
        for j in i['items']:
            e = FNSChequeElement(fns_cheque=fns_cheque, name=j['name'], quantity=j['quantity'], volume=j['volume'], sum=j['sum'], price=j['price'])
            e.save()
    # Build offers from the entities created above.
    text = u'МОЛОКО'
    #text = u'СЫР'
    offers = ChequeOffer.find(text)
    print "==================================="
    #self.assertEqual([], offers)
    # 2) Offers are grouped per showcase (INN + retail address).  Even if an
    # offer changes (the product name in the cheque changed -- this happens
    # very rarely and only the latest one matters), names that differ within
    # one shop are treated as different products.
    #{u'datetime': {u'update': u'2020-06-03t14:50:00'},
    #u'price': {u'one': 20000, u'per_one_gram': 50000.0},
    #u'product': {u'title': u'4607004892677 \u0421\u042b\u0420 hochland \u041c\u042f\u0413\u041a\u0418\u0419'},
    #u'showcase': {u'address': '107076, \xd0\xb3.\xd0\x9c\xd0\xbe\xd1\x81\xd0\xba\xd0\xb2\xd0\xb0, \xd1\x83\xd0\xbb.\xd0\x91\xd0\xbe\xd0\xb3\xd0\xbe\xd1\x80\xd0\xbe\xd0\xb4\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9 \xd0\x92\xd0\xb0\xd0\xbb, \xd0\xb4.6, \xd0\xba\xd0\xbe\xd1\x80\xd0\xbf.2'}}
    offer_analytics = ChequeOffer.analytics_last_min_max_price(offers)
    self.assertEqual([
        {u'last_datetime': u'2020-06-03T14:50:00',
         u'count': 2,
         u'price_analytics': {u'last': {u'one': 5990, u'per_one_gram': 5990.0},
                              u'max': {u'one': 6990, u'per_one_gram': 6990.0},
                              u'min': {u'one': 5990, u'per_one_gram': 5990.0}},
         u'product': {u'title': u'4607045982771 \u041c\u041e\u041b\u041e\u041a\u041e SPAR \u0423\u041b\u042c\u0422\u0420\u0410\u041f\u0410'},
         u'showcase': {u'address': u'\u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}},
        {u'last_datetime': u'2020-05-24T12:56:00',
         u'count': 1,
         u'price_analytics': {u'last': {u'one': 4990, u'per_one_gram': 4990.0},
                              u'max': {u'one': 4990, u'per_one_gram': 4990.0},
                              u'min': {u'one': 4990, u'per_one_gram': 4990.0}},
         u'product': {u'title': u'4607045982788 \u041c\u041e\u041b\u041e\u041a\u041e SPAR \u0423\u041b\u042c\u0422\u0420\u0410\u041f\u0410'},
         u'showcase': {u'address': u'\u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}},
        {u'last_datetime': u'2020-05-15T20:45:00',
         u'count': 1,
         u'price_analytics': {u'last': {u'one': 5490, u'per_one_gram': 5935.14},
                              u'max': {u'one': 5490, u'per_one_gram': 5935.14},
                              u'min': {u'one': 5490, u'per_one_gram': 5935.14}},
         u'product': {u'title': u'4607167840416 \u041c\u041e\u041b\u041e\u041a\u041e SPAR 3,2% 925'},
         u'showcase': {u'address': u'\u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}},
        {
         u'count': 2,
         u'last_datetime': u'2020-05-10T21:08:00',
         u'price_analytics': {u'last': {u'one': 8990, u'per_one_gram': 5288.24},
                              u'max': {u'one': 8990, u'per_one_gram': 5288.24},
                              u'min': {u'one': 8990, u'per_one_gram': 5288.24}},
         u'product': {u'title': u'4607167841154 \u041c\u041e\u041b\u041e\u041a\u041e SPAR 2,5% 1,7'},
         u'showcase': {u'address': u'\u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}},
        {
         u'count': 1,
         u'last_datetime': u'2020-05-06T21:53:00',
         u'price_analytics': {u'last': {u'one': 5990, u'per_one_gram': 5990.0},
                              u'max': {u'one': 5990, u'per_one_gram': 5990.0},
                              u'min': {u'one': 5990, u'per_one_gram': 5990.0}},
         u'product': {u'title': u'4690228007842 \u041c\u041e\u041b\u041e\u041a\u041e \u0414\u041e\u041c\u0418\u041a \u0412 \u0414\u0415\u0420\u0415\u0412'},
         u'showcase': {u'address': u'\u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}},
        {
         u'count': 1,
         u'last_datetime': u'2020-05-23T21:58:00',
         u'price_analytics': {
             u'last': {
                 u'one': 7990, u'per_one_gram': 5707.14
             },
             u'max': {u'one': 7990, u'per_one_gram': 5707.14},
             u'min': {u'one': 7990, u'per_one_gram': 5707.14}},
         u'product': {u'title': u'\u041c\u041e\u041b\u041e\u041a\u041e \u041f\u0410\u0421\u0422.3,7%1400'},
         u'showcase': {u'address': u'\u0433.\u041c\u043e\u0441\u043a\u0432\u0430, \u0443\u043b.\u0411\u043e\u0433\u043e\u0440\u043e\u0434\u0441\u043a\u0438\u0439 \u0412\u0430\u043b, \u0434.6, \u043a\u043e\u0440\u043f.2'}},
    ], offer_analytics)
def test_7(self):
    """1. create a cheque from the string obtained from a QR code
    2. refresh the cheque data from proverkacheka.com
    """
    company_family = Company(title='family')
    company_family.save()
    qr_text = 't=20200524T125600&s=849.33&fn=9285000100127361&i=115180&fp=1513716805&n=1'
    fns_cheque = FNSCheque.create_fns_cheque_from_qr_text(qr_text, company_family)
    fns_cheque.save()
    # A cheque created from QR text alone has no seller INN yet.
    self.assertEqual('', fns_cheque.fns_userInn)
    self.assertEqual('', FNSCheque.objects.get(fns_fiscalDocumentNumber='115180').fns_userInn)
    # NOTE(review): presumably fetches the full cheque JSON from the
    # proverkacheka.com-style source -- confirm whether this hits the network.
    fns_cheque_json = ImportProverkachekaComFormatLikeFNS.get_fns_cheque_by_qr_params('', qr_text)
    # The stored cheque must be found both by its raw fiscal parameters
    # and by the full JSON document.
    self.assertTrue(FNSCheque.has_cheque_with_fiscal_params(company_family,
                    fns_cheque_json["document"]["receipt"]["fiscalDocumentNumber"],
                    fns_cheque_json["document"]["receipt"]["fiscalDriveNumber"],
                    fns_cheque_json["document"]["receipt"]["fiscalSign"],
                    fns_cheque_json["document"]["receipt"]["dateTime"],
                    fns_cheque_json["document"]["receipt"].get("totalSum", 'Error')))
    self.assertTrue(FNSCheque.has_cheque_with_fns_cheque_json(company_family, fns_cheque_json))
    #FNSCheque.update_cheque_from_json(fns_cheque, fns_cheque_json)
    fns_cheque.update_cheque_from_json(fns_cheque_json)
    # After the update the seller INN is filled in, in memory and in the DB.
    self.assertEqual(u'5258056945', fns_cheque.fns_userInn)
    self.assertEqual(u'5258056945', FNSCheque.objects.get(fns_fiscalDocumentNumber='115180').fns_userInn)
def test_8(self):
"""
Смоделировать ввидение команд новым пользователем.
/start
t=20200524T125600&s=849.33&fn=9285000100127361&i=115180&fp=1513716805&n=1
t=20200506T215300&s=1351.88&fn=9285000100127255&i=83300&fp=328049751&n=1
t=20200421T140400&s=1057.46&fn=9285000100127255&i=79753&fp=3686248129&n=1
/cheques
что приходит от пользователя?
сообщение из ТГ, которое содержит запрос к боту.
который может не ожидать ответ
может ожидать мгновенный ответ
может ожидать отложенный ответ
может ожидать несколько отложенных ответов
"""
company_family = Company(title='family')
company_family.save()
#full_message = {}
#chat_id = full_message['message']['chat']['id']
#message = full_message['message']['text']
#telegram_user_id = message['message']['from']['id']
#first_name = message['message']['from'].get('first_name', '')
#last_name = message['message']['from'].get('last_name', '')
#language_code = message['message']['from'].get('language_code', '')
qr_tests = [
't=20200524T125600&s=849.33&fn=9285000100127361&i=115180&fp=1513716805&n=1',
't=20200506T215300&s=1351.88&fn=9285000100127255&i=83300&fp=328049751&n=1',
't=20200421T140400&s=1057.46&fn=9285000100127255&i=79753&fp=3686248129&n=1',
'test t=20200421T140400&s=1057.46&fn=9285000100127255&i=79753&fp=3686248129&n=1 test',
u'Привет test t=20200421T140400&s=1057.46&fn=9285000100127255&i=79753&fp=3686248129&n=1 test',
u"""
message.find('i') >= 0 t=20201216T1818&s=29.00&fn=9280440301295284&i=236&fp=3107860384&n=1 sf s выаавы ы ва ывage.find('t') >= 0 and \
message.find('s') >= 0 and \
message.find('fn') >= 0 and \
message.find('fp') >= 0 and \
message.find('i') >= 0 t=20201216T1818&s=29.00&fn=9280440301295284&i=236&fp=3107860384&n=1 sf s выаавы ы ва ывage.find('t') >= 0 and \
message.find('s') >= 0 and \
message.find('fn') >= 0 and \
message.find('fp') >= 0 | |
'{}' {} :code:{}:body:{}".format(
request.method, request.path, response.status_code, body)
logger.info(log_base.format(NORTHBOUND, RESPONSE, log_content))
return response
@app.before_request
def before():
    # Per-request preprocessing hook (e.g. inspect request.headers);
    # intentionally a no-op for now.
    pass
# Topology API implementation
@app.route('/chunkete/topology', methods=['GET'])
def getChunketeTopology():
    """Aggregate the topology (boxes) reported by every managed controller.

    Phy and box ids coming from each controller are translated to their
    database ids before being returned.  On the first controller error the
    controller's response is propagated verbatim.
    NOTE(review): resp["links"] is initialised but never populated --
    presumably not implemented yet.
    """
    resp = {
        "boxes": [],
        "links": []
    }
    log_content = ""
    logger.info(log_base.format(NORTHBOUND, REQUEST, log_content))
    for index_controller in range(len(controllers)):
        try:
            boxes = session.query(Box).filter(
                Box.controller_id == index_controller).all()
            (controller_resp,
             code) = controllers[index_controller].getChunketeTopology()
            log_content = "controller:{}:response:{}/{}".format(
                index_controller, code, controller_resp)
            logger.info(log_base.format(SOUTHBOUND, REQRESP, log_content))
            if code == API_RESPONSE["OK"]["code"]:
                for box in controller_resp["boxes"]:
                    # Translate each controller-local phy id to its DB id.
                    for index_phy in range(len(box["phys"])):
                        phy = session.query(Phy).filter(
                            Phy.controller_id == index_controller).filter(
                            Phy.phy_id_controller ==
                            box["phys"][index_phy]["id"]
                        ).one()
                        box["phys"][index_phy]["id"] = phy.id
                    # Translate the controller-local box id to its DB id.
                    for db_box in boxes:
                        if db_box.box_id_controller == box["id"]:
                            box["id"] = db_box.id
                            break
                    resp["boxes"].append(box)
            else:
                return controller_resp, code
        except NoResultFound:
            # A phy reported by the controller is unknown to the DB.
            return json.dumps({
                "timestamp": "2019-09-10T14:18:24.866+0000",
                "status": API_RESPONSE["NOTFOUND"]["code"],
                "error": API_RESPONSE["NOTFOUND"]["content"],
                "message": "No Result Found for the request",
                "path": request.path
            }), API_RESPONSE["NOTFOUND"]["code"]
        except IntegrityError:
            return errorResponder(
                "DB_INTEGRITY", "Database integrity error")
        finally:
            session.close()
    response = jsonify(resp)
    return response, API_RESPONSE["OK"]["code"]
@app.route(
    '/chunkete/topology/physicalInterface/<phy_id>/LTEConfig',
    methods=['PUT'])
def putInterfaceLTEConfig(phy_id):
    """Validate an LTE configuration and forward it to the phy's controller.

    Expected JSON body, e.g.:
    {
        "cellIdentity": 256, "earfcndl": 41690, "phyCellId": 5,
        "prachrootseqindex": 100, "primaryMMEAddress": "192.168.100.25",
        "primaryMMEPort": 333, "primaryPlmnId": "00101",
        "refSignalPower": -40, "reservedForOperatorUse": "not-reserved",
        "trackingAreaCode": 67
    }
    Returns the controller response, or an error response on malformed
    input, unknown phy, or DB problems.
    """
    try:
        content = request.data
        content_dict = json.loads(content)
        log_content = "phy_id:{}:content:{}".format(phy_id, content_dict)
        logger.info(log_base.format(NORTHBOUND, REQUEST, log_content))
        # BUG FIX: the original range checks used chained comparisons of
        # the form `0 > x > 256`, which can never be true, so out-of-range
        # values were silently accepted.
        if not 0 <= content_dict["cellIdentity"] <= 256:
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        if content_dict["earfcndl"] not in [i for j in (
                range(2750, 3449),
                range(41690, 43489),
                range(37750, 38249)) for i in j]:
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        if not 0 <= content_dict["phyCellId"] <= 500:
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        if not 0 <= content_dict["prachrootseqindex"] <= 1023:
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        if "primaryMMEAddress" not in content_dict.keys():
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        if "primaryMMEPort" not in content_dict.keys():
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        if "primaryPlmnId" not in content_dict.keys():
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        if not -40 <= content_dict["refSignalPower"] <= -10:
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        if content_dict["reservedForOperatorUse"] != "not-reserved":
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        if not 0 <= content_dict["trackingAreaCode"] <= 65535:
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        if content:
            phy = session.query(Phy).filter(Phy.id == phy_id).one()
            response, code = controllers[phy.controller_id].\
                putInterfaceLTEConfig(
                    phy.phy_id_controller, content)
            # BUG FIX: format the whole message at once -- the original
            # applied .format() only to the second half of the string,
            # leaving the first three placeholders unfilled in the log.
            log_content = (
                "controller:{}:phy_id_controller:{}:phy_id:{}"
                ":content:{}:response:{}/{}").format(
                    phy.controller_id, phy.phy_id_controller,
                    phy_id, content, code, response)
            logger.info(log_base.format(SOUTHBOUND, REQRESP, log_content))
            return jsonify(response), code
        else:
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
    except KeyError:
        # A mandatory field is missing from the request body.
        return errorResponder(
            "VERIFICATION_ERROR", "Malformed request")
    except NoResultFound:
        return errorResponder(
            "NOTFOUND", "Item not found")
    except IntegrityError:
        return errorResponder(
            "DB_INTEGRITY", "Database integrity error")
    finally:
        session.close()
@app.route(
    '/chunkete/topology/physicalInterface/<phy_id>/type/<phy_type>',
    methods=['PUT'])
def putInterfaceType(phy_id, phy_type):
    """Set the type of a physical interface on its owning controller.

    Returns the controller response, or an error response when the phy is
    unknown or a DB problem occurs.
    """
    try:
        log_content = "phy_id:{}:phy_type:{}".format(phy_id, phy_type)
        logger.info(log_base.format(NORTHBOUND, REQUEST, log_content))
        phy = session.query(Phy).filter(Phy.id == phy_id).one()
        response, code = controllers[phy.controller_id].putInterfaceType(
            phy.phy_id_controller, phy_type)
        # BUG FIX: the original read `phy.phyid_controller` (AttributeError
        # at runtime); the model attribute is `phy_id_controller`, as used
        # everywhere else in this file.
        # BUG FIX: format the whole message at once -- the original applied
        # .format() only to the second half, leaving placeholders unfilled.
        log_content = (
            "controller:{}:phy_id_controller:{}:phy_id:{}"
            ":phy_type:{}:response:{}/{}").format(
                phy.controller_id, phy.phy_id_controller,
                phy_id, phy_type, code, response)
        logger.info(
            log_base.format(SOUTHBOUND, REQRESP, log_content))
        return response, code
    except NoResultFound:
        return errorResponder(
            "NOTFOUND", "Item not found")
    except IntegrityError:
        return errorResponder(
            "DB_INTEGRITY", "Database integrity error")
    finally:
        session.close()
@app.route(
    '/chunkete/topology/physicalInterface/<phy_id>/wiredConfig',
    methods=['PUT'])
def putInterfaceWiredConfig(phy_id):
    """Forward a wired-interface configuration to the phy's controller.

    Returns the controller response, or an error response on malformed
    input, unknown phy, or DB problems.
    """
    try:
        content = request.data
        log_content = "phy_id:{}:content:{}".format(
            phy_id, json.loads(content))
        logger.info(
            log_base.format(NORTHBOUND, REQUEST, log_content))
        if content:
            phy = session.query(Phy).filter(Phy.id == phy_id).one()
            response, code = controllers[phy.controller_id].\
                putInterfaceWiredConfig(
                    phy.phy_id_controller, content)
            # BUG FIX: format the whole message at once -- the original
            # applied .format() only to the second half of the string,
            # leaving the first three placeholders unfilled in the log.
            log_content = (
                "controller:{}:phy_id_controller:{}:phy_id:{}"
                ":response:{}/{}").format(
                    phy.controller_id, phy.phy_id_controller,
                    phy_id, code, response)
            logger.info(
                log_base.format(SOUTHBOUND, REQRESP, log_content))
            return response, code
        else:
            # BUG FIX: the original fell through and returned None for an
            # empty body (a 500 in Flask); reject it explicitly, matching
            # the sibling LTE/wireless handlers.
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
    except KeyError:
        return errorResponder(
            "VERIFICATION_ERROR", "Malformed request")
    except NoResultFound:
        return errorResponder(
            "NOTFOUND", "Item not found")
    except IntegrityError:
        return errorResponder(
            "DB_INTEGRITY", "Database integrity error")
    finally:
        session.close()
@app.route(
    '/chunkete/topology/physicalInterface/<phy_id>/wirelessConfig',
    methods=['PUT'])
def putInterfaceWirelessConfig(phy_id):
    """Validate a wireless configuration and forward it to the controller.

    Expected JSON body:
    {
        "channelBandwidth": 20,   # 20, 40 or 80 MHz
        "channelNumber": 36,      # any 2.4 GHz / 5 GHz channel; DFS support
                                  # depends on the node, so not restricted
                                  # to "normal" channels
        "txPower": 2000           # mBm, 0..3500 accepted (2300 typical)
    }
    Returns the controller response, or an error response on malformed
    input, unknown phy, or DB problems.
    """
    try:
        content = request.data
        log_content = "phy_id:{}:content:{}".format(
            phy_id, json.loads(content))
        logger.info(log_base.format(NORTHBOUND, REQUEST, log_content))
        content_dict = json.loads(content)
        if content_dict["channelBandwidth"] not in [20, 40, 80]:
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        if content_dict["channelNumber"] not in [i for j in (
                range(1, 11),
                range(36, 68, 4),
                range(100, 140, 4)) for i in j]:
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        # BUG FIX: the original `0 >= txPower > 3500` chained comparison can
        # never be true, so any power value was accepted.  Enforce the
        # documented 0..3500 mBm range instead.
        if not 0 <= content_dict["txPower"] <= 3500:
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        if content:
            phy = session.query(Phy).filter(Phy.id == phy_id).one()
            response, code = controllers[phy.controller_id].\
                putInterfaceWirelessConfig(phy.phy_id_controller, content)
            # BUG FIX: format the whole message at once -- the original
            # applied .format() only to the second half of the string,
            # leaving the first three placeholders unfilled in the log.
            log_content = (
                "controller:{}:phy_id_controller:{}:phy_id:{}"
                ":content:{}:response:{}/{}").format(
                    phy.controller_id, phy.phy_id_controller,
                    phy_id, content, code, response)
            logger.info(
                log_base.format(SOUTHBOUND, REQRESP, log_content))
            return jsonify(response), code
        else:
            return errorResponder(
                "VERIFICATION_ERROR", "Malformed request")
        # Removed: an unreachable `return CREATED` that followed the
        # if/else above in the original (both branches already return).
    except KeyError:
        logger.error("Malformed request")
        return errorResponder(
            "VERIFICATION_ERROR", "Malformed request")
    except NoResultFound:
        return errorResponder(
            "NOTFOUND", "Item not found")
    except IntegrityError:
        return errorResponder(
            "DB_INTEGRITY", "Database integrity error")
    finally:
        session.close()
# Chunk API implementation
@app.route('/chunkete/chunk', methods=['GET'])
def getAllChunks():
    """Return every chunk stored in the database as a JSON list."""
    logger.info(
        log_base.format(NORTHBOUND, REQUEST, ""))
    try:
        chunk_dicts = [
            _dictChunk(db_chunk)
            for db_chunk in session.query(Chunk).all()]
        return jsonify(chunk_dicts), API_RESPONSE["OK"]["code"]
    except NoResultFound:
        return errorResponder(
            "NOTFOUND", "Item not found")
    except IntegrityError:
        return errorResponder(
            "DB_INTEGRITY", "Database integrity error")
    finally:
        session.close()
@app.route('/chunkete/chunk', methods=['POST'])
def registerNewChunk():
    """Create a chunk spanning one or more controllers.

    Splits the requested phys per owning controller, stores the chunk in
    the DB, registers the per-controller sub-chunks southbound, and
    records the controller-local chunk ids.
    NOTE(review): an empty request body falls through and returns None
    (a 500 in Flask) -- confirm whether that is intended.
    """
    try:
        content = request.data
        log_content = "content:{}".format(json.loads(content))
        logger.info(log_base.format(NORTHBOUND, REQUEST, log_content))
        if content:
            chunk_dict = json.loads(content)
            controllers_phys = {}      # controller_id -> [phy_id_controller]
            controllers_content = {}   # controller_id -> southbound payload
            # Split the phys included in the chunk per controller
            for phy in chunk_dict["physicalInterfaceList"]:
                phy = session.query(Phy).filter(Phy.id == phy["id"]).one()
                phy_dict = json.loads(phy.phy_json)
                phy_id_dict = {"id": phy_dict["id"]}
                if phy.controller_id in controllers_phys.keys():
                    controllers_phys[phy.controller_id].append(
                        phy.phy_id_controller)
                    controllers_content[
                        phy.controller_id][
                        "physicalInterfaceList"].append(phy_id_dict)
                else:
                    controllers_phys[phy.controller_id] = [phy.phy_id_controller]
                    controllers_content[phy.controller_id] = {
                        "name": chunk_dict["name"],
                        "physicalInterfaceList": [phy_id_dict],
                    }
                # Default the optional fields both in the stored chunk and
                # in the current controller's payload.
                if "assignedQuota" in chunk_dict.keys():
                    controllers_content[phy.controller_id]["assignedQuota"] = \
                        chunk_dict["assignedQuota"]
                else:
                    chunk_dict["assignedQuota"] = 0
                    controllers_content[phy.controller_id]["assignedQuota"] = 0
                if "linkList" in chunk_dict.keys():
                    controllers_content[phy.controller_id]["linkList"] = \
                        chunk_dict["linkList"]
                else:
                    chunk_dict["linkList"] = []
                    controllers_content[phy.controller_id]["linkList"] = []
                if "serviceList" in chunk_dict.keys():
                    controllers_content[phy.controller_id]["serviceList"] = \
                        chunk_dict["serviceList"]
                else:
                    chunk_dict["serviceList"] = []
                    controllers_content[phy.controller_id]["serviceList"] = []
            # # Create a new chunk and add to database
            # # Get the next free ID in db
            # db_id_list = session.query(Chunk.id).all()
            # db_id_list = [r for (r, ) in db_id_list]
            # db_id_list.sort()
            # if len(db_id_list) == 0:
            #     new_chunk_id = 1
            # else:
            #     new_chunk_id = db_id_list[len(db_id_list)-1]+1
            # Add the chunk in the database
            chunk = Chunk(
                name=chunk_dict["name"],
                serviceList=json.dumps([]),
                assignedQuota=chunk_dict["assignedQuota"],
                controllers_phys=str(controllers_phys),
                phyList=str(
                    [phy["id"] for phy in chunk_dict["physicalInterfaceList"]]
                ),
                linkList=json.dumps([]), chunk_json=json.dumps(chunk_dict))
            session.add(chunk)
            # Register the chunk on each of the controllers
            controllers_chunk_dict = {}  # controller_id -> controller chunk id
            for controller_id in controllers_content.keys():
                response, code = controllers[controller_id].registerNewChunk(
                    json.dumps(controllers_content[controller_id]))
                # NOTE(review): .format() is applied only to the second half
                # of the message, so the first placeholders stay unfilled.
                log_content = "controller:{}:content:{}"
                log_content += ":response:{}/{}".\
                    format(
                        controller_id,
                        json.dumps(
                            controllers_content[controller_id]),
                        code, response)
                logger.info(log_base.format(SOUTHBOUND, REQRESP, log_content))
                if code == API_RESPONSE["CREATED"]["code"]:
                    controllers_chunk_dict[controller_id] = response["id"]
                else:
                    # NOTE(review): sub-chunks already registered on other
                    # controllers are not rolled back here.
                    return errorResponder(
                        "CONTROLLER", "Managed Controller returned an error")
            # Update Service in Database
            chunk_dict["id"] = chunk.id
            chunk.chunk_json = json.dumps(chunk_dict)
            chunk.controllers_chunk = str(controllers_chunk_dict)
            session.commit()
            return json.dumps(
                {'id': chunk.id}), API_RESPONSE["CREATED"]["code"]
    except KeyError:
        return errorResponder(
            "VERIFICATION_ERROR", "Malformed request")
    except NoResultFound:
        return errorResponder(
            "NOTFOUND", "Item not found")
    except IntegrityError:
        return errorResponder(
            "DB_INTEGRITY", "Database integrity error")
    finally:
        session.close()
@app.route('/chunkete/chunk/<chunk_id>', methods=['GET'])
def getChunkById(chunk_id):
    """Return the JSON representation of a single chunk, or 404."""
    logger.info(
        log_base.format(
            NORTHBOUND, REQUEST, "chunk_id:{}".format(chunk_id)))
    try:
        db_chunk = session.query(Chunk).filter(Chunk.id == chunk_id).one()
        return jsonify(_dictChunk(db_chunk)), API_RESPONSE["OK"]["code"]
    except NoResultFound:
        return errorResponder(
            "NOTFOUND", "Object not found")
    except IntegrityError:
        return errorResponder(
            "DB_INTEGRITY", "Database integrity error")
    finally:
        session.close()
@app.route('/chunkete/chunk/<chunk_id>', methods=['DELETE'])
def removeExistingChunk(chunk_id):
    """Delete a chunk: first its services, then the controller-side
    sub-chunks, and finally the database row."""
    log_content = "chunk_id:{}".format(chunk_id)
    logger.info(log_base.format(NORTHBOUND, REQUEST, log_content))
    try:
        chunk = session.query(Chunk).filter(Chunk.id == chunk_id).one()
        # NOTE(review): the session is closed here and reused below --
        # presumably relies on SQLAlchemy reopening on next use; confirm.
        session.close()
        # SECURITY NOTE: controllers_phys / serviceList / controllers_chunk
        # are stored as str() of Python containers and re-hydrated with
        # eval(); safe only while DB content is trusted (ast.literal_eval
        # would be the safer choice).
        controllers_phys = eval(chunk.controllers_phys)
        serviceList = eval(chunk.serviceList)
        # Remove the Services from the chunk
        while serviceList:
            removeExistingSWAMService(
                chunk_id, serviceList[0], interface=INTERNAL)
            serviceList.pop(0)
        for controller_id in controllers_phys.keys():
            response, code = controllers[controller_id].removeExistingChunk(
                eval(chunk.controllers_chunk)[controller_id])
            # NOTE(review): .format() is applied only to the second half of
            # the message, so the first two placeholders stay unfilled.
            log_content = "controller:{}:chunk_id:{}"
            log_content += ":response:{}/{}".\
                format(controller_id, chunk_id, code, response)
            logger.info(log_base.format(SOUTHBOUND, REQRESP, log_content))
        # Remove the chunk from the database
        session.delete(chunk)
        session.commit()
        return API_RESPONSE["OK"]["content"], API_RESPONSE["OK"]["code"]
    except NoResultFound:
        return errorResponder("NOTFOUND", "Item not found")
    except IntegrityError:
        return errorResponder("DB_INTEGRITY", "Database integrity error")
    finally:
        session.close()
# Service API implementation
@app.route('/chunkete/chunk/<chunk_id>/service/SWAM', methods=['GET'])
def getAllSWAMServices(chunk_id):
    """List every SWAM service registered in the given chunk."""
    logger.info(
        log_base.format(
            NORTHBOUND, REQUEST, "chunk_id:{}".format(chunk_id)))
    services = []
    try:
        chunk = session.query(Chunk).filter(Chunk.id == chunk_id).one()
        # serviceList is stored as the str() of a Python list of ids.
        for service_id in eval(chunk.serviceList):
            service_row = session.query(Service).filter(
                Service.id == service_id).one()
            services.append(_dictService(service_row))
        return jsonify(services), API_RESPONSE["OK"]["code"]
    except NoResultFound:
        return errorResponder("NOTFOUND", "Item not found")
    finally:
        session.close()
@app.route('/chunkete/chunk/<chunk_id>/service/SWAM', methods=['POST'])
def registerNewSWAMService(chunk_id):
# VERIFY CONTENT
# {
# "lteConfig": { (Más info en los mails que te he pasado de accelleran)
# "cellReserved": "not-reserved",
# "mmeAddress": "192.168.50.2",
# "mmePort": 333,
# "plmnId": "00101"
# },
# "selectedPhys": [
# (Sólo se aceptan interfaces de tipo SUB6_ACCESS,
# LTE_PRIMARY_PLMN y WIRED_TUNNEL)
# 14, 23
| |
coins.{2}"
.format(price, price*5, inquisition))
@commands.command(pass_context=True)
@asyncio.coroutine
def spell(self, ctx, *, name: str= None):
"""Tells you information about a certain spell."""
permissions = ctx.message.channel.permissions_for(get_member(self.bot, self.bot.user.id, ctx.message.server))
if not permissions.embed_links:
yield from self.bot.say("Sorry, I need `Embed Links` permission for this command.")
return
if name is None:
yield from self.bot.say("Tell me the name or words of a spell.")
return
spell = get_spell(name)
if spell is None:
yield from self.bot.say("I don't know any spell with that name or words.")
return
if type(spell) is list:
embed = discord.Embed(title="Suggestions", description="\n".join(spell))
yield from self.bot.say("I couldn't find that spell, maybe you meant one of these?", embed=embed)
return
# Attach item's image only if the bot has permissions
permissions = ctx.message.channel.permissions_for(get_member(self.bot, self.bot.user.id, ctx.message.server))
if permissions.attach_files and spell["image"] != 0:
filename = spell['name'] + ".png"
while os.path.isfile(filename):
filename = "_" + filename
with open(filename, "w+b") as f:
f.write(bytearray(spell['image']))
f.close()
with open(filename, "r+b") as f:
yield from self.bot.upload(f)
f.close()
os.remove(filename)
long = ctx.message.channel.is_private or ctx.message.channel.name == ask_channel_name
embed = self.get_spell_embed(ctx, spell, long)
yield from self.bot.say(embed=embed)
@commands.command(pass_context=True, aliases=["houses", "guildhall", "gh"])
@asyncio.coroutine
def house(self, ctx, *, name: str=None):
"""Shows info for a house or guildhall"""
permissions = ctx.message.channel.permissions_for(get_member(self.bot, self.bot.user.id, ctx.message.server))
if not permissions.embed_links:
yield from self.bot.say("Sorry, I need `Embed Links` permission for this command.")
return
if name is None:
yield from self.bot.say("Tell me the name of the house or guildhall you want to check.")
return
world = None
if ctx.message.server is not None:
world = tracked_worlds.get(ctx.message.server.id)
house = yield from get_house(name, world)
if house is None:
yield from self.bot.say("I couldn't find a house with that name.")
return
if type(house) is list:
embed = discord.Embed(title="Suggestions", description="\n".join(house))
yield from self.bot.say("I couldn't find that house, maybe you meant one of these?", embed=embed)
return
# Attach image only if the bot has permissions
permissions = ctx.message.channel.permissions_for(get_member(self.bot, self.bot.user.id, ctx.message.server))
if permissions.attach_files:
filename = house['name'] + ".png"
while os.path.isfile(filename):
filename = "_" + filename
with open(filename, "w+b") as f:
image = get_map_area(house["x"], house["y"], house["z"])
f.write(bytearray(image))
f.close()
# Send image
with open(filename, "r+b") as f:
yield from self.bot.upload(f)
f.close()
os.remove(filename)
yield from self.bot.say(embed=self.get_house_embed(house))
@commands.command(pass_context=True, aliases=["achiev"])
@asyncio.coroutine
def achievement(self, ctx, *, name: str=None):
"""Shows an achievement's information
Spoilers are only shown on ask channel and private messages"""
permissions = ctx.message.channel.permissions_for(get_member(self.bot, self.bot.user.id, ctx.message.server))
if not permissions.embed_links:
yield from self.bot.say("Sorry, I need `Embed Links` permission for this command.")
return
if name is None:
yield from self.bot.say("Tell me the name of the achievement you want to check.")
return
achievement = get_achievement(name)
if achievement is None:
yield from self.bot.say("I couldn't find an achievement with that name.")
return
if type(achievement) is list:
embed = discord.Embed(title="Suggestions", description="\n".join(achievement))
yield from self.bot.say("I couldn't find that achievement, maybe you meant one of these?", embed=embed)
return
ask_channel = get_channel_by_name(self.bot, ask_channel_name, ctx.message.server)
if not (ask_channel == ctx.message.channel or ctx.message.channel.is_private):
achievement["spoiler"] = "*To see spoilers, pm me"
if ask_channel is not None:
achievement["spoiler"] += " or use "+ask_channel.mention
achievement["spoiler"] += ".*"
embed = discord.Embed(title=achievement["name"], description=achievement["description"])
embed.add_field(name="Grade", value=EMOJI[":star:"]*int(achievement["grade"]))
embed.add_field(name="Points", value=achievement["points"])
embed.add_field(name="Spoiler", value=achievement["spoiler"], inline=True)
yield from self.bot.say(embed=embed)
@commands.command(aliases=['serversave','ss'])
@asyncio.coroutine
def time(self):
"""Displays tibia server's time and time until server save"""
offset = get_tibia_time_zone() - get_local_timezone()
tibia_time = datetime.now()+timedelta(hours=offset)
server_save = tibia_time
if tibia_time.hour >= 10:
server_save += timedelta(days=1)
server_save = server_save.replace(hour=10, minute=0, second=0, microsecond=0)
time_until_ss = server_save - tibia_time
hours, remainder = divmod(int(time_until_ss.total_seconds()), 3600)
minutes, seconds = divmod(remainder, 60)
timestrtibia = tibia_time.strftime("%H:%M")
server_save_str = '{h} hours and {m} minutes'.format(h=hours, m=minutes)
reply = "It's currently **{0}** in Tibia's servers.".format(timestrtibia)
if display_brasilia_time:
offsetbrasilia = get_brasilia_time_zone() - get_local_timezone()
brasilia_time = datetime.now()+timedelta(hours=offsetbrasilia)
timestrbrasilia = brasilia_time.strftime("%H:%M")
reply += "\n**{0}** in Brazil (Brasilia).".format(timestrbrasilia)
if display_sonora_time:
offsetsonora = -7 - get_local_timezone()
sonora_time = datetime.now()+timedelta(hours=offsetsonora)
timestrsonora = sonora_time.strftime("%H:%M")
reply += "\n**{0}** in Mexico (Sonora).".format(timestrsonora)
reply += "\nServer save is in {0}.\nRashid is in **{1}** today.".format(server_save_str, get_rashid_city())
yield from self.bot.say(reply)
    @staticmethod
    def get_char_string(char) -> str:
        """Returns a formatted string containing a character's info.

        `char` is either a dict of character data or one of the error
        sentinels (ERROR_NETWORK / ERROR_DOESNTEXIST), which is passed
        straight back to the caller.
        """
        if char == ERROR_NETWORK or char == ERROR_DOESNTEXIST:
            return char
        # Pick pronouns from the character's gender.
        pronoun = "He"
        pronoun2 = "His"
        if char['gender'] == "female":
            pronoun = "She"
            pronoun2 = "Her"
        # Character names are latin-1 on tibia.com, hence the explicit encode.
        url = url_character + urllib.parse.quote(char["name"].encode('iso-8859-1'))
        # Positional slots: 0=pronoun 1=name 2=level 3=vocation 4=residence
        # 5=world 6=guild 7=married 8=login 9=url 10=house.
        reply_format = "[{1}]({9}) is a level {2} __{3}__. {0} resides in __{4}__ in the world __{5}__.{6}{7}{8}{10}"
        guild_format = "\n{0} is __{1}__ of the [{2}]({3})."
        married_format = "\n{0} is married to [{1}]({2})."
        login_format = "\n{0} hasn't logged in for **{1}**."
        house_format = "\n{0} owns [{1}]({2}) in {3}."
        # Optional fragments default to empty; login defaults to "never".
        guild = ""
        married = ""
        house = ""
        login = "\n{0} has **never** logged in.".format(pronoun)
        if "guild" in char:
            guild_url = url_guild+urllib.parse.quote(char["guild"])
            guild = guild_format.format(pronoun, char['rank'], char['guild'], guild_url)
        if "married" in char:
            married_url = url_character + urllib.parse.quote(char["married"].encode('iso-8859-1'))
            married = married_format.format(pronoun, char['married'], married_url)
        if "house" in char:
            house_url = url_house.format(id=char["house_id"], world=char["world"])
            house = house_format.format(pronoun, char["house"], house_url, char["house_town"])
        if char['last_login'] is not None:
            last_login = parse_tibia_time(char['last_login'])
            now = datetime.now()
            time_diff = now - last_login
            # Only mention the last login when it is older than the threshold.
            if time_diff.days > last_login_days:
                login = login_format.format(pronoun, get_time_diff(time_diff))
            else:
                login = ""
        reply = reply_format.format(pronoun, char['name'], char['level'], char['vocation'], char['residence'],
                                    char['world'], guild, married, login, url, house)
        if lite_mode:
            return reply
        # Insert any highscores this character holds
        for category in highscores_categories:
            if char.get(category, None):
                highscore_string = highscore_format[category].format(pronoun2, char[category], char[category+'_rank'])
                reply += "\n"+EMOJI[":trophy:"]+" {0}".format(highscore_string)
        return reply
def get_user_string(self, ctx, username: str) -> str:
user = get_member_by_name(self.bot, username, ctx.message.server)
if user is None:
return ERROR_DOESNTEXIST
# List of servers the user shares with the bot
user_servers = get_user_servers(self.bot, user.id)
# List of Tibia worlds tracked in the servers the user is
if ctx.message.channel.is_private:
user_tibia_worlds = [world for server, world in tracked_worlds.items() if
server in [s.id for s in user_servers]]
else:
if tracked_worlds.get(ctx.message.server.id) is None:
user_tibia_worlds = []
else:
user_tibia_worlds = [tracked_worlds[ctx.message.server.id]]
# If server tracks no worlds, do not display owned chars
if len(user_tibia_worlds) == 0:
return "I don't know who @**{0.display_name}** is...".format(user)
placeholders = ", ".join("?" for w in user_tibia_worlds)
c = userDatabase.cursor()
try:
c.execute("SELECT name, ABS(last_level) as level, vocation "
"FROM chars "
"WHERE user_id = {0} AND world IN ({1}) ORDER BY level DESC".format(user.id, placeholders),
tuple(user_tibia_worlds))
result = c.fetchall()
if result:
charList = []
online_list = [x.split("_", 1)[1] for x in global_online_list]
for character in result:
character["online"] = ""
if character["name"] in online_list:
character["online"] = EMOJI[":small_blue_diamond:"]
try:
character["level"] = int(character["level"])
except ValueError:
character["level"] = ""
character["vocation"] = get_voc_abb(character["vocation"])
character["url"] = url_character + urllib.parse.quote(character["name"].encode('iso-8859-1'))
charList.append("[{name}]({url}){online} (Lvl {level} {vocation})".format(**character))
char_string = "@**{0.display_name}**'s character{1}: {2}"
plural = "s are" if len(charList) > 1 else " is"
reply = char_string.format(user, plural, join_list(charList, ", ", " and "))
else:
reply = "I don't know who @**{0.display_name}** is...".format(user)
return reply
finally:
c.close()
@staticmethod
def get_monster_embed(ctx, monster, long):
"""Gets the monster embeds to show in /mob command
The message is split in two embeds, the second contains loot only and is only shown if long is True"""
embed = discord.Embed(title=monster["title"])
hp = "?" if monster["health"] is None else "{0:,}".format(monster["health"])
experience = "?" if monster["experience"] is None else "{0:,}".format(monster["experience"])
if not (monster["experience"] is None or monster["health"] is None or monster["health"] < 0):
ratio = "{0:.2f}".format(monster['experience'] / monster['health'])
else:
ratio = "?"
embed.add_field(name="HP", value=hp)
embed.add_field(name="Experience", value=experience)
embed.add_field(name="HP/Exp Ratio", value=ratio)
weak = []
resist = []
immune = []
elements = ["physical", "holy", "death", "fire", "ice", "energy", "earth", "drown", "lifedrain"]
# Iterate through elemental types
for index, value in monster.items():
if index in elements:
if monster[index] == 0:
immune.append(index.title())
elif monster[index] > 100:
weak.append([index.title(), monster[index]-100])
elif monster[index] < 100:
resist.append([index.title(), monster[index]-100])
# Add paralysis to immunities
if monster["paralysable"] == 0:
immune.append("Paralysis")
if monster["senseinvis"] == 1:
immune.append("Invisibility")
if immune:
embed.add_field(name="Immune to", value="\n".join(immune))
else:
embed.add_field(name="Immune to", value="Nothing")
if resist:
embed.add_field(name="Resistant to", value="\n".join(["{1}% {0}".format(*i) for i in resist]))
else:
embed.add_field(name="Resistant to", value="Nothing")
if weak:
embed.add_field(name="Weak to", value="\n".join(["+{1}% {0}".format(*i) for i in weak]))
else:
embed.add_field(name="Weak to", value="Nothing")
# If monster drops no loot, we might as well show everything
if long or not monster["loot"]:
embed.add_field(name="Max damage",
value="{maxdamage:,}".format(**monster) if monster["maxdamage"] is not None else "???")
embed.add_field(name="Abilities", value=monster["abilities"], inline=False)
if monster["loot"] and long:
loot_string = ""
for item in monster["loot"]:
if item["percentage"] is None:
item["percentage"] = "??.??%"
| |
11: o0oOOo0O0Ooo
if 77 - 77: o0oOOo0O0Ooo / iIii1I11I1II1 * iIii1I11I1II1 / o0oOOo0O0Ooo * iII111i
ooOo00 . print_notify ( )
if 26 - 26: Ii1I
if 1 - 1: OoOoOO00 . o0oOOo0O0Ooo + Oo0Ooo % Oo0Ooo * I1ii11iIi11i
if 50 - 50: IiII / i1IIi . I1ii11iIi11i
if 75 - 75: I11i * oO0o + OoooooooOO . iII111i + OoO0O00
if 44 - 44: II111iiii
if ( ooOo00 . record_count < 1 ) :
lprint ( "No EID-prefix found, cannot authenticate Map-Notify-Ack" )
return
if 65 - 65: I11i . iII111i . I1IiiI - Oo0Ooo % iIii1I11I1II1 / O0
if 54 - 54: iII111i - I1Ii111
iI1iii1IIIIi = lisp_eid_record ( )
if 88 - 88: iII111i * OoO0O00 % OoooooooOO / oO0o
if ( iI1iii1IIIIi . decode ( ooOo00 . eid_records ) == None ) :
lprint ( "Could not decode EID-record, cannot authenticate " +
"Map-Notify-Ack" )
return
if 7 - 7: i1IIi
iI1iii1IIIIi . print_record ( " " , False )
if 30 - 30: oO0o . i1IIi / I11i
oOoo0OooOOo00 = iI1iii1IIIIi . print_eid_tuple ( )
if 23 - 23: i1IIi + oO0o % iII111i - OoO0O00 - i1IIi
if 74 - 74: Ii1I + I11i . OoooooooOO - I1ii11iIi11i
if 2 - 2: oO0o - o0oOOo0O0Ooo
if 80 - 80: i1IIi
if ( ooOo00 . alg_id != LISP_NONE_ALG_ID and ooOo00 . auth_len != 0 ) :
ooOOOo0o0oo = lisp_sites_by_eid . lookup_cache ( iI1iii1IIIIi . eid , True )
if ( ooOOOo0o0oo == None ) :
OOOO0OOoO = bold ( "Site not found" , False )
lprint ( ( "{} for EID {}, cannot authenticate Map-Notify-Ack" ) . format ( OOOO0OOoO , green ( oOoo0OooOOo00 , False ) ) )
if 40 - 40: O0 . ooOoO0o * iII111i . I11i + I1Ii111 % OoO0O00
return
if 9 - 9: IiII * oO0o - o0oOOo0O0Ooo
I1ii1I = ooOOOo0o0oo . site
if 17 - 17: iII111i % Oo0Ooo
if 14 - 14: I1IiiI - I1Ii111 % I1IiiI - II111iiii
if 34 - 34: I1ii11iIi11i * IiII / II111iiii / ooOoO0o * oO0o
if 3 - 3: II111iiii
I1ii1I . map_notify_acks_received += 1
if 61 - 61: oO0o . I1IiiI + i1IIi
o0OOOoO0O = ooOo00 . key_id
if ( I1ii1I . auth_key . has_key ( o0OOOoO0O ) == False ) : o0OOOoO0O = 0
O0O0 = I1ii1I . auth_key [ o0OOOoO0O ]
if 69 - 69: O0 / i1IIi - OoOoOO00 + ooOoO0o - oO0o
i111II = lisp_verify_auth ( packet , ooOo00 . alg_id ,
ooOo00 . auth_data , O0O0 )
if 80 - 80: o0oOOo0O0Ooo % O0 * I11i . i1IIi - ooOoO0o
o0OOOoO0O = "key-id {}" . format ( o0OOOoO0O ) if o0OOOoO0O == ooOo00 . key_id else "bad key-id {}" . format ( ooOo00 . key_id )
if 93 - 93: OoooooooOO / o0oOOo0O0Ooo
if 61 - 61: II111iiii / i1IIi . I1ii11iIi11i % iIii1I11I1II1
lprint ( " Authentication {} for Map-Notify-Ack, {}" . format ( "succeeded" if i111II else "failed" , o0OOOoO0O ) )
if 66 - 66: iIii1I11I1II1 % OoOoOO00 + i1IIi * i11iIiiIii * OoooooooOO
if ( i111II == False ) : return
if 36 - 36: iII111i - OoO0O00 + I1IiiI + Ii1I . OoooooooOO
if 75 - 75: oO0o * Oo0Ooo * O0
if 22 - 22: ooOoO0o / OoooooooOO . II111iiii / Ii1I * OoO0O00 . i1IIi
if 62 - 62: oO0o % Ii1I - Ii1I
if 16 - 16: OoO0O00 - O0 - OOooOOo - I11i % OoOoOO00
if ( ooOo00 . retransmit_timer ) : ooOo00 . retransmit_timer . cancel ( )
if 7 - 7: I1Ii111 / OoOoOO00 . II111iiii
I11i1i1i1iii = source . print_address ( )
o0OoOo0o0OOoO0 = ooOo00 . nonce_key
if 9 - 9: I11i . I11i . OoooooooOO
if ( lisp_map_notify_queue . has_key ( o0OoOo0o0OOoO0 ) ) :
ooOo00 = lisp_map_notify_queue . pop ( o0OoOo0o0OOoO0 )
if ( ooOo00 . retransmit_timer ) : ooOo00 . retransmit_timer . cancel ( )
lprint ( "Dequeue Map-Notify from retransmit queue, key is: {}" . format ( o0OoOo0o0OOoO0 ) )
if 42 - 42: iII111i / oO0o / iII111i * OoO0O00
else :
lprint ( "Map-Notify with nonce 0x{} queue entry not found for {}" . format ( ooOo00 . nonce_key , red ( I11i1i1i1iii , False ) ) )
if 25 - 25: OoOoOO00 - II111iiii + II111iiii . Ii1I * II111iiii
if 12 - 12: IiII / Ii1I
return
if 54 - 54: Oo0Ooo + Ii1I % OoooooooOO * OOooOOo / OoOoOO00
if 39 - 39: I1IiiI % i11iIiiIii % Ii1I
if 59 - 59: ooOoO0o % OoO0O00 / I1IiiI - II111iiii + OoooooooOO * i11iIiiIii
if 58 - 58: IiII / Oo0Ooo + o0oOOo0O0Ooo
if 71 - 71: Ii1I - IiII
if 2 - 2: OoOoOO00 % IiII % OoO0O00 . i1IIi / I1Ii111 - iIii1I11I1II1
if 88 - 88: Oo0Ooo * i1IIi % OOooOOo
if 65 - 65: iII111i . oO0o
def lisp_map_referral_loop(mr, eid, group, action, s):
    """Return True when a Map-Referral from *s* would loop.

    A loop is declared when the referred (eid, group) is not strictly
    more-specific than the prefix already cached on map-resolver *mr*.
    Only node- and ms-referral actions are checked; anything else (and an
    empty cache) returns False. The always-false `if N - N:` obfuscation
    filler from the original has been removed (dead code).
    """
    if action not in (LISP_DDT_ACTION_NODE_REFERRAL,
                      LISP_DDT_ACTION_MS_REFERRAL):
        return (False)
    # Nothing cached yet, so nothing to loop against. (`== None` kept:
    # these address objects may define custom equality.)
    if mr.last_cached_prefix[0] == None:
        return (False)
    # Loop when the cached prefix is more-specific than the new one --
    # check the group first (multicast), then the EID.
    loop_detected = False
    if group.is_null() == False:
        loop_detected = mr.last_cached_prefix[1].is_more_specific(group)
    if loop_detected == False:
        loop_detected = mr.last_cached_prefix[0].is_more_specific(eid)
    if loop_detected:
        new_prefix = lisp_print_eid_tuple(eid, group)
        cached_prefix = lisp_print_eid_tuple(mr.last_cached_prefix[0],
                                             mr.last_cached_prefix[1])
        lprint(("Map-Referral prefix {} from {} is not more-specific " +
                "than cached prefix {}").format(green(new_prefix, False), s,
                                                cached_prefix))
    return (loop_detected)
if 97 - 97: ooOoO0o / I1Ii111 * I1ii11iIi11i
if 83 - 83: Ii1I + ooOoO0o
if 46 - 46: OoOoOO00
if 66 - 66: iII111i - O0 . I1Ii111 * i1IIi / OoO0O00 / II111iiii
if 35 - 35: ooOoO0o * OOooOOo / I11i % I11i / OoooooooOO . I1Ii111
if 70 - 70: I1ii11iIi11i % I1ii11iIi11i / oO0o
if 85 - 85: OoOoOO00 % I11i / Oo0Ooo + I11i - Oo0Ooo
def lisp_process_map_referral ( lisp_sockets , packet , source ) :
if 20 - 20: IiII
OOoO000o00000 = lisp_map_referral ( )
packet = OOoO000o00000 . decode ( packet )
if ( packet == None ) :
lprint ( "Could not decode Map-Referral packet" )
return
if 81 - 81: Oo0Ooo / I1Ii111
OOoO000o00000 . print_map_referral ( )
if 20 - 20: o0oOOo0O0Ooo + ooOoO0o % i1IIi
IiIIi1I1I11Ii = source . print_address ( )
iI1III = OOoO000o00000 . nonce
if 51 - 51: iII111i - ooOoO0o
if 32 - 32: IiII - i11iIiiIii
if 41 - 41: Ii1I % Ii1I * oO0o - I11i + iIii1I11I1II1 . ooOoO0o
if 30 - 30: Ii1I * iII111i . | |
arr[0])
# print('arr[1]: ', arr[1])
# print('arr[0].dtype', arr[:, 0].dtype)
# print('arr[1].dtype', arr[:, 1].dtype)
# print('arr[:, :2]', arr[:, :2])
# for s in arr:
# if isinstance(s[1], bool):
# print('s: ', s)
# print('is: ', [s for s in arr if isinstance(s[0], bool))
# players_set = np.unique(arr[:, :2].astype(str))
player_names = arr[:, :2].flatten()
players_set = np.where(player_names!=player_names, '', player_names).tolist()
# players_set = list(set(list(np.concatenate(arr[:, 0], arr[:, 1]))))
# player_count = len(players_set)
# print('player_count: ', player_count)
# initial_ratings = [elo.Rating() for _ in range(player_count)]
# zipped = zip(
# players_set,
# [elo.Rating() for _ in range(player_count)]
# )
# # print('zipped: ', zipped)
# players_elo = dict(zip(
# players_set,
# [elo.Rating() for _ in range(player_count)]
# )) # can use default dict here?
players_elo = {}
for player in players_set:
# print('player: ', player)
players_elo[player] = elo.Rating()
match_elos = np.zeros([arr.shape[0], 2])
elo_obj = elo.Elo_Rater()
# update player elo from every recorded match
for i in range(arr.shape[0]):
w_name, l_name = arr[i][:2]
if w_name != w_name or l_name != l_name:
match_elos[i] = np.nan, np.nan
continue
match_elos[i] = players_elo[w_name].value, players_elo[l_name].value
elo_obj.rate_1vs1(players_elo[w_name], players_elo[l_name], arr[i][2], counts_538)
return match_elos[:,0], match_elos[:,1]
def generate_surface_elo_columns(df, surfaces, counts_538):
    """Compute surface-specific pre-match elo columns.

    For each surface in *surfaces*, replays matches on that surface only
    and writes the resulting pre-match ratings into 'w_sf_elo_538' /
    'l_sf_elo_538'. Rows on other surfaces keep the overall elo as a
    fallback. Returns the two columns.
    """
    # Default to the overall elo so matches on untracked surfaces keep a value.
    df['w_sf_elo_538'], df['l_sf_elo_538'] = df['w_elo_538'], df['l_elo_538']
    for surface in surfaces:
        # Filter to this surface with non-NaN names (x != x is the NaN test).
        surface_df = df[(df['surface'] == surface) & (df['w_name'] == df['w_name']) & (df['l_name'] == df['l_name'])]
        # Bug fix: counts_538 was hard-coded to True, silently ignoring the
        # caller's setting; pass the parameter through.
        # NOTE(review): the .loc targets below include NaN-name rows that
        # surface_df filtered out — lengths may disagree; confirm upstream
        # data has no NaN names.
        w_elo_columns, l_elo_columns = generate_elo_columns(np.array(surface_df[['w_name', 'l_name', 'is_gs']]), counts_538)
        df.loc[df['surface'] == surface, 'w_sf_elo_538'] = w_elo_columns
        df.loc[df['surface'] == surface, 'l_sf_elo_538'] = l_elo_columns
    return df['w_sf_elo_538'], df['l_sf_elo_538']
'''
receives n x 4 array with columns 'w_name', 'l_name', 'is_gs', 'Date'
'''
def generateEloColumnsWithHistory(arr, counts_538):
    """Replay every match in *arr* and return pre-match elos plus history.

    *arr* is an n x 4 array with columns 'w_name', 'l_name', 'is_gs',
    'Date' (see module docstring above). Returns (winner elos, loser
    elos, per-player rating history, final ratings dict).
    """
    playerEloHistory = defaultdict(list)
    players_set = np.unique(arr[:, :2])
    players_elo = dict(zip(
        players_set,
        [elo.Rating() for __ in range(len(players_set))]
    ))  # can use default dict here?
    match_elos = np.zeros([arr.shape[0], 2])
    elo_obj = elo.Elo_Rater()
    # update player elo from every recorded match
    for i in range(arr.shape[0]):
        w_name, l_name = arr[i][:2]
        isGrandSlam = arr[i][2]
        date = datetime.datetime.strptime(arr[i][3], '%Y-%m-%d')
        # Record both players' ratings *before* the match is rated.
        match_elos[i] = players_elo[w_name].value, players_elo[l_name].value
        # Bug fix: the grand-slam flag was read into isGrandSlam but a
        # hard-coded 0 was passed, unlike generate_elo_columns which
        # passes arr[i][2]; pass the flag through.
        elo_obj.rate_1vs1(players_elo[w_name], players_elo[l_name], isGrandSlam, counts_538)
        playerEloHistory[w_name].append({ 'date': date, 'newElo': players_elo[w_name].value, 'won': 1 })
        playerEloHistory[l_name].append({ 'date': date, 'newElo': players_elo[l_name].value, 'won': 0 })
    return match_elos[:,0], match_elos[:,1], playerEloHistory, players_elo
'''
return match dataframe with each player's pre-match elo ratings
'''
def generate_elo(df, counts_538=True):
    """Return match dataframe with each player's pre-match elo ratings
    (overall and surface-specific 538-style columns)."""
    # Bug fix: counts_538 was hard-coded to True for the overall columns,
    # silently ignoring the caller's setting; pass the flag through.
    df['w_elo_538'], df['l_elo_538'] = generate_elo_columns(np.array(df[['w_name', 'l_name', 'is_gs']]), counts_538)
    df['w_sf_elo_538'], df['l_sf_elo_538'] = generate_surface_elo_columns(df, ['Hard', 'Clay', 'Grass'], counts_538)
    return df
'''
replace nan values with overall average array value
'''
def fill_nan_with_mean(arr):
    """Replace every NaN entry of *arr* in place with the nanmean of the
    whole array, then return the same array object."""
    arr[np.isnan(arr)] = np.nanmean(arr)
    return arr
'''
collect 12-month s/r average performance by player
'''
def generate_52_stats(df,start_ind):
    """Attach trailing-12-month serve/return totals to each match row.

    For every row from start_ind on, records each player's (and the tour
    average's) last-year serve/return point totals *as of just before the
    match*, both overall and per surface, then updates the rolling
    accumulators with the match result. Writes the w_/l_ '_52_*' and
    '_sf_52_*' columns plus the 'avg_52_*' / 'sf_avg_52_*' ratio columns,
    and returns df.
    """
    players_stats = {}
    start_date = (df['match_year'][start_ind],df['match_month'][start_ind])
    avg_stats = stats_52(start_date)
    # set as prior so first row is not nan
    avg_stats.update(start_date,(6.4,10,3.6,10))
    # array w/ 2x1 arrays for each player's 12-month serve/return performance
    match_52_stats = np.zeros([2,len(df),4])
    avg_52_stats = np.zeros([len(df),4]) # avg tour-wide stats for serve, return
    # Parallel accumulators keyed by surface ('Hard'/'Clay'/'Grass').
    s_players_stats = {}
    s_avg_stats = {}
    for surface in ('Hard','Clay','Grass'):
        s_players_stats[surface] = {}
        s_avg_stats[surface] = stats_52((df['match_year'][0],df['match_month'][0]))
        s_avg_stats[surface].update(start_date,(6.4,10,3.6,10))
    s_match_52_stats = np.zeros([2,len(df),4])
    s_avg_52_stats = np.zeros([len(df),4])
    w_l = ['w','l']
    for i, row in df.loc[start_ind:].iterrows():
        surface = row['surface']
        date = row['match_year'],row['match_month']
        # Roll the tour-average window forward and snapshot it pre-match.
        avg_stats.set_month(date)
        avg_52_stats[i] = np.sum(avg_stats.last_year,axis=0)
        for k,label in enumerate(w_l):
            if row[label+'_name'] not in players_stats:
                players_stats[row[label+'_name']] = stats_52(date)
            # store serving stats prior to match, update current month
            players_stats[row[label+'_name']].set_month(date)
            match_52_stats[k][i] = np.sum(players_stats[row[label+'_name']].last_year,axis=0) # all four stats per player
            # update serving stats if not null
            if validate(row, label):
                # (serve won, serve pts, return won = opp serve pts - opp
                # serve won, return pts = opp serve pts)
                match_stats = (row[label+'_swon'],row[label+'_svpt'],row[w_l[1-k]+'_svpt']-\
                    row[w_l[1-k]+'_swon'],row[w_l[1-k]+'_svpt'])
                players_stats[row[label+'_name']].update(date,match_stats)
                avg_stats.update(date,match_stats)
        # repeat above process for surface-specific stats
        if surface not in ('Hard','Clay','Grass'):
            continue
        s_avg_stats[surface].set_month(date)
        s_avg_52_stats[i] = np.sum(s_avg_stats[surface].last_year,axis=0)
        for k,label in enumerate(w_l):
            if row[label+'_name'] not in s_players_stats[surface]:
                s_players_stats[surface][row[label+'_name']] = stats_52(date)
            # store serving stats prior to match, from current month
            s_players_stats[surface][row[label+'_name']].set_month(date)
            s_match_52_stats[k][i] = np.sum(s_players_stats[surface][row[label+'_name']].last_year,axis=0)
            # update serving stats if not null
            if validate(row, label):
                match_stats = (row[label+'_swon'],row[label+'_svpt'],row[w_l[1-k]+'_svpt']-\
                    row[w_l[1-k]+'_swon'],row[w_l[1-k]+'_svpt'])
                s_players_stats[surface][row[label+'_name']].update(date,match_stats)
                s_avg_stats[surface].update(date,match_stats)
    # Write out the per-player snapshot columns for winner ('w') and loser ('l').
    for k,label in enumerate(w_l):
        df[label+'_52_swon'] = match_52_stats[k][:,0]
        df[label+'_52_svpt'] = match_52_stats[k][:,1]
        df[label+'_52_rwon'] = match_52_stats[k][:,2]
        df[label+'_52_rpt'] = match_52_stats[k][:,3]
        df[label+'_sf_52_swon'] = s_match_52_stats[k][:,0]
        df[label+'_sf_52_svpt'] = s_match_52_stats[k][:,1]
        df[label+'_sf_52_rwon'] = s_match_52_stats[k][:,2]
        df[label+'_sf_52_rpt'] = s_match_52_stats[k][:,3]
    # Tour-average win percentages; divide-by-zero yields NaN which is then
    # replaced by the column mean.
    with np.errstate(divide='ignore', invalid='ignore'):
        df['avg_52_s'] = fill_nan_with_mean(np.divide(avg_52_stats[:,0],avg_52_stats[:,1]))
        df['avg_52_r'] = fill_nan_with_mean(np.divide(avg_52_stats[:,2],avg_52_stats[:,3]))
        df['sf_avg_52_s'] = fill_nan_with_mean(np.divide(s_avg_52_stats[:,0],s_avg_52_stats[:,1]))
        df['sf_avg_52_r'] = fill_nan_with_mean(np.divide(s_avg_52_stats[:,2],s_avg_52_stats[:,3]))
    return df
'''
Efron-Morris estimators for 52-week serve and return percentages
Calculates B_i coefficients in terms of service points
Feed any existing col where ['p0_'+col, 'p1_'+col] within df.columns
# TODO: you should be passing in the full column suffix after 'p0_'/'p1_'
'''
def generate_em_stats(df,cols):
    """Add Efron-Morris shrunk versions of paired 'p0_'/'p1_' stat columns.

    For each col in *cols* (expected to exist as 'p0_'+col and 'p1_'+col),
    shrinks each observation toward the overall mean p_hat by the factor
    B_i = sigma2_i / (tau2_hat + sigma2_i), writing '..._EM' columns.
    Returns df.
    """
    for col in cols:
        # Stack p0 then p1 so one shrinkage pass covers both players.
        stat_history = np.concatenate([df['p0_'+col],df['p1_'+col]],axis=0)
        n = int(len(stat_history)/2)
        # Pick the matching sample-size column (serve vs return points,
        # surface-specific or overall) from the column name.
        prefix = 'sf_52_' if 'sf' in col else '52_'
        suffix = 'svpt' if '_s_' in col else 'rpt'
        num_points = np.concatenate([df['p0_'+prefix+suffix],df['p1_'+prefix+suffix]])
        p_hat = np.mean(stat_history)
        # Binomial sampling variance per observation; where=num_points>0
        # leaves zero-point entries uninitialized, then NaNs are mean-filled.
        sigma2_i = fill_nan_with_mean(np.divide(p_hat*(1-p_hat),num_points,where=num_points>0))
        tau2_hat = np.nanvar(stat_history)
        B_i = sigma2_i/(tau2_hat+sigma2_i)
        # NaN self-inequality test: replace missing observations with p_hat.
        stat_history[stat_history!=stat_history] = p_hat
        df['p0_' + col + '_EM'] = df['p0_' + col]+B_i[:n] * (p_hat - df['p0_' + col])
        df['p1_' + col + '_EM'] = df['p1_' + col]+B_i[n:] * (p_hat - df['p1_' + col])
        print(col, p_hat)
    return df # ok if p_hats don't add up because they're avg of averages
'''
Efron-Morris estimators for 52-week serve and return percentages
Calculates B_i coefficients in terms of service points
Feed any existing col within df.columns
'''
def generate_em_stats_current(df,cols):
    """Add Efron-Morris shrunk versions of existing df columns.

    Same shrinkage as generate_em_stats but for single (unpaired) columns;
    writes col+'_EM' for each col in *cols* and returns df.
    """
    for col in cols:
        stat_history = df[col]
        num_points = df['52_svpt'] if col=='52_swon' else df['52_rpt']
        p_hat = np.mean(stat_history)
        # where=num_points>0 leaves zero-point entries uninitialized;
        # resulting NaNs are mean-filled.
        sigma2_i = fill_nan_with_mean(np.divide(p_hat*(1-p_hat),num_points,where=num_points>0))
        tau2_hat = np.nanvar(stat_history)
        B_i = sigma2_i/(tau2_hat+sigma2_i)
        # NOTE(review): stat_history is df[col] itself, so this NaN
        # replacement mutates the source column in place — confirm intended.
        stat_history[stat_history!=stat_history] = p_hat
        df[col+'_EM'] = df[col]+B_i*(p_hat-df[col])
        print(col, p_hat)
    return df # ok if p_hats don't add up because they're avg of averages
'''
use validate stats before calling statsClass.update() method
'''
def is_valid(arr):
    """Return True when *arr* contains no NaN values (use before calling a
    stats class update() method)."""
    return not np.any(np.isnan(arr))
'''
collects 12-month s/r stats relative to historical opponents
columns '52_s_adj','52_r_adj' represent how well a player
performs above average
'''
def generate_52_adj_stats(df,start_ind=0):
    """Attach opponent-adjusted trailing-12-month serve/return columns.

    For each row from start_ind on, snapshots both players' adjusted s/r
    numbers ('52_s_adj'/'52_r_adj', how far above tour average they perform)
    before the match, then feeds the match back into the rolling
    adj_stats_52 accumulators. Returns df.
    """
    players_stats = {}
    match_52_stats = np.zeros([2,len(df),2]) # 2x1 arrays for x_i, x_j's 12-month s/r performance
    w_l = ['w','l']
    for i, row in df.loc[start_ind:].iterrows():
        surface = row['surface']
        date = row['match_year'],row['match_month']
        avg_52_s, avg_52_r = row['avg_52_s'],row['avg_52_r']
        match_stats = [[],[]]
        # add new players to the dictionary
        for k,label in enumerate(w_l):
            if row[label+'_name'] not in players_stats:
                players_stats[row[label+'_name']] = adj_stats_52(date)
        # store pre-match adj stats
        for k,label in enumerate(w_l):
            players_stats[row[label+'_name']].set_month(date)
            # fill in player's adjusted stats prior to start of match
            match_52_stats[k][i] = players_stats[row[label+'_name']].adj_sr
            # update serving stats if not null
            if validate(row, label):
                sv_stats = (row[label+'_swon'],row[label+'_svpt'],row[label+'_rwon'],row[label+'_rpt'])
                # TODO: this is the troublesome line... could be extracting nan value from opponent
                # TODO: also rewrite this so it's readable (plus with arrays not obvious at)
                # Opponent ability = their adjustment + tour average, scaled
                # by this player's point counts to get expected points.
                opp_r_ablty = players_stats[row[w_l[1-k]+'_name']].adj_sr[1] + avg_52_r
                opp_s_ablty = players_stats[row[w_l[1-k]+'_name']].adj_sr[0] + avg_52_s
                opp_stats = (opp_r_ablty * row[label + '_svpt'], opp_s_ablty * row[label + '_rpt'])
                match_stats[k] = sv_stats + opp_stats
        # update players' adjusted scores based on pre-match adjusted ratings
        for k,label in enumerate(w_l):
            # if is_valid(match_stats):
            # Only update when the row validated AND no NaN leaked in from
            # either side's stats.
            if validate(row, label) and is_valid(match_stats):
                players_stats[row[label+'_name']].update(date,match_stats[k])
    for k,label in enumerate(w_l):
        df[label+'_52_s_adj'] = match_52_stats[k][:,0]
        df[label+'_52_r_adj'] = match_52_stats[k][:,1]
    return df
'''
generate delta between two players relative to shared opponent
delta_i^AB = (spw(A, C_i) - (1 - rpw(A, C_i))) - (spw(B, C_i) - (1 - rpw(B, C_i)))
'''
def generate_delta(p1_stats, p2_stats):
    """Return the delta between two players relative to a shared opponent:

    delta_i^AB = (spw(A, C_i) - (1 - rpw(A, C_i)))
               - (spw(B, C_i) - (1 - rpw(B, C_i)))

    Each stats tuple is (serve won, serve pts, return won, return pts).
    """
    def _net(stats):
        serve_pct = stats[0] / float(stats[1])
        return_pct = stats[2] / float(stats[3])
        return serve_pct - (1 - return_pct)
    return _net(p1_stats) - _net(p2_stats)
'''
return true if total service/return points both greater than zero
'''
def has_stats(last_year_stats):
    """True when both the serve-point total (index 1) and the return-point
    total (index 3) are greater than zero."""
    serve_points, return_points = last_year_stats[1], last_year_stats[3]
    return serve_points > 0 and return_points > 0
'''
get opponents who have played a match in the past 12 months (more than 0 points)
'''
def get_opponents(player_d, player_name):
    """Return the opponents of *player_name* who have recorded points in
    the past 12 months (i.e. whose history passes has_stats)."""
    history = player_d[player_name].history
    return [opp for opp in history.keys() if has_stats(history[opp])]
'''
compute serve/return parameters, given their common opponent history
'''
def generate_commop_params(player_d, player1, player2):
    """Compute serve/return deltas between two players over their common
    opponents.

    Returns the array of per-opponent deltas, or the list [0] when the
    players share no opponents.
    NOTE(review): the two branches return different types (list vs
    ndarray) and overall_delta is computed but never returned — confirm
    callers expect the raw per-opponent deltas.
    """
    p1_opponents, p2_opponents = get_opponents(player_d, player1), get_opponents(player_d, player2)
    common_opponents = np.intersect1d(p1_opponents, p2_opponents)
    if len(common_opponents) == 0:
        return [0]
    match_deltas = np.zeros(len(common_opponents))
    for i, comm_op in enumerate(common_opponents):
        p1_match_stats = player_d[player1].history[comm_op]
        p2_match_stats = player_d[player2].history[comm_op]
        comm_op_delta = generate_delta(p1_match_stats, p2_match_stats)
        match_deltas[i] = comm_op_delta
        # Debug trace for unexpected NaNs leaking in from a stats tuple.
        if np.isnan(comm_op_delta):
            print('nan here: ', p1_match_stats, p2_match_stats, comm_op)
    overall_delta = np.mean(match_deltas)
    if np.isnan(overall_delta):
        print('nan, match_deltas: ', match_deltas)
    return match_deltas
'''
collect historical s/r common-opponent performance by player
'''
def generate_commop_stats(df, start_ind):
player_d = {}
match_52_stats = np.zeros([2,len(df), 2])
match_probs = np.zeros([len(df)])
w_l = ['w','l']
for i, row in df.loc[start_ind:].iterrows():
for k, label in enumerate(w_l):
opponent_name = row[w_l[1-k]+'_name']
if row[label+'_name'] not in player_d:
player_d[row[label+'_name']] = commop_stats()
if validate(row, label):
match_stats = (row[label+'_swon'],row[label+'_svpt'],row[w_l[1-k]+'_svpt']-\
row[w_l[1-k]+'_swon'],row[w_l[1-k]+'_svpt'])
player_d[row[label+'_name']].update(match_stats, opponent_name)
# can compute common-opponent stats after current match stats inputted
if row['match_year'] >= COMMOP_START_YEAR: | |
obj.Proxy.getElements():
info = e.Proxy.getInfo()
if utils.isDraftObject(info.Part):
continue
shape = None
if utils.isVertex(info.Shape) or \
utils.isLinearEdge(info.Shape):
shape = info.Shape
ret.append(cls.Info(Part=info.Part,Shape=shape))
return ret
@classmethod
def hasFixedPart(cls,obj):
return len(obj.Proxy.getElements())>0
    @classmethod
    def lockElement(cls,info,solver):
        """Fix the element described by ``info`` in the solver system.

        Vertices are fixed with a point-coincidence constraint; linear
        edges are fixed by coinciding their first point and constraining
        the second point onto the (constant) line through both points.
        Returns the list of solver constraint handles created (may be
        empty when the element is already fixed or is unsupported).
        """
        ret = []
        system = solver.system
        isVertex = utils.isVertex(info.Shape)
        # Already fixed: nothing to do.
        if solver.isFixedElement(info.Part,info.Subname):
            return ret
        if not isVertex and utils.isDraftCircle(info.Part):
            # Draft circle edge: handled specially depending on whether a
            # sketch plane is active.
            if solver.sketchPlane:
                _c(solver,solver.getPartInfo(info),info.Subname,info.Shape)
            else:
                solver.getPartInfo(info,True,solver.group)
            solver.addFixedElement(info.Part,info.Subname)
            return ret
        # Only vertices and linear edges are supported beyond this point.
        if not isVertex and not utils.isLinearEdge(info.Shape):
            return ret
        partInfo = solver.getPartInfo(info)
        fixPoint = False
        if isVertex:
            names = [info.Subname]
            if utils.isDraftCircle(info.Part):
                _c(solver,partInfo,'Edge1',info.Shape)
                solver.addFixedElement(info.Part,'Edge1')
            elif utils.isDraftWire(info.Part):
                # Draft wire vertices are fixed individually by point.
                fixPoint = True
                names = utils.edge2VertexIndex(info.Part,info.Subname)
        else:
            # Linear edge: fix both of its end points (.fp0 / .fp1).
            names = [info.Subname+'.fp0', info.Subname+'.fp1']
        nameTag = partInfo.PartName + '.' + info.Subname
        for i,v in enumerate(utils.getVertexes(info.Shape)):
            surfix = '.fp{}'.format(i)
            system.NameTag = nameTag + surfix
            # Create an entity for the transformed constant point
            e1 = system.addPoint3dV(*info.Placement.multVec(v.Point))
            # Get the entity for the point expressed in variable parameters
            e2 = _p(solver,partInfo,names[i],v)
            solver.addFixedElement(info.Part,names[i])
            if i==0 or fixPoint:
                # We are fixing a vertex, or a linear edge. Either way, we
                # shall add a point coincidence constraint here.
                e0 = e1
                system.NameTag = nameTag + surfix
                if system.sketchPlane and utils.isDraftObject(info.Part):
                    w = system.sketchPlane.entity
                else:
                    w = 0
                e = system.addPointsCoincident(e1,e2,w,group=solver.group)
                system.log('{}: fix point {},{},{}',info.PartName,e,e1,e2)
            else:
                # The second point, so we are fixing a linear edge. We can't
                # add a second coincidence constraint, which will cause
                # over-constraint. We constraint the second point to be on
                # the line defined by the linear edge.
                #
                # First, get an entity of the transformed constant line
                system.NameTag = nameTag + '.fl'
                l = system.addLineSegment(e0,e1)
                system.NameTag = nameTag
                # Now, constraint the second variable point to the line
                e = system.addPointOnLine(e2,l,group=solver.group)
                system.log('{}: fix line {},{}',info.PartName,e,l)
            ret.append(e)
        return ret
@classmethod
def prepare(cls,obj,solver):
ret = []
for element in obj.Proxy.getElements():
ret += cls.lockElement(element.Proxy.getInfo(),solver)
return ret
@classmethod
def check(cls,elements,_checkCount=False):
if not all([utils.isElement(info.Shape) for info in elements]):
raise RuntimeError('Constraint "{}" requires all children to be '
'of element (Vertex, Edge or Face)'.format(cls.getName()))
class BaseMulti(Base):
    """Base class for constraints that accept two or more elements.

    ``_entityDef`` holds one or two entity-definition callables: the
    first applies to most elements; an optional second one applies to
    the special "first" element.
    """
    _id = -1
    _entityDef = (_wa,)
    @classmethod
    def onRegister(cls):
        # Multi-element constraints manage their own pairing: no implicit
        # workplane, and at most two entity definitions.
        assert(not cls._workplane)
        assert(len(cls._entityDef)<=2)
    @classmethod
    def check(cls,elements,checkCount=False):
        """Validate the selection; raise RuntimeError on count/type mismatch."""
        if cls._measure:
            super(BaseMulti,cls).check(elements,checkCount)
            return
        if checkCount and len(elements)<2:
            raise RuntimeError('Constraint "{}" requires at least two '
                    'elements'.format(cls.getName()))
        # Leading elements are checked against their dedicated entity defs.
        count = min(len(elements),len(cls._entityDef))
        for i,entityDef in enumerate(cls._entityDef[:count]):
            info = elements[i]
            msg = entityDef(None,info.Part,info.Subname,info.Shape)
            if msg:
                raise RuntimeError('Constraint "{}" requires the {} element '
                        'to be of {}'.format(cls.getName(),_ordinal[i],msg))
        if len(elements)<=count:
            return
        # All remaining elements must match the first entity definition.
        i = len(cls._entityDef)
        for info in elements[i:]:
            msg = cls._entityDef[0](None,info.Part,info.Subname,info.Shape)
            if msg:
                raise RuntimeError('Constraint "{}" requires the {} element '
                        'onwards to all be of {}'.format(
                            cls.getName(),_ordinal[i],msg))
    @classmethod
    def prepare(cls,obj,solver):
        """Create solver constraints pairing the first element with the rest.

        Returns a list of solver handles, or None when nothing effective
        could be created (logged as a warning).
        """
        if cls._measure:
            return
        func = cls.constraintFunc(obj,solver);
        if not func:
            return
        props = cls.getPropertyValues(obj)
        ret = []
        if cls.canMultiply(obj):
            # "Multiplied" constraint: pair each expanded shape of the first
            # (array) element with the expanded shapes of the other elements.
            elements = obj.Proxy.getElements()
            if len(elements)<=1:
                logger.warn('{} not enough elements',cstrName(obj))
                return
            firstInfo = elements[0].Proxy.getInfo(expand=True)
            count = len(firstInfo)
            if not count:
                logger.warn('{} no first part shape',cstrName(obj))
                return
            dragPart = solver.getDragPart()
            dragIndex = -1
            if isinstance(dragPart,tuple) and \
               dragPart[0]==firstInfo[0].Part[0] and \
               solver.getArrayPartConstraintCount(dragPart)==1 and \
               dragPart[1] < count:
                dragIndex = dragPart[1]
            idx = 0
            for element in elements[1:]:
                updates = []
                info0Ref = None
                infoRef = None
                shapeRef = None
                refIdx = -1
                infos = element.Proxy.getInfo(expand=True)
                # make sure the dragging part is picked as reference for
                # coplanar shortcut updating
                if dragIndex>=0:
                    for i,info in enumerate(infos):
                        if idx+i >= count:
                            break
                        info0 = firstInfo[idx+i]
                        if info0.Part[1] == dragIndex:
                            dragIndex = -1
                            info0Ref = solver.getPartInfo(info0)
                            infoRef = solver.getPartInfo(info)
                            shapeRef = info.Shape
                            refIdx = i
                            break
                for i,info in enumerate(infos):
                    if idx >= count:
                        break
                    info0 = firstInfo[idx]
                    partInfo = solver.getPartInfo(info)
                    if solver.getArrayPartConstraintCount(info0.Part)!=1:
                        partInfo0 = solver.getPartInfo(info0)
                    elif not infoRef:
                        # First eligible pair becomes the reference for the
                        # coplanar shortcut below.
                        partInfo0 = solver.getPartInfo(info0)
                        info0Ref = partInfo0
                        infoRef = partInfo
                        shapeRef = info.Shape
                    elif i == refIdx:
                        partInfo0 = info0Ref
                    else:
                        # We can safely skip those coplanar edges if the
                        # part array element is involved in one and only one
                        # constraint (i.e. this one).
                        updates.append((info0,partInfo,info.Shape))
                        idx += 1
                        continue
                    e0 = cls._entityDef[0](
                            solver,partInfo0,info0.Subname,info0.Shape)
                    e = cls._entityDef[0](
                            solver,partInfo,info.Subname,info.Shape)
                    params = props + [e0,e]
                    solver.system.checkRedundancy(obj,partInfo0,partInfo)
                    h = func(*params,group=solver.group)
                    if isinstance(h,(list,tuple)):
                        ret += list(h)
                    else:
                        ret.append(h)
                    idx += 1
                if updates:
                    info0Ref.Update.append((infoRef,shapeRef,updates))
            return ret
        # Non-multiplied path: constrain each element against the chosen
        # reference element (at most one fixed part acts as reference).
        parts = set()
        ref = None
        elements = []
        for e in obj.Proxy.getElements():
            info = e.Proxy.getInfo()
            ################################################################
            # Note: Multiple elements from the same part makes sense in, e.g.
            # PointsOnCircle
            ################################################################
            # if info.Part in parts:
            #     logger.warn('{} skip duplicate parts {}',
            #             cstrName(obj),info.PartName)
            #     continue
            parts.add(info.Part)
            if solver.isFixedPart(info.Part):
                if ref:
                    logger.warn('{} skip more than one fixed part {},{}',
                            cstrName(obj),info.PartName,ref.PartName)
                    continue
                ref = info
            elements.append(e)
        if len(elements)<=1:
            logger.warn('{} has no effective constraining element',
                    cstrName(obj))
            return
        e0 = None
        e = None
        info0 = None
        # With a second entity def, element index 1 is the special "first"
        # entity; otherwise element 0 is.
        idx0 = 1 if len(cls._entityDef)>1 else 0
        for i,element in enumerate(elements):
            info = element.Proxy.getInfo()
            partInfo = solver.getPartInfo(info)
            if i==idx0:
                e0 = cls._entityDef[idx0](
                        solver,partInfo,info.Subname,info.Shape)
                info0 = partInfo
            else:
                e = cls._entityDef[0](solver,partInfo,info.Subname,info.Shape)
            if e0 and e:
                # Argument order depends on which element is the reference.
                if idx0:
                    params = props + [e,e0]
                    solver.system.checkRedundancy(obj,partInfo,info0)
                else:
                    params = props + [e0,e]
                    solver.system.checkRedundancy(obj,info0,partInfo)
                h = func(*params,group=solver.group)
                if isinstance(h,(list,tuple)):
                    ret += list(h)
                else:
                    ret.append(h)
        return ret
class BaseCascade(BaseMulti):
    """Base class for constraints chaining consecutive elements pairwise."""
    @classmethod
    def prepare(cls,obj,solver):
        """Constrain each element against the previous one in selection order.

        Falls back to the BaseMulti behavior when the object's 'Cascade'
        property is disabled. Returns the collected solver handles.
        """
        if not getattr(obj,'Cascade',True):
            return super(BaseCascade,cls).prepare(obj,solver)
        func = cls.constraintFunc(obj,solver)
        if not func:
            return
        props = cls.getPropertyValues(obj)
        handles = []
        last = None
        for element in obj.Proxy.getElements():
            current = element.Proxy.getInfo()
            if not last or last.Part==current.Part:
                # Nothing to pair with yet, or same part: just remember it.
                last = current
                continue
            lastPartInfo = solver.getPartInfo(last)
            entPrev = cls._entityDef[0](solver,lastPartInfo,last.Subname,last.Shape)
            curPartInfo = solver.getPartInfo(current)
            entCur = cls._entityDef[0](solver,curPartInfo,current.Subname,current.Shape)
            last = current
            # The fixed part's entity goes first so the free part moves.
            if solver.isFixedPart(current.Part):
                params = props + [entPrev,entCur]
            else:
                params = props + [entCur,entPrev]
            solver.system.checkRedundancy(obj,lastPartInfo,curPartInfo)
            h = func(*params,group=solver.group)
            if isinstance(h,(list,tuple)):
                handles += list(h)
            else:
                handles.append(h)
        if not handles:
            logger.warn('{} has no effective constraint',cstrName(obj))
        return handles
class PlaneAlignment(BaseCascade):
    # Unique constraint type id used by the constraint registry.
    _id = 37
    _iconName = 'Assembly_ConstraintAlignment.svg'
    # User-editable properties: cascading flag and offset, plus angle props.
    _props = ['Cascade','Offset'] + _AngleProps
    _tooltip = \
        'Add a "{}" constraint to align planar faces of two or more parts.\n'\
        'The faces become coplanar or parallel with an optional distance'
class PlaneCoincident(BaseCascade):
    """Constraint that coincides planar faces of two or more parts."""
    # Unique constraint type id used by the constraint registry.
    _id = 35
    _iconName = 'Assembly_ConstraintCoincidence.svg'
    _props = ['Multiply','Cascade','Offset','OffsetX','OffsetY'] + _AngleProps
    # Fixed user-visible typo: "conincide" -> "coincide".
    _tooltip = \
        'Add a "{}" constraint to coincide planar faces of two or more parts.\n'\
        'The faces are coincided at their centers with an optional distance.'
class Attachment(BaseCascade):
    # Unique constraint type id used by the constraint registry.
    _id = 45
    _iconName = 'Assembly_ConstraintAttachment.svg'
    _props = ['Multiply', 'Cascade']
    _tooltip = \
        'Add a "{}" constraint to attach two parts by the selected geometry\n'\
        'elements. This constraint completely fixes the parts relative to each\n'\
        'other.'
    # Overrides the inherited entity definition with the "no check" workplane
    # variant (_wa_no_check).
    _entityDef = (_wa_no_check,)
class AxialAlignment(BaseMulti):
    """Constraint aligning edges/faces of multiple parts along an axis."""
    # Unique constraint type id used by the constraint registry.
    _id = 36
    # Entity definition: line/normal axis (_lna).
    _entityDef = (_lna,)
    _iconName = 'Assembly_ConstraintAxial.svg'
    _props = ['Multiply'] + _AngleProps
    # Fixed user-visible typos: "acceps" -> "accepts",
    # "aligned uses" -> "aligned using".
    _tooltip = 'Add a "{}" constraint to align edges/faces of two or\n'\
        'more parts. The constraint accepts linear edges, which become\n'\
        'colinear, and planar faces, which are aligned using their surface\n'\
        'normal axis, and cylindrical face, which are aligned using the\n'\
        'axial direction. Different types of geometry elements can be mixed.'
class SameOrientation(BaseMulti):
    # Unique constraint type id used by the constraint registry.
    _id = 2
    # Entity definition: plane normal (_n).
    _entityDef = (_n,)
    _iconName = 'Assembly_ConstraintOrientation.svg'
    _tooltip = 'Add a "{}" constraint to align faces of two or more parts.\n'\
        'The planes are aligned to have the same orientation (i.e. rotation)'
class MultiParallel(BaseMulti):
    # Unique constraint type id used by the constraint registry.
    _id = 291
    # Entity definition: line or workplane (_lw).
    _entityDef = (_lw,)
    _iconName = 'Assembly_ConstraintMultiParallel.svg'
    _props = _AngleProps
    _tooltip = 'Add a "{}" constraint to make planar faces or linear edges\n'\
        'of two or more parts parallel.'
class Angle(Base):
    # Unique constraint type id used by the constraint registry.
    _id = 27
    # Two line/normal entities, with an optional workplane projection.
    _entityDef = (_ln,_ln)
    _workplane = True
    _props = ["Angle","Supplement"]
    _iconName = 'Assembly_ConstraintAngle.svg'
    _tooltip = \
        'Add a "{}" constraint to set the angle of planar faces or linear\n'\
        'edges of two parts.'
    @classmethod
    def init(cls,obj):
        """Initialize the Angle property from the current element geometry."""
        infos = obj.Proxy.getElementsInfo()
        proj = None
        if len(infos) == 3:
            # An optional third element supplies the projection rotation.
            proj = infos[2].Placement.Rotation.multiply(
                    utils.getElementRotation(infos[2].Shape))
        obj.Angle = utils.getElementsAngle(infos[0].Shape,infos[1].Shape,
                infos[0].Placement,infos[1].Placement,proj)
class Perpendicular(Base):
    """Constraint making planar faces or linear edges of two parts perpendicular."""
    _id = 28
    _entityDef = (_lw,_lw)
    _workplane = True
    _iconName = 'Assembly_ConstraintPerpendicular.svg'
    _tooltip = \
        'Add a "{}" constraint to make planar faces or linear edges of two\n'\
        'parts perpendicular.'
    @classmethod
    def prepare(cls,obj,solver):
        """Add the solver constraint, picking the call by entity kind.

        Planes contribute their normal axis; a plane/edge mix uses a
        parallel constraint between the normal and the edge.
        """
        system = solver.system
        first,second = cls.getEntities(obj,solver)[:2]
        firstIsPlane = isinstance(first,PlaneInfo)
        secondIsPlane = isinstance(second,PlaneInfo)
        if firstIsPlane and secondIsPlane:
            # plane vs plane: perpendicular normals
            return system.addPerpendicular(
                    first.normal.entity,second.normal.entity,group=solver.group)
        if not firstIsPlane and not secondIsPlane:
            # edge vs edge: directly perpendicular
            return system.addPerpendicular(first,second,group=solver.group)
        if firstIsPlane:
            # plane vs edge: make the edge parallel to the plane normal
            return system.addParallel(first.normal.entity,second,group=solver.group)
        # edge vs plane: make the edge parallel to the plane normal
        return system.addParallel(first,second.normal.entity,group=solver.group)
class PointsCoincident(Base):
_id = 1
_entityDef = (_p,_p)
_workplane = True
_iconName = 'Assembly_ConstraintPointCoincident.svg'
_tooltips = 'Add a "{}" constraint to conincide two | |
# Copyright Notice:
# Copyright 2016 DMTF. All rights reserved.
# License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/Redfishtool/blob/master/LICENSE.md
# redfishtool: redfishtoolTransport.py
#
# Contents:
# 1. Class RfSessionAuth -- holds auto-created session Auth info. 'requests' calls to get credentials
# 2. Class RfTransport -- has the generic functions to send/receive http requests, generic print functions, etc
# - transport object variables used to pass transport parameters from main to cmdTable and subcommand objects
# - getApiScheme function -- generates proper scheme (http|https) based on input options and type of API
# - getVersionAndSetRootPath function -- executes GET /redfish with optional retry loop to negotiate protocol ver
# between this program and remote service, and creates the path of the root object
# - rftSendRecvRequest function--general function to send/receive Requests. handles exceptions, retries, error handling, headers
# handles proper joining of relative urls, selecting proper Auth and Scheme specified by user, etc
# - getPropFromDict --extracts a single property from a dict
# - getVersions -- function to return the service versions: GET ^/redfish
# - printVerbose -- common function used to print based on verbose level
# - printErr -- common function to print errors
# - printStatusErr4xx -- expands status_codes >400 to include description eg Unauthorized
# - rfSessionLogin, rfSessionDelete -- function to create or delete session if -ASession is selected (default)
# - rfCleanup -- called at end before returning. Deletes auto-created sessions
# - getPathBy --function that walks collection looking for a specific instance
# - getLevel2ResourceById -- searches a 2nd level collection (Processors) for -l urlLink, -m prop:val
# - listCollection -- create a list of a collection members including Id, <prop>, <rpath> of each member
# this is used by Systems and Chassis... to implement 'list' redfishtool command
# - getAllCollectionMembers -- given a url to a collection, get it, and then get all members,
# return dict with all members expanded
# - patchResource - generic patch function-handles etags and re-reading patched resource if response is 204
# - parseOdataType --parse the @odata.type property of a resource into Namespace, VersionString, ResourceType
#
# **Reference links for main requests
# https://github.com/kennethreitz/requests
#
import os
import re
import requests
import json
import sys
import socket
import time
import ipaddress
from datetime import datetime, timedelta
from dateutil import parser
from urllib.parse import urljoin, urlparse, urlunparse
from requests.auth import HTTPBasicAuth, AuthBase
from .ServiceRoot import RfServiceRoot
class RfSessionAuth(AuthBase):
    """Requests auth hook that injects a Redfish session token.

    Adds the stored session token as an 'X-Auth-Token' header on every
    outgoing request prepared by the 'requests' library.
    """
    def __init__(self,authToken):
        # Token obtained from the Redfish session-login API.
        self.authToken=authToken
    def __call__(self, r):
        # Invoked by 'requests' with the prepared request object.
        r.headers['X-Auth-Token']=self.authToken
        return r
class RfTransport():
    def __init__(self):
        """Initialize all transport state with defaults.

        Groups of attributes: program constants, command-line option
        values (set by main), transport parameters negotiated with the
        remote service, per-API state, and session-login bookkeeping.
        """
        # constant parameters-- these dont change and are not updated
        self.program="redfishtool"          # program name (in case we want to change it)
        self.version="1.1.3"                # this redfishtool version
        self.releaseDate="05/01/2020"        # release date for this version of redfishtool
        self.downloadFrom="https://github.com/DMTF/Redfishtool"  # where to find redfishtool
        self.magic="12345"                  # used for debug to test for a known parameter in this object
        self.UNAUTHENTICATED_API=1          # unauthenticated API that doesn't send credentials in body data
        self.AUTHENTICATED_API=2            # authenticated API that doesn't send credentials in body data
        self.AUTHENTICATED_WITH_CREDENTIALS_API=3   # Authenticated API that sends credentials eg passwd update, add user
        self.UNAUTHENTICATED_WITH_CREDENTIALS_API=4 # session login (unauthenticated) but sends credentials
        self.authValidValues=["None", "Basic", "Session"]
        self.secureValidValues=["Never", "IfSendingCredentials", "IfLoginOrAuthenticatedApi", "Always"]
        self.supportedVersions=["v1"]       # list of RedfishProtocolVersions that this program supports
        self.MaxNextLinks=10                # max number of requests allowed with NextLink
        self.dfltPatchPostPutHdrs = {'OData-Version': '4.0', 'Content-Type': 'application/json', 'Accept': 'application/json' }
        self.dfltGetDeleteHeadHdrs = {'Accept': 'application/json', 'OData-Version': '4.0' }
        # options and argument read from commandline options
        # these are all set or updated by Main and remain constant for all APIs called for the cmd
        self.verbose=0
        self.status=0
        self.help=False
        self.quiet=False
        self.user=""
        self.password=""
        self.rhost=None
        self.token=None
        self.protocolVer="v1"
        self.auth="Basic"                   # or "Session" using Basic as default now
        self.timeout=10                     # http transport timeout in seconds, stored as int here
        self.checkProtocolVer=False         # if -C option, then we need to check/verify the protocol ver. dflt=false
        self.blocking=True
        # more option parsing variables
        self.prop=None
        self.requestMethod=None             #used by raw subcommand
        self.requestData=None               #used by raw subcommand
        self.Id=None
        self.firstOptn=False
        self.gotIdOptn=False
        self.IdOptnCount=0
        self.gotPropOptn=False
        self.oneOptn=False
        self.allOptn=False
        self.gotMatchOptn=False
        self.matchProp=None
        self.matchValue=None
        self.gotEntriesOptn = False
        self.IdLevel2=None
        self.gotIdLevel2Optn=False
        self.IdLevel2OptnCount=0
        self.gotMatchLevel2Optn=False
        self.matchLevel2Prop=None
        self.matchLevel2Value=None
        self.linkLevel2=None                # -l <link> or --link=<link>
        self.Link=None                      # -L <Link> or --Link=<link>
        self.configFile=""
        self.secure="IfLoginOrAuthenticatedApi"  #Never
        self.waitTime=3
        self.waitNum=1
        self.headers=None
        self.dbgFlag=0
        self.subcommand=""
        self.subcommandArgv=""
        # transport parameters -- set by transport based on options and GetVersions
        # these remain constant for all APIs called for the command
        self.rhostVersions=None
        self.rootPath=None
        self.rootUri=None
        self.rootResponseDict=None
        self.rhostSupportedVersions=None
        self.versionToUse=None
        # API parameters that are calculated for each (multiple) API call used to execute the cmd
        self.scheme=None                    #not used any longer
        self.scheme0=None                   #not used any longer
        self.apiType=None
        #self.sessionId=None
        # addl session login parameters
        self.sessionId=None
        self.sessionLink=None
        self.authToken=None
        self.cleanupOnExit=True
        # measured execution time
        self.elapsed=None
        # suppress urllib3 TLS-verification warnings (self-signed BMC certs)
        requests.packages.urllib3.disable_warnings()
# calculate the user-specified minimum security scheme based on APItype and --Secure options
# usage: userSpecifiedScheme=rft.getApiScheme(apiType)
# self.secureValidValues=["IfSendingCredentials", "IfLoginOrAuthenticatedApi", "Always", "Never"]
def getApiScheme(self,apiTypeIn):
scheme=None
if( self.secure == "Always" ):
scheme="https"
elif( self.secure == "Never" ):
scheme="http"
elif( (self.secure == "IfSendingCredentials") and
( (apiTypeIn==self.AUTHENTICATED_WITH_CREDENTIALS_API) or
(apiTypeIn==self.UNAUTHENTICATED_WITH_CREDENTIALS_API) or
( (apiTypeIn==self.AUTHENTICATED_API) and (self.auth == "Basic") ) ) ):
scheme="https"
elif( (self.secure=="IfLoginOrAuthenticatedApi") and
( (apiTypeIn==self.AUTHENTICATED_API) or
(apiTypeIn==self.UNAUTHENTICATED_WITH_CREDENTIALS_API) )):
scheme="https"
else:
scheme="http"
#print("else HTTP dflt")
return(scheme) #return ok
def getVersionsAndSetRootPath(self,rft,forceCheckProtocolVer=False):
# Read the Redfish Versions API (/redfish) to determine which protocol versions the service supports
# The proper ServiceRoot Path returned for each protocol version eg: { "v1": "/redfish/v1" }.
# If self.redfishProtocolVersion="Latest" (which is the default), we will select the latest version
# that is supported by both the remote redfish service AND this program.
# If the -R <redfishVer> option is called where the user specifies a version to use,
# we must verify that the remote redfish client supports that version and that this program supports it
# Initially, only "v1" is specified, so this program and services should all support only v1.
# But it is important that client code be coded to negotiate properly to be compatible with future services
# The versions supported by this program are in a list supportedVersions=["v1",...]
# If the -W <waitNum>:<waitTime> was specified with waitNum > 1, then we will loop executing the /redfish
# API up to waitNum times with http "connection" timeout=waitTime for the service to respond
# Note that we will always send at least one request to /redfish API even if waitNum=0.
# Waiting for the service to be up this way can aid in sending commands to services connected through
# shared NICs where the network path can goes away for a few seconds as the host OS boots and NICs
# reset and authenticate with switches. If we wait until we have a connection to start, most false
# failures are avoided (although the connection can also go away during cmd exec-but that window is smaller
rft.printVerbose(5,"getVersionsAndRootPath: read versions from rhost")
# if already executed, just return
if( rft.rootPath is not None):
rft.printVerbose(5,"Transport.getRootPath: path already exists")
#return(0,None,False,None)
if( rft.rhost is None):
rft.printErr("Transport: -r rHost was not specified and is required by this command. aborting")
return(5,None,False,None)
# if the checkProtocol flag is not set true, dont query rhost for /redfish version
# just use what was passed in with -R <redfishVersion> or the default "v1"
if( (rft.checkProtocolVer is False) and (forceCheckProtocolVer is not True) ):
# If here, checkProtocolVer is false. we will generate the rootURL and hope for the best
# This saves additional Get /redfish query that 99.9% of time is ok
# the Get Versions API (GET /redfish) calls the routine with forceCheckProtocolVer=True
rft.rootPath=urljoin("/redfish/", (rft.protocolVer + "/") )
#id of protocolVersion is v1, rft.rootPath="/redfish/v1/"
# calculate the rootUri including scheme,rhost,rootPath properly
scheme=rft.getApiScheme(rft.UNAUTHENTICATED_API)
rhost = rft.rhost
try:
if ipaddress.ip_address(rhost).version == 6:
rhost = '[{}]'.format(rhost)
except ValueError:
pass
scheme_tuple=[scheme, rhost, rft.rootPath, "","",""]
rootUrl=urlunparse(scheme_tuple)
rft.rootUri=rootUrl
# save parameters
rft.rhostSupportedVersions=None
rft.versionToUse=rft.protocolVer
rft.printVerbose(5,"Transport.getRootPath: protocolVer to use={}, rootPath={}".format(rft.versionToUse, rft.rootPath))
return(0,None,False,None) # return ok
# create scheme based on input parameters and apiType(set here) using setApiScheme() function above.
scheme=rft.getApiScheme(rft.UNAUTHENTICATED_API)
#define header and put the full URL together
hdrs = dict(rft.dfltGetDeleteHeadHdrs)
scheme_tuple=[scheme, rft.rhost, "/redfish", "","",""]
url=urlunparse(scheme_tuple) # url= "http[s]://<rhost>[:<port>]/redfish"
rft.printVerbose(5,"Transport.getRootPath: url={}".format(url))
# now send request to rhost, with retries based on -W <waitNum>:<waitTime> | |
# tests/unit/protocol/transport/reliablebuffers.py
"""Test the protocol.transport.reliablebuffers module."""
# Builtins
# Packages
from phylline.links.clocked import LinkClockRequest
from phylline.links.events import LinkException
from phylline.util.logging import hex_bytes
from phylline.util.timing import Clock
from phyllo.protocol.communication import DATA_TYPES
from phyllo.protocol.transport.reliablebuffers import GBNSender, ReliableBufferLink
from phyllo.protocol.transport.reliablebuffers import (
ReliableBuffer, ReliableBufferFlags, ReliableBufferHeader
)
def assert_payload_contents(queue, value_min, value_max):
    """Check whether the payloads of queued reliable_buffers are in the specified range."""
    entries = list(queue)
    expected_count = value_max - value_min + 1
    assert len(entries) == expected_count
    for entry in entries:
        first_byte = entry['reliable_buffer'].payload[0]
        assert value_min <= first_byte <= value_max
def test_gbn_sender():
    """Test GBNSender in normal behavior.

    Walks the go-back-N sender through its lifecycle: filling the send
    queue past capacity, flushing into the in-flight queue, timeout
    retransmission, single/invalid/cumulative ACK handling, and NAK
    retransmission decisions. Counter values are checked at each step.
    """
    clock = Clock(time=0)
    sender = GBNSender(clock)
    # Fill the send queue up to, then one past, the sender window size.
    print('Testing send queue:')
    assert sender.counter['send_flushed'] == 0
    assert len(list(sender.flush_send_queue())) == 0
    assert sender.counter['send_flushed'] == 0
    assert sender.counter['send_queued'] == 0
    for i in range(sender.SENDER_WINDOW_SIZE - 1):
        dummy_reliable_buffer = ReliableBuffer(payload=bytes([i]))
        sender.send(dummy_reliable_buffer)
        assert sender.send_queue_availability
        assert sender.counter['send_queued'] == i + 1
        assert sender.counter['availability_filled'] == 0
    sender.send(ReliableBuffer(payload=bytes([i + 1])))
    assert sender.counter['send_queued'] == sender.SENDER_WINDOW_SIZE
    assert sender.counter['send_availability_filled'] == 1
    assert sender.send_queue_availability == 0
    assert len(sender.send_queue) == sender.SENDER_WINDOW_SIZE
    print(
        'Expect a warning about over-stuffed send queue with reliable_buffer of '
        'payload {}:'.format(hex_bytes([i + 2]))
    )
    assert sender.counter['send_availability_exceeded'] == 0
    sender.send(ReliableBuffer(payload=bytes([i + 2])))
    assert sender.counter['send_queued'] == sender.SENDER_WINDOW_SIZE + 1
    assert sender.counter['send_availability_exceeded'] == 1
    assert sender.send_queue_availability == 0
    assert len(sender.send_queue) == sender.SENDER_WINDOW_SIZE + 1
    # Flushing moves a window's worth of buffers into the in-flight queue.
    print('Testing send queue transmission:')
    assert sender.counter['send_flushed'] == 0
    assert sender.counter['in_flight_queued'] == 0
    assert_payload_contents(
        sender.flush_send_queue(), 0, sender.SENDER_WINDOW_SIZE - 1
    )
    assert sender.counter['send_emptied'] == 0
    assert sender.counter['send_flushed'] == sender.SENDER_WINDOW_SIZE
    assert sender.counter['in_flight_queued'] == sender.SENDER_WINDOW_SIZE
    assert sender.counter['in_flight_availability_filled'] == 1
    assert len(sender.in_flight_queue) == sender.SENDER_WINDOW_SIZE
    assert sender.in_flight_queue_availability == 0
    assert len(sender.send_queue) == 1
    assert sender.send_queue_availability == sender.SENDER_WINDOW_SIZE - 1
    # Advancing the clock past the timeout triggers retransmission.
    print('Testing in-flight queue timeout:')
    assert not sender.retransmit_from_timeout
    clock.update(time=sender.send_timeout)
    assert sender.retransmit_from_timeout
    assert sender.counter['in_flight_resent'] == 0
    assert len(list(sender.resend_in_flight('timeout'))) == sender.SENDER_WINDOW_SIZE
    assert sender.counter['in_flight_resent'] == sender.SENDER_WINDOW_SIZE
    assert not sender.retransmit_from_timeout
    clock.update(time=2 * sender.send_timeout)
    assert sender.retransmit_from_timeout
    assert len(list(sender.resend_in_flight('timeout'))) == sender.SENDER_WINDOW_SIZE
    assert sender.counter['in_flight_resent'] == 2 * sender.SENDER_WINDOW_SIZE
    assert not sender.retransmit_from_timeout
    # A valid ACK releases the acknowledged buffer and frees window space.
    print('Testing valid ACK:')
    dummy_ack = ReliableBuffer(
        header=ReliableBufferHeader(ack_num=1, flags=ReliableBufferFlags(ack=True)),
        payload=bytes([128])
    )
    assert not sender.retransmit_from_received(dummy_ack)
    assert sender.counter['in_flight_emptied'] == 0
    sender.to_receive(dummy_ack)
    assert sender.counter['acknowledge'] == 1
    assert sender.last_acknowledged == 1
    assert len(sender.in_flight_queue) == sender.SENDER_WINDOW_SIZE - 1
    assert_payload_contents(sender.in_flight_queue, 1, sender.SENDER_WINDOW_SIZE - 1)
    assert sender.counter['send_emptied'] == 0
    assert_payload_contents(
        sender.flush_send_queue(), sender.SENDER_WINDOW_SIZE, sender.SENDER_WINDOW_SIZE
    )
    assert sender.counter['send_emptied'] == 1
    assert sender.counter['send_flushed'] == sender.SENDER_WINDOW_SIZE + 1
    assert sender.counter['in_flight_queued'] == sender.SENDER_WINDOW_SIZE + 1
    assert sender.counter['send_availability_filled'] == 2
    assert sender.counter['in_flight_emptied'] == 0
    assert len(sender.in_flight_queue) == sender.SENDER_WINDOW_SIZE
    assert sender.in_flight_queue_availability == 0
    assert len(sender.send_queue) == 0
    assert sender.send_queue_availability == sender.SENDER_WINDOW_SIZE
    # ACKs outside the in-flight range (behind or ahead) are rejected.
    print('Testing invalid ACK behind:')
    print('Expect an error about unexpected ACK {}:'.format(0))
    dummy_ack = ReliableBuffer(
        header=ReliableBufferHeader(ack_num=0, flags=ReliableBufferFlags(ack=True)),
        payload=bytes([128])
    )
    assert not sender.retransmit_from_received(dummy_ack)
    assert sender.counter['acknowledge_unexpected'] == 0
    sender.to_receive(dummy_ack)
    assert sender.counter['acknowledge_unexpected'] == 1
    assert sender.counter['acknowledge'] == 1
    assert sender.last_acknowledged == 1
    assert len(sender.in_flight_queue) == sender.SENDER_WINDOW_SIZE
    print('Testing invalid ACK ahead:')
    print('Expect an error about unexpected ACK {}:'.format(100))
    dummy_ack = ReliableBuffer(
        header=ReliableBufferHeader(ack_num=100, flags=ReliableBufferFlags(ack=True)),
        payload=bytes([128])
    )
    assert not sender.retransmit_from_received(dummy_ack)
    assert sender.counter['acknowledge_unexpected'] == 1
    sender.to_receive(dummy_ack)
    assert sender.counter['acknowledge_unexpected'] == 2
    assert sender.counter['acknowledge'] == 1
    assert sender.last_acknowledged == 1
    assert len(sender.in_flight_queue) == sender.SENDER_WINDOW_SIZE
    # A NAK with the ack flag requests retransmission for any ack_num;
    # a NAK without the ack flag does not.
    print('Testing NAK:')
    for ack_num in range(sender.SEQUENCE_NUMBER_SPACE):
        assert sender.retransmit_from_received(ReliableBuffer(
            header=ReliableBufferHeader(
                ack_num=ack_num, flags=ReliableBufferFlags(ack=True, nak=True)
            ), payload=bytes([128])
        ))
    assert not sender.retransmit_from_received(ReliableBuffer(
        header=ReliableBufferHeader(ack_num=1, flags=ReliableBufferFlags(nak=True)),
        payload=bytes([128])
    ))
    # A cumulative ACK releases everything up to its ack number.
    print('Testing valid multiple ACK:')
    dummy_ack = ReliableBuffer(
        header=ReliableBufferHeader(ack_num=9, flags=ReliableBufferFlags(ack=True)),
        payload=bytes([128])
    )
    assert not sender.retransmit_from_received(dummy_ack)
    assert sender.counter['in_flight_emptied'] == 0
    assert sender.counter['acknowledge'] == 1
    sender.to_receive(dummy_ack)
    assert sender.counter['acknowledge'] == 9
    assert sender.last_acknowledged == 9
    assert len(sender.in_flight_queue) == 0
    assert sender.counter['send_emptied'] == 1
    assert len(sender.send_queue) == 0
    assert sender.in_flight_queue_availability == sender.SENDER_WINDOW_SIZE
    assert len(list(sender.flush_send_queue())) == 0
    assert sender.counter['send_emptied'] == 1
    assert sender.counter['send_flushed'] == sender.SENDER_WINDOW_SIZE + 1
    assert sender.counter['in_flight_queued'] == sender.SENDER_WINDOW_SIZE + 1
    assert sender.counter['send_availability_filled'] == 2
    assert sender.counter['in_flight_emptied'] == 1
    assert len(sender.in_flight_queue) == 0
    assert sender.in_flight_queue_availability == sender.SENDER_WINDOW_SIZE
    assert len(sender.send_queue) == 0
    assert sender.send_queue_availability == sender.SENDER_WINDOW_SIZE
    # TODO: test rollover
def assert_send_reliable_buffer_ranges(
        to_send, sequence_min, sequence_max, payload_type,
        payload_first=None, payload_second_min=None, payload_second_max=None
):
    """Check whether reliable buffers exactly cover the specified ranges.

    Verifies sequence numbers (data[0], modulo the sequence-number
    space), the payload type byte (data[3]) and, when provided, the
    first/second payload bytes (data[4]/data[5]).
    """
    if payload_second_min is not None and payload_second_max is not None:
        assert len(to_send) == payload_second_max - payload_second_min + 1
    assert to_send[0].data[0] == sequence_min
    assert to_send[-1].data[0] == sequence_max
    for (i, send_buffer) in enumerate(to_send):
        assert send_buffer.data[0] == (sequence_min + i) % GBNSender.SEQUENCE_NUMBER_SPACE
        assert send_buffer.data[3] == payload_type
        if payload_first is not None:
            # BUGFIX: this comparison lacked its `assert`, so the check
            # silently never ran.
            assert send_buffer.data[4] == payload_first
        if payload_second_min is not None:
            assert send_buffer.data[5] == (payload_second_min + i) % 256
def assert_receive_reliable_buffer_ranges(
    receive, sequence_min, sequence_max, type,
    payload_first, payload_second_min, payload_second_max
):
    """Assert that received reliable buffers exactly cover the specified ranges.

    Args:
        receive: sequence of received reliable buffers; headers are read from
            `.context['header']` and payload bytes from `.data`.
        sequence_min: expected sequence number of the first buffer.
        sequence_max: expected sequence number of the last buffer.
        type: expected header type of every buffer. (Parameter name shadows
            the builtin, but is kept for keyword-compatibility with callers.)
        payload_first: expected first payload byte of every buffer.
        payload_second_min: expected second payload byte of the first buffer;
            subsequent buffers increment it modulo 256.
        payload_second_max: together with `payload_second_min`, fixes the
            expected number of buffers.

    Raises:
        AssertionError: if any buffer deviates from the expected ranges.
    """
    assert len(receive) == payload_second_max - payload_second_min + 1
    assert receive[0].context['header'].seq_num == sequence_min
    assert receive[-1].context['header'].seq_num == sequence_max
    for (i, receive_reliable_buffer) in enumerate(receive):
        assert receive_reliable_buffer.context['header'].seq_num == (
            (sequence_min + i) % GBNSender.SEQUENCE_NUMBER_SPACE
        )
        assert receive_reliable_buffer.context['header'].type == type
        # Bug fix: this comparison previously lacked `assert`, so the
        # first-payload-byte check was a silent no-op.
        assert receive_reliable_buffer.data[0] == payload_first
        assert receive_reliable_buffer.data[1] == (payload_second_min + i) % 256
def test_reliable_buffer_link():
"""Test ReliableBufferLink in normal behavior."""
print('Testing ReliableBuffer Link...')
reliable_buffer_counter = 0
reliable_buffer_link = ReliableBufferLink()
print(
'Requesting to send {} reliable buffers...'
.format(2 * GBNSender.SENDER_WINDOW_SIZE)
)
for i in range(2 * GBNSender.SENDER_WINDOW_SIZE):
reliable_buffer_link.send(bytes([0, reliable_buffer_counter]))
reliable_buffer_counter += 1
assert (
reliable_buffer_link.sender_counter['send_reliable_buffer']
== 2 * GBNSender.SENDER_WINDOW_SIZE
)
assert (
reliable_buffer_link.arq_sender.counter['send_flushed']
== GBNSender.SENDER_WINDOW_SIZE
)
assert (
reliable_buffer_link.arq_sender.counter['send_queued']
== 2 * GBNSender.SENDER_WINDOW_SIZE
)
assert (
reliable_buffer_link.arq_sender.counter['send_availability_filled'] == 1
)
assert (
reliable_buffer_link.arq_sender.counter['send_availability_exceeded'] == 0
)
assert (
reliable_buffer_link.sender_counter['to_send_in_flight']
== GBNSender.SENDER_WINDOW_SIZE
)
assert (
reliable_buffer_link.arq_sender.counter['in_flight_queued']
== GBNSender.SENDER_WINDOW_SIZE
)
assert (
reliable_buffer_link.arq_sender.counter['in_flight_availability_filled'] == 1
)
assert not reliable_buffer_link.arq_sender.send_queue_availability
assert reliable_buffer_link.has_to_send()
to_send = list(reliable_buffer_link.to_send_all())
assert len(to_send) == GBNSender.SENDER_WINDOW_SIZE + 1
assert isinstance(to_send[1], LinkClockRequest)
assert to_send[1].requested_time == reliable_buffer_link.arq_sender.send_timeout
del to_send[1]
assert_send_reliable_buffer_ranges(
to_send, 0, GBNSender.SENDER_WINDOW_SIZE - 1, DATA_TYPES[('bytes', 'buffer')],
0, 0, GBNSender.SENDER_WINDOW_SIZE - 1
)
print('Requesting to send another reliable buffer to over-stuff the send queue...')
print(
'Expect a warning about over-stuffed send queue with reliable buffer '
'of payload {}:'.format(hex_bytes([reliable_buffer_counter]))
)
reliable_buffer_link.send(bytes([0, reliable_buffer_counter]))
reliable_buffer_counter += 1
assert (
reliable_buffer_link.sender_counter['send_reliable_buffer']
== reliable_buffer_counter
)
assert (
reliable_buffer_link.arq_sender.counter['send_queued']
== reliable_buffer_counter
)
assert (
reliable_buffer_link.arq_sender.counter['send_availability_exceeded'] == 1
)
print('Letting in-flight reliable buffers time out...')
assert not reliable_buffer_link.has_to_send()
reliable_buffer_link.update_clock(reliable_buffer_link.arq_sender.send_timeout)
assert (
reliable_buffer_link.clock.time
== reliable_buffer_link.arq_sender.send_timeout
)
assert reliable_buffer_link.sender_counter['timeout_retransmission'] == 1
assert (
reliable_buffer_link.sender_counter['to_send_retransmission']
== GBNSender.SENDER_WINDOW_SIZE
)
assert (
reliable_buffer_link.arq_sender.counter['in_flight_resent']
== GBNSender.SENDER_WINDOW_SIZE
)
assert (
reliable_buffer_link.sender_counter['to_send_in_flight']
== GBNSender.SENDER_WINDOW_SIZE
)
assert reliable_buffer_link.has_to_send()
to_send = list(reliable_buffer_link.to_send_all())
assert len(to_send) == GBNSender.SENDER_WINDOW_SIZE + 1
assert isinstance(to_send[-1], LinkClockRequest)
assert (
to_send[-1].requested_time
== 2 * reliable_buffer_link.arq_sender.send_timeout
)
del to_send[-1]
assert_send_reliable_buffer_ranges(
to_send, 0, GBNSender.SENDER_WINDOW_SIZE - 1, DATA_TYPES[('bytes', 'buffer')],
0, 0, GBNSender.SENDER_WINDOW_SIZE - 1
)
print('Receiving a valid ACK...')
ack = ReliableBuffer(
header=ReliableBufferHeader(
seq_num=0, ack_num=1, flags=ReliableBufferFlags(ack=True),
type=DATA_TYPES[('transport', 'validated_datagram')]
), payload=bytes([255, 0])
)
assert reliable_buffer_link.arq_sender.counter['acknowledge'] == 0
reliable_buffer_link.to_receive(ack.buffer)
# Check receive
assert reliable_buffer_link.receiver_counter['to_receive_reliable_buffer'] == 1
assert reliable_buffer_link.receiver_counter['unexpected_reliable_buffer'] == 0
assert reliable_buffer_link.arq_sender.counter['acknowledge'] == 1
assert reliable_buffer_link.arq_sender.counter['acknowledge_rollover'] == 0
assert reliable_buffer_link.arq_sender.counter['acknowledge_unexpected'] == 0
assert reliable_buffer_link.arq_sender.counter['in_flight_emptied'] == 0
assert reliable_buffer_link.has_receive()
receive = list(reliable_buffer_link.receive_all())
assert len(receive) == 1
assert_receive_reliable_buffer_ranges(
receive, 0, 0, DATA_TYPES[('transport', 'validated_datagram')], 255, 0, 0
)
# Check to_send
assert reliable_buffer_link.receiver_counter['requested_retransmission'] == 0
assert reliable_buffer_link.receiver_counter['to_send_retransmission'] == 0
assert reliable_buffer_link.receiver_counter['to_send_in_flight'] == 1
assert (
reliable_buffer_link.arq_sender.counter['in_flight_queued']
== GBNSender.SENDER_WINDOW_SIZE + 1
)
assert (
reliable_buffer_link.arq_sender.counter['in_flight_availability_filled'] == 2
)
assert reliable_buffer_link.has_to_send()
to_send = list(reliable_buffer_link.to_send_all())
assert_send_reliable_buffer_ranges(
to_send, 8, 8, DATA_TYPES[('bytes', 'buffer')], 0, 8, 8
)
print('Letting in-flight reliable buffers time out...')
assert not reliable_buffer_link.has_to_send()
reliable_buffer_link.update_clock(
2 * reliable_buffer_link.arq_sender.send_timeout
)
assert (
reliable_buffer_link.clock.time
== 2 * reliable_buffer_link.arq_sender.send_timeout
)
assert reliable_buffer_link.sender_counter['timeout_retransmission'] == 2
assert (
reliable_buffer_link.sender_counter['to_send_retransmission']
== 2 * GBNSender.SENDER_WINDOW_SIZE
)
assert (
reliable_buffer_link.arq_sender.counter['in_flight_resent']
== 2 * GBNSender.SENDER_WINDOW_SIZE
)
assert (
reliable_buffer_link.sender_counter['to_send_in_flight']
== GBNSender.SENDER_WINDOW_SIZE
)
assert reliable_buffer_link.has_to_send()
to_send = list(reliable_buffer_link.to_send_all())
assert len(to_send) == GBNSender.SENDER_WINDOW_SIZE + 1
assert isinstance(to_send[-1], LinkClockRequest)
assert (
to_send[-1].requested_time
== 3 * reliable_buffer_link.arq_sender.send_timeout
)
del to_send[-1]
assert_send_reliable_buffer_ranges(
to_send, 1, GBNSender.SENDER_WINDOW_SIZE, DATA_TYPES[('bytes', 'buffer')],
0, 1, GBNSender.SENDER_WINDOW_SIZE
)
print('Receiving an invalid ACK behind...')
ack = ReliableBuffer(
header=ReliableBufferHeader(
seq_num=1, ack_num=0, flags=ReliableBufferFlags(ack=True)
), payload=bytes([255, 1])
)
assert reliable_buffer_link.arq_sender.counter['acknowledge'] == 1
assert reliable_buffer_link.receiver_counter['unexpected_reliable_buffer'] == 0
print('Expect two errors about unexpected ACK {}:'.format(0))
reliable_buffer_link.to_receive(ack.buffer)
# Check receive
assert reliable_buffer_link.receiver_counter['to_receive_reliable_buffer'] == 2
assert reliable_buffer_link.receiver_counter['unexpected_reliable_buffer'] == 1
assert reliable_buffer_link.arq_sender.counter['acknowledge'] == 1
assert reliable_buffer_link.arq_sender.counter['acknowledge_rollover'] == 0
assert reliable_buffer_link.arq_sender.counter['acknowledge_unexpected'] == 1
assert reliable_buffer_link.arq_sender.counter['in_flight_emptied'] == 0
assert not reliable_buffer_link.has_receive()
# Check to_send
assert reliable_buffer_link.receiver_counter['requested_retransmission'] == 0
assert reliable_buffer_link.receiver_counter['to_send_retransmission'] == 0
assert reliable_buffer_link.receiver_counter['to_send_in_flight'] == 1
print('Receiving an invalid ACK ahead...')
ack = ReliableBuffer(
header=ReliableBufferHeader(
seq_num=2, ack_num=255, flags=ReliableBufferFlags(ack=True)
), payload=bytes([255, 2])
)
print('Expect two errors about unexpected ACK {}:'.format(255))
assert reliable_buffer_link.arq_sender.counter['acknowledge'] == 1
reliable_buffer_link.to_receive(ack.buffer)
# Check receive
assert reliable_buffer_link.receiver_counter['to_receive_reliable_buffer'] == 3
assert reliable_buffer_link.receiver_counter['unexpected_reliable_buffer'] == 2
assert reliable_buffer_link.arq_sender.counter['acknowledge'] == | |
<reponame>mohamad-amin/neural-tangents
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `utils/empirical.py`."""
from functools import partial
import operator
from absl.testing import absltest
from absl.testing import parameterized
from jax import jit, tree_map, tree_multimap
from jax import test_util as jtu
from jax.config import config
import jax.numpy as np
import jax.random as random
from neural_tangents import stax
from neural_tangents.utils import empirical
from neural_tangents.utils import test_utils
from neural_tangents.utils import utils
config.parse_flags_with_absl()
# Make silent rank promotion (implicit broadcasting across ranks) an error.
config.update('jax_numpy_rank_promotion', 'raise')
# Square-matrix shapes exercised by the linearization / Taylor tests.
TAYLOR_MATRIX_SHAPES = [(3, 3), (4, 4)]
# Number of random probe points per linearization / Taylor test case.
TAYLOR_RANDOM_SAMPLES = 10
# Architecture tags consumed by `_build_network`.
FLAT = 'FLAT'
POOLING = 'POOLING'
CONV = 'CONV'
# Train/test input shapes are paired element-wise with NETWORK below.
TRAIN_SHAPES = [(4, 4), (4, 8), (8, 8), (6, 4, 4, 3), (4, 4, 4, 3),
                (4, 4, 4, 3)]
TEST_SHAPES = [(2, 4), (6, 8), (16, 8), (2, 4, 4, 3), (2, 4, 4, 3),
               (2, 4, 4, 3)]
NETWORK = [FLAT, FLAT, FLAT, FLAT, POOLING, CONV]
# Output dimensionalities for which empirical kernel fns are built (KERNELS).
OUTPUT_LOGITS = [1, 2, 3]
CONVOLUTION_CHANNELS = 8
test_utils.update_test_tolerance()
def _build_network(input_shape, network, out_logits):
  """Construct the stax test network for a given architecture tag.

  `input_shape` must be flat (rank 1) or an image (rank 3); `network` selects
  one of FLAT / POOLING / CONV. Returns the stax (init_fn, apply_fn, ...)
  triple.
  """
  rank = len(input_shape)
  if rank == 1:
    # Flat inputs only support the single-dense FLAT architecture.
    assert network == FLAT
    return stax.Dense(out_logits, W_std=2.0, b_std=0.5)
  if rank != 3:
    raise ValueError('Expected flat or image test input.')
  if network == POOLING:
    return stax.serial(
        stax.Conv(CONVOLUTION_CHANNELS, (3, 3), W_std=2.0, b_std=0.05),
        stax.GlobalAvgPool(),
        stax.Dense(out_logits, W_std=2.0, b_std=0.5))
  if network == CONV:
    return stax.serial(
        stax.Conv(CONVOLUTION_CHANNELS, (1, 2), W_std=1.5, b_std=0.1),
        stax.Relu(),
        stax.Conv(CONVOLUTION_CHANNELS, (3, 2), W_std=2.0, b_std=0.05),
    )
  if network == FLAT:
    return stax.serial(
        stax.Conv(CONVOLUTION_CHANNELS, (3, 3), W_std=2.0, b_std=0.05),
        stax.Flatten(),
        stax.Dense(out_logits, W_std=2.0, b_std=0.5))
  raise ValueError('Unexpected network type found: {}'.format(network))
def _kernel_fns(key,
                input_shape,
                network,
                out_logits,
                diagonal_axes,
                trace_axes,
                vmap_axes=None):
  """Build jitted empirical kernel fns with random parameters pre-bound.

  Returns a triple of callables (implicit NTK, direct NTK, NNGP), each a
  `partial` over parameters initialized for the requested network.
  """
  init_fn, f, _ = _build_network(input_shape, network, out_logits)
  _, params = init_fn(key, (-1,) + input_shape)
  kernel_fns = (
      empirical._empirical_implicit_ntk_fn(f, trace_axes, diagonal_axes,
                                           vmap_axes),
      empirical._empirical_direct_ntk_fn(f, trace_axes, diagonal_axes,
                                         vmap_axes),
      empirical.empirical_nngp_fn(f, trace_axes, diagonal_axes),
  )
  return tuple(partial(jit(kernel_fn), params=params)
               for kernel_fn in kernel_fns)
# Kernel-builder registry: one `_kernel_fns` variant per output dimension.
KERNELS = {
    'empirical_logits_{}'.format(o): partial(_kernel_fns, out_logits=o)
    for o in OUTPUT_LOGITS
}
class EmpiricalTest(jtu.JaxTestCase):
# We use a three layer deep linear network for testing.
@classmethod
def f(cls, x, params, do_alter, do_shift_x=True):
w1, w2, b = params
if do_alter:
b *= 2.
w1 += 5.
w2 /= 0.9
if do_shift_x:
x = x * 2 + 1.
return [0.5 * np.dot(np.dot(x.T, w1), x) + np.dot(w2, x) + b,
(np.dot(w1, x),
w2)
]
  @classmethod
  def f_lin_exact(cls, x0, x, params, do_alter, do_shift_x=True):
    """Closed-form first-order expansion of `f` around `x0`.

    Serves as ground truth for `empirical.linearize`: returns
    f(x0) + df/dx(x0) @ (x - x0) for each leaf of the output pytree
    (assumes w1 is symmetric, as arranged by the tests).
    """
    w1, w2, b = params
    # Value at the expansion point, with the same alter/shift flags as `f`.
    f0 = EmpiricalTest.f(x0, params, do_alter, do_shift_x)
    if do_shift_x:
      # Mirror the input shift that `f` applies internally; note this also
      # scales dx below by 2, matching the chain rule.
      x0 = x0 * 2 + 1.
      x = x * 2 + 1.
    dx = x - x0
    if do_alter:
      # Mirror the parameter perturbation that `f` applies internally.
      b *= 2.
      w1 += 5.
      w2 /= 0.9
    # First-order term: x0^T w1 + w2 is d/dx of the first leaf; the second
    # leaf is (w1 x, w2), whose derivative applied to dx is (w1 dx, 0).
    return tree_multimap(operator.add,
                         f0,
                         [np.dot(np.dot(x0.T, w1) + w2, dx),
                          (np.dot(w1, dx),
                           0.)
                         ])
  @parameterized.named_parameters(
      jtu.cases_from_list({
          'testcase_name': '_{}'.format(shape),
          'shape': shape
      } for shape in TAYLOR_MATRIX_SHAPES))
  def testLinearization(self, shape):
    """`empirical.linearize` must match the closed-form linearization."""
    key = random.PRNGKey(0)
    key, s1, s2, s3, = random.split(key, 4)
    w1 = random.normal(s1, shape)
    # Symmetrize w1 so the quadratic form's gradient is x^T w1 + w2
    # (as assumed by f_lin_exact).
    w1 = 0.5 * (w1 + w1.T)
    w2 = random.normal(s2, shape)
    b = random.normal(s3, (1,) * (len(shape) - 1) + (shape[-1],))
    params = (w1, w2, b)
    key, split = random.split(key)
    x0 = random.normal(split, (shape[-1], 1))
    f_lin = empirical.linearize(EmpiricalTest.f, x0)
    # Probe random points under all combinations of alter/shift flags.
    for _ in range(TAYLOR_RANDOM_SAMPLES):
      for do_alter in [True, False]:
        for do_shift_x in [True, False]:
          key, split = random.split(key)
          x = random.normal(split, (shape[-1], 1))
          self.assertAllClose(EmpiricalTest.f_lin_exact(x0, x, params, do_alter,
                                                        do_shift_x=do_shift_x),
                              f_lin(x, params, do_alter, do_shift_x=do_shift_x))

  @parameterized.named_parameters(
      jtu.cases_from_list({
          'testcase_name': '_{}'.format(shape),
          'shape': shape
      } for shape in TAYLOR_MATRIX_SHAPES))
  def testTaylorExpansion(self, shape):
    """`empirical.taylor_expand` must match closed-form 1st/2nd-order series."""

    def f_2_exact(x0, x, params, do_alter, do_shift_x=True):
      # Closed-form second-order expansion: the linear part plus the
      # quadratic term 0.5 * dx^T w1 dx; the second output leaf has no
      # second-order dependence on x.
      w1, w2, b = params
      f_lin = EmpiricalTest.f_lin_exact(x0, x, params, do_alter, do_shift_x)
      if do_shift_x:
        x0 = x0 * 2 + 1.
        x = x * 2 + 1.
      if do_alter:
        b *= 2.
        w1 += 5.
        w2 /= 0.9
      dx = x - x0
      return tree_multimap(operator.add,
                           f_lin,
                           [0.5 * np.dot(np.dot(dx.T, w1), dx),
                            (0.,
                             0.)
                           ])

    key = random.PRNGKey(0)
    key, s1, s2, s3, = random.split(key, 4)
    w1 = random.normal(s1, shape)
    # Symmetrize w1, matching the assumption in f_lin_exact / f_2_exact.
    w1 = 0.5 * (w1 + w1.T)
    w2 = random.normal(s2, shape)
    b = random.normal(s3, (1,) * (len(shape) - 1) + (shape[-1],))
    params = (w1, w2, b)
    key, split = random.split(key)
    x0 = random.normal(split, (shape[-1], 1))
    f_lin = empirical.taylor_expand(EmpiricalTest.f, x0, 1)
    f_2 = empirical.taylor_expand(EmpiricalTest.f, x0, 2)
    for _ in range(TAYLOR_RANDOM_SAMPLES):
      for do_alter in [True, False]:
        for do_shift_x in [True, False]:
          key, split = random.split(key)
          x = random.normal(split, (shape[-1], 1))
          # Order-1 expansion must agree with the exact linearization...
          self.assertAllClose(EmpiricalTest.f_lin_exact(x0, x, params, do_alter,
                                                        do_shift_x=do_shift_x),
                              f_lin(x, params, do_alter, do_shift_x=do_shift_x))
          # ...and order-2 must match the exact quadratic expansion.
          self.assertAllClose(f_2_exact(x0, x, params, do_alter,
                                        do_shift_x=do_shift_x),
                              f_2(x, params, do_alter, do_shift_x=do_shift_x))
  @parameterized.named_parameters(
      jtu.cases_from_list({
          'testcase_name': '_train_shape={}_test_shape={}_network={}_{}'.format(
              train, test, network, name),
          'train_shape': train,
          'test_shape': test,
          'network': network,
          'name': name,
          'kernel_fn': kernel_fn
      } for train, test, network in zip(TRAIN_SHAPES, TEST_SHAPES, NETWORK)
        for name, kernel_fn in KERNELS.items()))
  def testNTKAgainstDirect(
      self, train_shape, test_shape, network, name, kernel_fn):
    """Implicit, direct, and vmapped NTK implementations must agree."""
    key = random.PRNGKey(0)
    key, self_split, other_split = random.split(key, 3)
    data_self = random.normal(self_split, train_shape)
    data_other = random.normal(other_split, test_shape)
    implicit, direct, _ = kernel_fn(key, train_shape[1:], network,
                                    diagonal_axes=(), trace_axes=())
    implicit_batched, direct_batched, _ = kernel_fn(key, train_shape[1:],
                                                    network,
                                                    diagonal_axes=(),
                                                    trace_axes=(),
                                                    vmap_axes=0)
    # Self-kernel k(x, x): all four implementations must coincide.
    g = implicit(data_self, None)
    g_direct = direct(data_self, None)
    g_batched = implicit_batched(data_self, None)
    g_direct_batched = direct_batched(data_self, None)
    self.assertAllClose(g, g_direct)
    self.assertAllClose(g, g_batched)
    self.assertAllClose(g, g_direct_batched)
    # Cross-kernel k(x2, x1): same agreement requirement.
    g = implicit(data_other, data_self)
    g_direct = direct(data_other, data_self)
    g_batched = implicit_batched(data_other, data_self)
    g_direct_batched = direct_batched(data_other, data_self)
    self.assertAllClose(g, g_direct)
    self.assertAllClose(g, g_batched)
    self.assertAllClose(g, g_direct_batched)

  @parameterized.named_parameters(
      jtu.cases_from_list({
          'testcase_name': '_diagonal_axes={}_trace_axes={}'.format(
              diagonal_axes, trace_axes),
          'diagonal_axes': diagonal_axes,
          'trace_axes': trace_axes,
      }
                          for diagonal_axes in [(),
                                                (0,),
                                                (0, 1),
                                                (0, 1, 2),
                                                (0, 1, 2, 3),
                                                (-1,),
                                                (-2,),
                                                (0, -1),
                                                (1, -2),
                                                (2, 3),
                                                (3, 0, 2)]
                          for trace_axes in [(),
                                             (0,),
                                             (0, 1),
                                             (-1,),
                                             (1,),
                                             (0, -1),
                                             (-1, -2),
                                             (0, 1, 2, 3),
                                             (3, 1, 2, 0),
                                             (1, 2, 3),
                                             (-3, -2),
                                             (-3, -1),
                                             (-2, -4),
                                             (2, 0, -1)]))
  def testAxes(self, diagonal_axes, trace_axes):
    """Empirical kernels must handle arbitrary diagonal/trace axes consistently."""
    key = random.PRNGKey(0)
    key, self_split, other_split = random.split(key, 3)
    data_self = random.normal(self_split, (4, 5, 6, 3))
    data_other = random.normal(other_split, (2, 5, 6, 3))
    # Canonicalize negative axis indices against the 4D input.
    _diagonal_axes = utils.canonicalize_axis(diagonal_axes, data_self)
    _trace_axes = utils.canonicalize_axis(trace_axes, data_self)
    if any(d == c for d in _diagonal_axes for c in _trace_axes):
      raise absltest.SkipTest(
          'diagonal axes must be different from channel axes.')
    get_kernel = KERNELS['empirical_logits_3']
    kwargs = dict(
        key=key,
        input_shape=(5, 6, 3),
        network=CONV,
        diagonal_axes=diagonal_axes,
        trace_axes=trace_axes
    )
    implicit, direct, nngp = get_kernel(**kwargs)
    implicit_batched, direct_batched, _ = get_kernel(**kwargs, vmap_axes=0)
    n_marg = len(_diagonal_axes)
    n_chan = len(_trace_axes)
    # Output rank: traced axes are contracted away from both inputs; each
    # diagonal axis appears once (instead of twice) in the output.
    g_nngp = nngp(data_self, None)
    self.assertEqual(2 * (data_self.ndim - n_chan) - n_marg, g_nngp.ndim)
    g_direct = direct(data_self, None)
    self.assertEqual(g_nngp.shape, g_direct.shape)
    g_direct_batched = direct_batched(data_self, None)
    g = implicit(data_self, None)
    g_batched = implicit_batched(data_self, None)
    self.assertAllClose(g_direct, g)
    self.assertAllClose(g_direct, g_direct_batched)
    self.assertAllClose(g_direct, g_batched)
    # Cross-kernels only make sense when the batch axis (0) is neither
    # traced nor diagonal.
    if 0 not in _trace_axes and 0 not in _diagonal_axes:
      g_nngp = nngp(data_other, data_self)
      self.assertEqual(2 * (data_self.ndim - n_chan) - n_marg, g_nngp.ndim)
      g_direct = direct(data_other, data_self)
      self.assertEqual(g_nngp.shape, g_direct.shape)
      g_direct_batched = direct_batched(data_other, data_self)
      g = implicit(data_other, data_self)
      g_batched = implicit_batched(data_other, data_self)
      self.assertAllClose(g_direct, g)
      self.assertAllClose(g_direct, g_direct_batched)
      self.assertAllClose(g_direct, g_batched)
  @parameterized.named_parameters(
      jtu.cases_from_list({
          'testcase_name': '_same_inputs={}'.format(same_inputs),
          'same_inputs': same_inputs
      } for same_inputs in [True, False]))
  def test_parallel_in_out(self, same_inputs):
    """Empirical kernels must handle tuple (parallel) inputs and outputs."""
    rng = random.PRNGKey(0)
    input_key1, input_key2, net_key = random.split(rng, 3)
    # Split 21 features into two parallel branches of 10 and 11 features.
    x1_1, x1_2 = np.split(random.normal(input_key1, (3, 21)), (10,), axis=1)
    x2_1, x2_2 = np.split(random.normal(input_key2, (4, 21)), (10,), axis=1)
    x1 = (x1_1, x1_2)
    x2 = (x2_1, x2_2) if not same_inputs else None

    def layer(N_out):
      # Two parallel Dense branches of widths N_out and N_out + 1.
      return stax.parallel(stax.Dense(N_out), stax.Dense(N_out + 1))

    init_fn, apply_fn, _ = stax.serial(layer(1024), layer(1))
    _, params = init_fn(net_key, (x1_1.shape, x1_2.shape))
    implicit_kernel_fn = jit(empirical._empirical_implicit_ntk_fn(apply_fn))
    direct_kernel_fn = jit(empirical._empirical_direct_ntk_fn(apply_fn))
    implicit_batched_kernel_fn = jit(empirical._empirical_implicit_ntk_fn(
        apply_fn, vmap_axes=(0, 0)))
    direct_batched_kernel_fn = jit(empirical._empirical_direct_ntk_fn(
        apply_fn, vmap_axes=(0, 0)))
    # All NTK implementations (implicit/direct, batched or not) must agree.
    k_direct = direct_kernel_fn(x1, x2, params)
    self.assertAllClose(k_direct, implicit_kernel_fn(x1, x2, params))
    self.assertAllClose(k_direct, direct_batched_kernel_fn(x1, x2, params))
    self.assertAllClose(k_direct, implicit_batched_kernel_fn(x1, x2, params))
    nngp_kernel_fn = jit(empirical.empirical_nngp_fn(apply_fn))
    nngp = nngp_kernel_fn(x1, x2, params)
    # NNGP output mirrors the parallel output structure: one kernel per branch.
    self.assertEqual(len(nngp), 2)
    self.assertEqual(nngp[0].shape, (3, 3 if same_inputs else 4))
    self.assertEqual(nngp[1].shape, (3, 3 if same_inputs else 4))
@parameterized.named_parameters(
jtu.cases_from_list({
'testcase_name': '_same_inputs={}'.format(same_inputs),
'same_inputs': same_inputs
} for same_inputs in [True, False]))
def test_parallel_nested(self, same_inputs):
rng = random.PRNGKey(0)
input_key1, input_key2, net_key = random.split(rng, 3)
x1_1, x1_2, x1_3 = np.split(random.normal(input_key1, (3, 33)),
(10, 21), axis=1)
x2_1, x2_2, x2_3 = np.split(random.normal(input_key2, (4, 33)),
(10, 21), axis=1)
x1 = ([x1_1, x1_2], x1_3)
x2 = ([x2_1, x2_2], x2_3) if not same_inputs else None
def layer(N_out):
return stax.parallel(stax.parallel(stax.Dense(N_out),
stax.Dense(N_out + 1)),
stax.Dense(N_out | |
<gh_stars>0
'''
Copyright (C) 2021 CG Cookie
http://cgcookie.com
<EMAIL>
Created by <NAME>, <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import time
from itertools import chain
from mathutils import Vector
import bpy
from ...config.options import visualization, options
from ...addon_common.common.debug import dprint
from ...addon_common.common.blender import matrix_vector_mult
from ...addon_common.common.profiler import profiler
from ...addon_common.common.utils import iter_pairs
from ...addon_common.common.maths import Point, Vec, Direction, Normal, Ray, XForm, BBox
from ...addon_common.common.maths import Point2D, Vec2D, Direction2D, Accel2D
from ..rfmesh.rfmesh import RFMesh, RFVert, RFEdge, RFFace
from ..rfmesh.rfmesh import RFSource, RFTarget
from ..rfmesh.rfmesh_render import RFMeshRender
class RetopoFlow_Target:
'''
functions to work on target mesh (RFTarget)
'''
    @profiler.function
    def setup_target(self):
        ''' target is the active object. must be selected and visible '''
        # `self.tar_object` must already be set by the caller.
        assert self.tar_object, 'Could not find valid target?'
        self.rftarget = RFTarget.new(self.tar_object, self.unit_scaling_factor)
        opts = visualization.get_target_settings()
        self.rftarget_draw = RFMeshRender.new(self.rftarget, opts)
        self.rftarget_version = None
        self.hide_target()
        # Visibility-acceleration state: cached sets of visible geometry and
        # the 2D acceleration structure, lazily (re)built by get_vis_accel().
        self.accel_defer_recomputing = False
        self.accel_recompute = True
        self.accel_target_version = None
        self.accel_view_version = None
        self.accel_vis_verts = None
        self.accel_vis_edges = None
        self.accel_vis_faces = None
        self.accel_vis_accel = None
        # Last-seen option values; a change in either forces an accel rebuild.
        self._last_visible_bbox_factor = None
        self._last_visible_dist_offset = None
    def hide_target(self):
        """Hide the target object in both the viewport and renders."""
        self.rftarget.obj_viewport_hide()
        self.rftarget.obj_render_hide()

    def teardown_target(self):
        """Restore the target object's visibility (inverse of hide_target)."""
        # IMPORTANT: changes here should also go in rf_blendersave.backup_recover()
        self.rftarget.obj_viewport_unhide()
        self.rftarget.obj_render_unhide()

    def done_target(self):
        """Release references to the target and clear its temporary mesh."""
        del self.rftarget_draw
        del self.rftarget
        self.tar_object.to_mesh_clear()
        del self.tar_object
#########################################
# acceleration structures
def set_accel_defer(self, defer): self.accel_defer_recomputing = defer
    @profiler.function
    def get_vis_accel(self, force=False):
        """Return the 2D acceleration structure over visible target geometry.

        The structure is cached and rebuilt only when the target mesh, the
        view, or the visibility options have changed (or `force` is True).
        When not rebuilding, cached sets are pruned of invalidated elements.
        """
        target_version = self.get_target_version(selection=False)
        view_version = self.get_view_version()
        # Determine whether the cache is stale...
        recompute = self.accel_recompute
        recompute |= self.accel_target_version != target_version
        recompute |= self.accel_view_version != view_version
        recompute |= self.accel_vis_verts is None
        recompute |= self.accel_vis_edges is None
        recompute |= self.accel_vis_faces is None
        recompute |= self.accel_vis_accel is None
        recompute |= options['visible bbox factor'] != self._last_visible_bbox_factor
        recompute |= options['visible dist offset'] != self._last_visible_dist_offset
        # ...but never rebuild while deferred, navigating, or within 0.25s
        # after navigation ended (keeps interaction responsive).
        recompute &= not self.accel_defer_recomputing
        recompute &= not self._nav and (time.time() - self._nav_time) > 0.25
        self.accel_recompute = False
        if force or recompute:
            # print('RECOMPUTE VIS ACCEL')
            self.accel_target_version = target_version
            self.accel_view_version = view_version
            self.accel_vis_verts = self.visible_verts()
            self.accel_vis_edges = self.visible_edges(verts=self.accel_vis_verts)
            self.accel_vis_faces = self.visible_faces(verts=self.accel_vis_verts)
            self.accel_vis_accel = Accel2D(self.accel_vis_verts, self.accel_vis_edges, self.accel_vis_faces, self.get_point2D)
            self._last_visible_bbox_factor = options['visible bbox factor']
            self._last_visible_dist_offset = options['visible dist offset']
        else:
            # Drop elements invalidated since the last rebuild (e.g. deleted).
            self.accel_vis_verts = { bmv for bmv in self.accel_vis_verts if bmv.is_valid } if self.accel_vis_verts is not None else None
            self.accel_vis_edges = { bme for bme in self.accel_vis_edges if bme.is_valid } if self.accel_vis_edges is not None else None
            self.accel_vis_faces = { bmf for bmf in self.accel_vis_faces if bmf.is_valid } if self.accel_vis_faces is not None else None
        return self.accel_vis_accel
    @profiler.function
    def accel_nearest2D_vert(self, point=None, max_dist=None, vis_accel=None, selected_only=None):
        """Find the visible vert nearest to `point` (default: mouse) in screen space.

        Returns a 2-tuple (vert plus additional info — presumably distance;
        confirm in RFTarget.nearest2D_bmvert_Point2D), or (None, None) when
        no acceleration structure is available. `max_dist` is in unscaled
        pixels; `selected_only` filters verts by selection state.
        """
        xy = self.get_point2D(point or self.actions.mouse)
        if not vis_accel: vis_accel = self.get_vis_accel()
        if not vis_accel: return None,None
        if not max_dist:
            # No distance limit: consider all visible verts.
            verts = self.accel_vis_verts
        else:
            max_dist = self.drawing.scale(max_dist)
            verts = vis_accel.get_verts(xy, max_dist)
        if selected_only is not None:
            verts = { bmv for bmv in verts if bmv.select == selected_only }
        return self.rftarget.nearest2D_bmvert_Point2D(xy, self.Point_to_Point2D, verts=verts, max_dist=max_dist)

    @profiler.function
    def accel_nearest2D_edge(self, point=None, max_dist=None, vis_accel=None, selected_only=None):
        """Find the visible edge nearest to `point` (default: mouse) in screen space.

        Same contract as accel_nearest2D_vert, but over edges.
        """
        xy = self.get_point2D(point or self.actions.mouse)
        if not vis_accel: vis_accel = self.get_vis_accel()
        if not vis_accel: return None,None
        if not max_dist:
            edges = self.accel_vis_edges
        else:
            max_dist = self.drawing.scale(max_dist)
            edges = vis_accel.get_edges(xy, max_dist)
        if selected_only is not None:
            edges = { bme for bme in edges if bme.select == selected_only }
        return self.rftarget.nearest2D_bmedge_Point2D(xy, self.Point_to_Point2D, edges=edges, max_dist=max_dist)

    @profiler.function
    def accel_nearest2D_face(self, point=None, max_dist=None, vis_accel=None, selected_only=None):
        """Find the visible face nearest to `point` (default: mouse) in screen space.

        Unlike the vert/edge variants, returns a single value (the face or
        None). Note the scaled `max_dist` is computed for the accel query but
        not forwarded to nearest2D_bmface_Point2D (see trailing comment).
        """
        xy = self.get_point2D(point or self.actions.mouse)
        if not vis_accel: vis_accel = self.get_vis_accel()
        if not vis_accel: return None
        if not max_dist:
            faces = self.accel_vis_faces
        else:
            max_dist = self.drawing.scale(max_dist)
            faces = vis_accel.get_faces(xy, max_dist)
        if selected_only is not None:
            faces = { bmf for bmf in faces if bmf.select == selected_only }
        return self.rftarget.nearest2D_bmface_Point2D(self.Vec_forward(), xy, self.Point_to_Point2D, faces=faces) #, max_dist=max_dist)
#########################################
# find target entities in screen space
def get_point2D(self, point):
if point.is_2D(): return point
return self.Point_to_Point2D(point)
    @profiler.function
    def nearest2D_vert(self, point=None, max_dist=None, verts=None):
        """Nearest target vert to `point` (default: mouse) in screen space."""
        xy = self.get_point2D(point or self.actions.mouse)
        if max_dist: max_dist = self.drawing.scale(max_dist)
        return self.rftarget.nearest2D_bmvert_Point2D(xy, self.Point_to_Point2D, verts=verts, max_dist=max_dist)

    @profiler.function
    def nearest2D_verts(self, point=None, max_dist:float=10, verts=None):
        """All target verts within `max_dist` (pixels, pre-scaling) of `point`."""
        xy = self.get_point2D(point or self.actions.mouse)
        max_dist = self.drawing.scale(max_dist)
        return self.rftarget.nearest2D_bmverts_Point2D(xy, max_dist, self.Point_to_Point2D, verts=verts)

    @profiler.function
    def nearest2D_edge(self, point=None, max_dist=None, edges=None):
        """Nearest target edge to `point` (default: mouse) in screen space."""
        xy = self.get_point2D(point or self.actions.mouse)
        if max_dist: max_dist = self.drawing.scale(max_dist)
        return self.rftarget.nearest2D_bmedge_Point2D(xy, self.Point_to_Point2D, edges=edges, max_dist=max_dist)

    @profiler.function
    def nearest2D_edges(self, point=None, max_dist:float=10, edges=None):
        """All target edges within `max_dist` (pixels, pre-scaling) of `point`."""
        xy = self.get_point2D(point or self.actions.mouse)
        if max_dist: max_dist = self.drawing.scale(max_dist)
        return self.rftarget.nearest2D_bmedges_Point2D(xy, max_dist, self.Point_to_Point2D, edges=edges)

    # TODO: implement max_dist
    @profiler.function
    def nearest2D_face(self, point=None, max_dist=None, faces=None):
        """Nearest target face to `point`; `max_dist` is scaled but currently unused."""
        xy = self.get_point2D(point or self.actions.mouse)
        if max_dist: max_dist = self.drawing.scale(max_dist)
        return self.rftarget.nearest2D_bmface_Point2D(self.Vec_forward(), xy, self.Point_to_Point2D, faces=faces)

    # TODO: fix this function! Izzza broken
    @profiler.function
    def nearest2D_faces(self, point=None, max_dist:float=10, faces=None):
        """Target faces near `point`.

        NOTE(review): `max_dist` is scaled but never passed to
        nearest2D_bmfaces_Point2D — flagged broken by the TODO above;
        presumably the distance limit was meant to be forwarded.
        """
        xy = self.get_point2D(point or self.actions.mouse)
        if max_dist: max_dist = self.drawing.scale(max_dist)
        return self.rftarget.nearest2D_bmfaces_Point2D(xy, self.Point_to_Point2D, faces=faces)
####################
# REWRITE BELOW!!! #
####################
# def nearest2D_face_Point2D(self, point:Point2D, faces=None):
# return self.rftarget.nearest2D_bmface_Point2D(point, self.Point_to_Point2D, faces=faces)
# def nearest2D_face_point(self, point):
# xy = self.get_point2D(point)
# return self.rftarget.nearest2D_bmface_Point2D(xy, self.Point_to_Point2D)
# def nearest2D_face_mouse(self):
# return self.nearest2D_face_point(self.actions.mouse)
# def nearest2D_face_point(self, point):
# # if max_dist: max_dist = self.drawing.scale(max_dist)
# xy = self.get_point2D(point)
# return self.rftarget.nearest2D_bmface_Point2D(xy, self.Point_to_Point2D)
########################################
# find target entities in world space
    def get_point3D(self, point):
        """Return `point` in world space; 2D points are raycast onto the sources."""
        if point.is_3D(): return point
        xyz,_,_,_ = self.raycast_sources_Point2D(point)
        return xyz

    def nearest_vert_point(self, point, verts=None):
        """Nearest target vert to `point`; None when a 2D point misses the sources."""
        xyz = self.get_point3D(point)
        if xyz is None: return None
        return self.rftarget.nearest_bmvert_Point(xyz, verts=verts)

    def nearest_vert_mouse(self, verts=None):
        """Nearest target vert under the mouse cursor."""
        return self.nearest_vert_point(self.actions.mouse, verts=verts)

    def nearest_verts_point(self, point, max_dist:float, bmverts=None):
        """Target verts within `max_dist` of `point` (max_dist not scaled here)."""
        xyz = self.get_point3D(point)
        if xyz is None: return None
        return self.rftarget.nearest_bmverts_Point(xyz, max_dist, bmverts=bmverts)

    def nearest_verts_mouse(self, max_dist:float):
        """Target verts within `max_dist` of the source point under the mouse."""
        return self.nearest_verts_point(self.actions.mouse, max_dist)

    def nearest_edges_Point(self, point, max_dist:float):
        """Target edges within `max_dist` of world-space `point`.

        NOTE(review): this scales `max_dist` while nearest_verts_point does
        not — confirm whether the asymmetry is intentional.
        """
        max_dist = self.drawing.scale(max_dist)
        return self.rftarget.nearest_bmedges_Point(point, max_dist)

    def nearest_edge_Point(self, point:Point, edges=None):
        """Nearest target edge to world-space `point` (optionally among `edges`)."""
        return self.rftarget.nearest_bmedge_Point(point, edges=edges)
#######################################
# get visible geometry
    @profiler.function
    def visible_verts(self):
        """Target verts currently visible, as determined by self.is_visible."""
        return self.rftarget.visible_verts(self.is_visible)

    @profiler.function
    def visible_edges(self, verts=None):
        """Visible target edges; restricted to those touching `verts` when given."""
        return self.rftarget.visible_edges(self.is_visible, verts=verts)

    @profiler.function
    def visible_faces(self, verts=None):
        """Visible target faces; restricted to those touching `verts` when given."""
        return self.rftarget.visible_faces(self.is_visible, verts=verts)

    def iter_verts(self):
        """Iterate over all target verts."""
        yield from self.rftarget.iter_verts()

    def iter_edges(self):
        """Iterate over all target edges."""
        yield from self.rftarget.iter_edges()

    def iter_faces(self):
        """Iterate over all target faces."""
        yield from self.rftarget.iter_faces()
########################################
# symmetry utils
def apply_symmetry(self):
self.undo_push('applying symmetry')
self.rftarget.apply_symmetry(self.nearest_sources_Point)
    @profiler.function
    def clip_pointloop(self, pointloop, connected):
        """Clip a loop (or strip) of world-space points against enabled mirror planes.

        For each enabled mirror axis, points on the mirrored side (x: x<0,
        y: y>0, z: z<0) are dropped and any segment crossing the plane is
        replaced by its intersection with the plane.  If a crossing occurs the
        loop is opened into a strip (`connected` becomes False).

        :param pointloop: list of world-space points
        :param connected: True if the points form a closed loop
        :return: tuple (clipped world-space points, updated connected flag)
        """
        # assuming loop will cross symmetry line exactly zero or two times
        l2w_point,w2l_point = self.rftarget.xform.l2w_point,self.rftarget.xform.w2l_point
        # work in local space, where the mirror planes are the coordinate planes
        pointloop = [w2l_point(pt) for pt in pointloop]
        if self.rftarget.mirror_mod.x and any(p.x < 0 for p in pointloop):
            if connected:
                # rotate the loop to start on the clipped side so the kept run of
                # points stays contiguous after clipping
                rot_idx = next(i for i,p in enumerate(pointloop) if p.x < 0)
                pointloop = pointloop[rot_idx:] + pointloop[:rot_idx]
            npl = []
            for p0,p1 in iter_pairs(pointloop, connected):
                if p0.x < 0 and p1.x < 0: continue    # fully on clipped side: drop
                elif p0.x == 0: npl += [p0]           # exactly on the plane: keep
                elif p0.x > 0 and p1.x > 0: npl += [p0]   # fully on kept side: keep
                else:
                    # segment crosses the plane: open the loop and keep the
                    # linearly interpolated intersection point
                    connected = False
                    npl += [p0 + (p1 - p0) * (p0.x / (p0.x - p1.x))]
            if npl:
                # snap the cut endpoints exactly onto the mirror plane
                npl[0].x = 0
                npl[-1].x = 0
            pointloop = npl
        # same procedure for the y axis (note: mirrored side here is y > 0)
        if self.rftarget.mirror_mod.y and any(p.y > 0 for p in pointloop):
            if connected:
                rot_idx = next(i for i,p in enumerate(pointloop) if p.y > 0)
                pointloop = pointloop[rot_idx:] + pointloop[:rot_idx]
            npl = []
            for p0,p1 in iter_pairs(pointloop, connected):
                if p0.y > 0 and p1.y > 0: continue
                elif p0.y == 0: npl += [p0]
                elif p0.y < 0 and p1.y < 0: npl += [p0]
                else:
                    connected = False
                    npl += [p0 + (p1 - p0) * (p0.y / (p0.y - p1.y))]
            if npl:
                npl[0].y = 0
                npl[-1].y = 0
            pointloop = npl
        # same procedure for the z axis (mirrored side is z < 0, like x)
        if self.rftarget.mirror_mod.z and any(p.z < 0 for p in pointloop):
            if connected:
                rot_idx = next(i for i,p in enumerate(pointloop) if p.z < 0)
                pointloop = pointloop[rot_idx:] + pointloop[:rot_idx]
            npl = []
            for p0,p1 in iter_pairs(pointloop, connected):
                if p0.z < 0 and p1.z < 0: continue
                elif p0.z == 0: npl += [p0]
                elif p0.z > 0 and p1.z > 0: npl += [p0]
                else:
                    connected = False
                    npl += [p0 + (p1 - p0) * (p0.z / (p0.z - p1.z))]
            if npl:
                npl[0].z = 0
                npl[-1].z = 0
            pointloop = npl
        # transform the surviving points back to world space
        pointloop = [l2w_point(pt) for pt in pointloop]
        return (pointloop, connected)
    def clamp_pointloop(self, pointloop, connected):
        # placeholder: returns the loop unchanged (no clamping performed)
        return (pointloop, connected)
def is_point_on_mirrored_side(self, point):
p | |
"""
This file contains functions related to the annual report
"""
import pandas as _pd
# import numpy as _np
# import altair as _alt
#######################################################################################################################
def fields_summary_table(
    points,
    artifacts,
    fid_col="geo_field",
    save_excel=False,
    excel_file="field_counts.xlsx",
    excel_sheet="field_data",
):
    """Build a per-field summary table (Spanish column names).

    Parameters
    ----------
    points : DataFrame
        pandas DataFrame of survey points
    artifacts : DataFrame
        pandas DataFrame of artifacts
    fid_col : str, optional
        Column containing the field identifier used for grouping
    save_excel : bool, optional
        If True, also write the table to an Excel file
    excel_file : str, optional
        Filename for the Excel output
    excel_sheet : str, optional
        Sheet name for the Excel output

    Returns
    -------
    fields_data : DataFrame
        One row per field with columns
        ['Polígono', 'Parcela', 'Subparcela', 'Id',
         'Núm. Pts.', 'Núm. Frags.', 'Pos. Pts.'],
        e.g. | 03 | 027 | 0 | 030270 | 351 | 39 | 26 |
    """
    count_cols = ["Núm. Pts.", "Núm. Frags.", "Pos. Pts."]
    # per-field counts: survey points, artifact fragments, and positive points
    point_cts = points_per_group(points, grouper_col=fid_col, ct_col=count_cols[0])
    frag_cts = artifacts_per_group(artifacts, grouper_col=fid_col, ct_col=count_cols[1])
    pos_cts = pos_points_per_group(
        artifacts, grouper_col=fid_col, pt_id_col="SurveyPointId", ct_col=count_cols[2]
    )
    # outer-join the three count tables; fields absent from one get a count of 0
    fields_data = _pd.concat([point_cts, frag_cts, pos_cts], axis=1).fillna(0)
    fields_data = fields_data.reset_index().rename(columns={"index": "Id"})
    # counts back to integers (fillna produced floats)
    fields_data[count_cols] = fields_data[count_cols].astype(int)
    # derive cadastral components from the field identifier string
    fields_data["Polígono"] = fields_data["Id"].str[0:2]
    fields_data["Parcela"] = fields_data["Id"].str[2:5]
    fields_data["Subparcela"] = fields_data["Id"].str[5]
    # fixed presentation order
    fields_data = fields_data[["Polígono", "Parcela", "Subparcela", "Id"] + count_cols]
    if save_excel:
        fields_data.to_excel(excel_file, sheet_name=excel_sheet)
    return fields_data
#######################################################################################################################
def points_per_group(points, grouper_col, ct_col="Núm. Pts."):
    """Count point observations per group.

    Parameters
    ----------
    points : pandas DataFrame
        Point observations
    grouper_col : str
        Column of `points` used for grouping, usually 'geo_field' or similar
    ct_col : str, optional
        Name to give the count column

    Returns
    -------
    df : pandas DataFrame
        One row per group (index named 'Id') with the point count in `ct_col`
    """
    counts = points.groupby(grouper_col).size()
    df = counts.rename(ct_col).to_frame()
    df.index.rename("Id", inplace=True)
    return df
#######################################################################################################################
def artifacts_per_group(artifacts, grouper_col, ct_col="Núm. Frags."):
    """Count artifact observations per group.

    Parameters
    ----------
    artifacts : pandas DataFrame
        Artifact observations
    grouper_col : str
        Column of `artifacts` used for grouping, usually 'geo_field' or similar
    ct_col : str, optional
        Name to give the count column

    Returns
    -------
    df : pandas DataFrame
        One row per group (indexed by `grouper_col`) with the count in `ct_col`
    """
    per_group = artifacts.groupby(grouper_col).size()
    return per_group.rename(ct_col).to_frame()
#######################################################################################################################
def pos_points_per_group(
    artifacts, grouper_col, pt_id_col="SurveyPointId", ct_col="Pos. Pts."
):
    """Count the number of distinct points with artifacts in each group.

    Parameters
    ----------
    artifacts : pandas DataFrame
        Artifact observations
    grouper_col : str
        Column of `artifacts` used for grouping, usually 'geo_field' or similar
    pt_id_col : str, optional
        Column holding the unique point identifier, usually 'SurveyPointId'
    ct_col : str, optional
        Name to give the count column

    Returns
    -------
    df : pandas DataFrame
        One row per group (indexed by `grouper_col`) with the number of unique
        positive points in `ct_col`
    """
    # Count distinct point ids directly; equivalent to the former
    # groupby([group, point]).size() followed by a second groupby(group).size(),
    # but in a single pass and without intermediate frames.
    df = artifacts.groupby(grouper_col)[pt_id_col].nunique().rename(ct_col).to_frame()
    return df
#######################################################################################################################
def time_span_chart(data):
    """Create a chart showing all productions, their counts, their percentages, and their time spans

    Each production is drawn as a horizontal black bar spanning its chronology;
    bar thickness encodes the binned `pct` value, and dashed vertical lines mark
    the time-period boundaries labeled along the top of the plot.

    Parameters
    ----------
    data : pandas DataFrame
        Data containing columns `['Catalan', 'EarlyChrono', 'LateChrono', 'count', 'pct']`;
        chronology values are years (negative = BC, positive = AD) and `pct`
        is a percentage used to choose the line width.

    Returns
    -------
    fig: matplotlib Figure
    """
    import matplotlib.pyplot as plt
    from matplotlib import collections as mc
    from matplotlib.lines import Line2D
    import seaborn as sns
    def make_proxy(zvalue, scalar_mappable, **kwargs):
        """Return a proxy Line2D for the legend.

        `zvalue` and `scalar_mappable` are accepted but unused; only the
        keyword args (e.g. linewidth) affect the produced proxy line.
        """
        COLOR = "black"
        return Line2D([0, 1], [0, 1], color=COLOR, solid_capstyle="butt", **kwargs)
    # order rows so the earliest-starting production appears at the top
    data = data.sort_values(by="EarlyChrono", ascending=False)
    data = data.assign(order_y=[i + 1 for i in range(data.shape[0])])
    # create tuples of the form (x_start, order_y) and (x_end, order_y)] for each production
    data["start_pt"] = list(zip(data["EarlyChrono"], data["order_y"]))
    data["end_pt"] = list(zip(data["LateChrono"], data["order_y"]))
    # create label for production type and count (Catalan)
    data["ylabel"] = data["Catalan"].map(str) + " - " + data["count"].map(str)
    # make list of lists of coordinates
    field_lines = [list(a) for a in zip(data["start_pt"], data["end_pt"])]
    # items needed for legend construction
    LW_BINS = [0, 10, 25, 50, 75, 90, 100] # bins for line width
    LW_LABELS = [3, 6, 9, 12, 15, 18] # line widths
    # convert percentages to line widths based on bin values
    # NOTE(review): pd.cut bins are left-open, so a pct of exactly 0 falls
    # outside the first bin and yields NaN width -- confirm pct is always > 0
    data["lw"] = _pd.cut(data["pct"], bins=LW_BINS, labels=LW_LABELS)
    data["lw"] = _pd.to_numeric(data["lw"])
    # lines for each production with linewidths as determined above
    lc = mc.LineCollection(field_lines, color="black", linewidths=list(data["lw"]))
    # start and end values for x-axis (negative = BC, positive = AD)
    START = -1600
    END = 2001
    # create thin gray lines that stretch horizontally across the whole plot
    data["gray_start"] = START
    data["gray_end"] = END
    data["gray_start_pt"] = list(zip(data["gray_start"], data["order_y"]))
    data["gray_end_pt"] = list(zip(data["gray_end"], data["order_y"]))
    gray_lines = [list(B) for B in zip(data["gray_start_pt"], data["gray_end_pt"])]
    # horizontal gray lines
    lc2 = mc.LineCollection(gray_lines, color="gray", linewidth=0.5)
    # units for plot creation; t='top', b='bottom'
    HEIGHT_UNIT = 0.15
    T = 1.0
    B = 0.7 # inch
    # x-ticks
    INTERVAL = 400 # x-axis tick interval
    xticks = [x for x in range(START, END, INTERVAL)] # create ticks
    # x-values for vertical lines to be added representing time period boundaries
    VERT_LINES = [-850, -550, -123, 455, 533, 902, 1229, 1492, 1789]
    # time period labels, positioned in axes coordinates as
    # pos = abs(start-val)/float(abs(end-start))
    TLABEL_V = 1.01
    TLABEL_ANG = 90
    PERIOD_LABELS = {
        "Navetiforme": [
            abs(START - (-1225)) / float(abs(END - START)),
            TLABEL_V,
            TLABEL_ANG,
        ],
        "Talaiòtic": [
            abs(START - (-700)) / float(abs(END - START)),
            TLABEL_V,
            TLABEL_ANG,
        ],
        "Posttalaiòtic": [
            abs(START - (-336.5)) / float(abs(END - START)),
            TLABEL_V,
            TLABEL_ANG,
        ],
        "Romana": [abs(START - (166)) / float(abs(END - START)), TLABEL_V, TLABEL_ANG],
        "Vàndala": [abs(START - (500)) / float(abs(END - START)), TLABEL_V, TLABEL_ANG],
        "Bizantina": [
            abs(START - (717.5)) / float(abs(END - START)),
            TLABEL_V,
            TLABEL_ANG,
        ],
        "Àndalusina": [
            abs(START - (1065.5)) / float(abs(END - START)),
            TLABEL_V,
            TLABEL_ANG,
        ],
        "Medieval\nCristiana": [
            abs(START - (1360.5)) / float(abs(END - START)),
            TLABEL_V,
            TLABEL_ANG,
        ],
        "Moderna": [
            abs(START - (1640.5)) / float(abs(END - START)),
            TLABEL_V,
            TLABEL_ANG,
        ],
        "Contemporània": [
            abs(START - (2000)) / float(abs(END - START)),
            TLABEL_V,
            TLABEL_ANG,
        ],
    }
    # create plot and set properties; figure height grows with the row count
    sns.set(style="ticks")
    sns.set_context("notebook")
    height = HEIGHT_UNIT * (data.shape[0] + 1) + T + B
    fig = plt.figure(figsize=(6.5, height))
    ax = fig.add_subplot(111)
    ax.set_ylim(0, data.shape[0] + 0.5)
    fig.subplots_adjust(bottom=B / height, top=1 - T / height, left=0.45, right=0.95)
    ax.add_collection(lc2)
    ax.add_collection(lc)
    ax.set_xlim(left=START, right=END)
    ax.set_xticks(xticks)
    ax.xaxis.set_ticks_position("bottom")
    ax.tick_params(axis="x", labelsize=8)
    sns.despine(left=True)
    ax.set_yticks(data["order_y"])
    ax.set(yticklabels=data["ylabel"])
    ax.tick_params(axis="y", length=0, labelsize=8)
    # place time period vertical lines from list of x values
    for vline in VERT_LINES:
        plt.axvline(x=vline, ls="dashed", lw=0.5, color="gray")
    # place time period labels (axes coordinates via transform=ax.transAxes)
    for period, val in PERIOD_LABELS.items():
        ax.text(
            x=val[0],
            y=val[1],
            s=period,
            color="gray",
            fontsize=8,
            horizontalalignment="center",
            verticalalignment="bottom",
            rotation=val[2],
            transform=ax.transAxes,
        )
    # legend: one proxy line per line-width bin
    proxies = [make_proxy(item, lc, linewidth=item) for item in LW_LABELS]
    leg = ax.legend(
        proxies,
        ["0-10%", "10-25%", "25-50%", "50-75%", "75-90%", "90-100%"],
        bbox_to_anchor=(0.05, 0.0),
        bbox_transform=fig.transFigure,
        loc="lower left",
        ncol=6,
        labelspacing=3.0,
        handlelength=4.0,
        handletextpad=0.5,
        markerfirst=False,
        columnspacing=0.5,
        frameon=False,
        fontsize=8,
    )
    for txt in leg.get_texts():
        txt.set_ha("left") # horizontal alignment of text item
        # txt.set_x(0) # x-position
        # txt.set_y() # y-position
    return fig
#######################################################################################################################
def write_excel_table(unit, data, writer):
"""Format data and add as worksheet to Excel output
Parameters
----------
unit : str
Name of field or other data group; will be used as Sheet name in Excel
data : pandas `DataFrame`
Data to write to the file
writer : `ExcelWriter`
Excel file
Returns
-------
None
"""
table_data = data.loc[:, ["Catalan", "count", "pct"]].rename(
columns={"Catalan": "Producció", "count": "Núm", "pct": "%"}
)
MAX_LEN = 40
table_data.to_excel(
writer, sheet_name=str(unit), index=False, startrow=1, header=False
)
workbook = writer.book
worksheet = writer.sheets[str(unit)]
# header
header_format = workbook.add_format(
{
"bold": True,
"align": "center",
"font_name": "Arial",
"font_size": 10,
"bottom": 2,
}
)
for col_num, value in enumerate(table_data.columns.values):
worksheet.write(0, col_num, value, header_format)
header_format2 | |
property_kind_list, vdb_static_properties,
vdb_recurrent_properties and timestep_selection arguments can be used to filter the required properties;
if both keyword_list and property_kind_list are provided, a property must match an item in both lists in order
to be included; if recurrent properties are being included then all vdb's should contain the same number of reporting
steps in their recurrent data and these should relate to the same set of timestamps; timestamp data is extracted from a
summary file for the first realisation; no check is made to ensure that reporting timesteps in different realisations
are actually for the same date.
"""
assert epc_file.endswith('.epc')
assert vdb_static_properties or vdb_recurrent_properties, 'no properties selected for ensemble import'
if progress_fn is not None: progress_fn(0.0)
# fetch a sorted list of the vdb paths found in the run directory tree
ensemble_list = vdb.ensemble_vdb_list(ensemble_run_dir)
if len(ensemble_list) == 0:
log.error("no vdb's found in run directory tree: " + str(ensemble_run_dir))
return None
if not existing_epc:
model = import_nexus(epc_file[:-4], # output path and file name without .epc or .h5 extension
extent_ijk = extent_ijk, # 3 element numpy vector, in case extent is not automatically determined
vdb_file = ensemble_list[0], # vdb input file
corp_xy_units = corp_xy_units,
corp_z_units = corp_z_units,
corp_z_inc_down = corp_z_inc_down,
ijk_handedness = ijk_handedness,
geometry_defined_everywhere = geometry_defined_everywhere,
treat_as_nan = treat_as_nan,
resqml_xy_units = resqml_xy_units,
resqml_z_units = resqml_z_units,
resqml_z_inc_down = resqml_z_inc_down,
shift_to_local = shift_to_local,
local_origin_place = local_origin_place, # 'centre' or 'minimum'
max_z_void = max_z_void, # import will fail if vertical void greater than this is encountered
split_pillars = split_pillars,
split_tolerance = split_tolerance, # applies to each of x, y, z differences
vdb_static_properties = False,
vdb_recurrent_properties = False,
create_property_set = False)
model = rq.Model(epc_file = epc_file) # shouldn't be necessary if just created but it feels safer to re-open the model
assert model is not None, 'failed to instantiate model'
grid = model.grid()
assert grid is not None, 'grid not found'
ext_uuid = model.h5_uuid()
assert ext_uuid is not None, 'failed to determine uuid for hdf5 file reference'
hdf5_file = model.h5_file_name(uuid = ext_uuid)
# create reporting timestep time series for recurrent data, if required, based on the first realisation
recur_time_series = None
recur_ts_uuid = None
timestep_list = None
if vdb_recurrent_properties:
summary_file = ensemble_list[0][:-4] + '.sum' # TODO: check timestep summary file extension, .tssum?
full_time_series = rts.time_series_from_nexus_summary(summary_file)
if full_time_series is None:
log.error('failed to extract info from timestep summary file; disabling recurrent property import')
vdb_recurrent_properties = False
if vdb_recurrent_properties:
vdbase = vdb.VDB(ensemble_list[0])
timestep_list = vdbase.list_of_timesteps()
if len(timestep_list) == 0:
log.warning('no ROOT recurrent data found in vdb for first realisation; disabling recurrent property import')
vdb_recurrent_properties = False
if vdb_recurrent_properties:
if timestep_selection == 'all' or ('first' in timestep_selection): fs_index = 0
else: fs_index = -1
first_stamp = full_time_series.timestamp(timestep_list[fs_index])
if first_stamp is None:
log.error('first timestamp number selected for import was not found in summary file: ' + str(timestep_list[fs_index]))
log.error('disabling recurrent property import')
vdb_recurrent_properties = False
if vdb_recurrent_properties:
recur_time_series = rts.TimeSeries(model, extract_from_xml = False, first_timestamp = first_stamp)
if timestep_selection == 'all': remaining_list = timestep_list[1:]
elif timestep_selection == 'first and last': remaining_list = [timestep_list[-1]]
else: remaining_list = []
for timestep_number in remaining_list:
stamp = full_time_series.timestamp(timestep_number)
if stamp is None:
log.error('timestamp number for which recurrent data exists was not found in summary file: ' + str(timestep_number))
log.error('disabling recurrent property import')
vdb_recurrent_properties = False
recur_time_series = None
break
recur_time_series.add_timestamp(stamp)
if recur_time_series is not None:
recur_ts_node = recur_time_series.create_xml(title = 'simulator recurrent array timestep series')
recur_ts_uuid = rqet.uuid_for_part_root(recur_ts_node)
model.time_series = recur_ts_node # save as the primary time series for the model
if create_complete_property_set or create_property_set_per_timestep:
complete_collection = rp.GridPropertyCollection()
complete_collection.set_grid(grid)
else:
complete_collection = None
# main loop over realisations
for realisation in range(len(ensemble_list)):
if progress_fn is not None: progress_fn(float(1 + realisation) / float(1 + len(ensemble_list)))
vdb_file = ensemble_list[realisation]
log.info('processing realisation ' + str(realisation) + ' from: ' + str(vdb_file))
vdbase = vdb.VDB(vdb_file)
# case_list = vdbase.cases()
# assert len(case_list) > 0, 'no cases found in vdb: ' + str(vdb_file)
# if len(case_list) > 1: log.warning('more than one case found in vdb (using first): ' + str(vdb_file))
# vdb_case = case_list[0]
# vdbase.set_use_case(vdb_case)
vdbase.set_extent_kji(grid.extent_kji)
prop_import_collection = rp.GridPropertyCollection(realization = realisation)
prop_import_collection.set_grid(grid)
decoarsen_array = None
if vdb_static_properties:
props = vdbase.list_of_static_properties()
if len(props) > 0:
for keyword in props:
if keyword_list is not None and keyword not in keyword_list: continue
if property_kind_list is not None:
prop_kind, _, _ = rp.property_kind_and_facet_from_keyword(keyword)
if prop_kind not in property_kind_list and prop_kind not in ['active', 'region initialization']: continue
prop_import_collection.import_vdb_static_property_to_cache(vdbase, keyword, realization = realisation)
if decoarsen:
decoarsen_array = prop_import_collection.decoarsen_imported_list()
if decoarsen_array is not None: log.debug('static properties decoarsened for realisation ' + str(realisation))
grid.write_hdf5_from_caches(hdf5_file, mode = 'a', geometry = False,
imported_properties = prop_import_collection, write_active = False)
prop_import_collection.remove_all_cached_arrays()
if vdb_recurrent_properties:
r_timestep_list = vdbase.list_of_timesteps() # get list of timesteps for which recurrent files exist
if len(r_timestep_list) < recur_time_series.number_of_timestamps():
log.error('insufficient number of reporting timesteps; skipping recurrent data for realisation ' + str(realisation))
else:
common_recur_prop_set = None
for tni in range(recur_time_series.number_of_timestamps()):
if timestep_selection in ['all', 'first']:
timestep_number = timestep_list[tni]
r_timestep_number = r_timestep_list[tni]
elif timestep_selection == 'last' or tni > 0:
timestep_number = timestep_list[-1]
r_timestep_number = r_timestep_list[-1]
else:
timestep_number = timestep_list[0]
r_timestep_number = r_timestep_list[0]
stamp = full_time_series.timestamp(timestep_number)
recur_prop_list = vdbase.list_of_recurrent_properties(r_timestep_number)
if common_recur_prop_set is None: common_recur_prop_set = set(recur_prop_list)
elif recur_prop_list is not None: common_recur_prop_set = common_recur_prop_set.intersection(set(recur_prop_list))
step_import_collection = rp.GridPropertyCollection()
step_import_collection.set_grid(grid)
# for each property for this timestep, cache array and add to recur prop import collection for this time step
if recur_prop_list:
for keyword in recur_prop_list:
if not keyword or not keyword.isalnum(): continue
if keyword_list is not None and keyword not in keyword_list: continue
if property_kind_list is not None:
prop_kind, _, _ = rp.property_kind_and_facet_from_keyword(keyword)
if prop_kind not in property_kind_list: continue
step_import_collection.import_vdb_recurrent_property_to_cache(vdbase, r_timestep_number, keyword,
time_index = tni, # index into recur_time_series
realization = realisation)
if decoarsen_array is not None:
step_import_collection.decoarsen_imported_list(decoarsen_array = decoarsen_array)
# extend hdf5 with cached arrays for this timestep
# log.info('number of recurrent grid property arrays for timestep: ' + str(timestep_number) +
# ' is: ' + str(step_import_collection.number_of_imports()))
# log.info('extending hdf5 file with recurrent properties for timestep: ' + str(timestep_number))
grid.write_hdf5_from_caches(hdf5_file, mode = 'a', geometry = False,
imported_properties = step_import_collection, write_active = False)
# add imported list for this timestep to full imported list
prop_import_collection.inherit_imported_list_from_other_collection(step_import_collection)
# log.debug('total number of property arrays after timestep: ' + str(timestep_number) +
# ' is: ' + str(prop_import_collection.number_of_imports()))
# remove cached copies of arrays
step_import_collection.remove_all_cached_arrays()
if len(prop_import_collection.imported_list) == 0:
log.warning('no properties imported for realisation ' + str(realisation))
continue
prop_import_collection.create_xml_for_imported_list_and_add_parts_to_model(ext_uuid, time_series_uuid = recur_ts_uuid)
if create_property_set_per_realization:
prop_import_collection.create_property_set_xml('property set for realization ' + str(realisation))
if complete_collection is not None:
complete_collection.inherit_parts_from_other_collection(prop_import_collection)
if complete_collection is not None:
if create_property_set_per_timestep and recur_time_series is not None:
for tni in range(recur_time_series.number_of_timestamps()):
ts_collection = rp.selective_version_of_collection(complete_collection, time_index = tni)
if ts_collection.number_of_parts() > 0:
ts_collection.create_property_set_xml('property set for time index ' + str(tni))
if create_complete_property_set:
complete_collection.create_property_set_xml('property set for ensemble vdb import')
# mark model as modified (will already have happened anyway)
model.set_modified()
# rewrite epc file
log.info('storing updated model in epc file ' + epc_file)
model.store_epc(epc_file)
if progress_fn is not None: progress_fn(1.0)
# return updated resqml model
return model
def add_ab_properties(epc_file, # existing resqml model
grid_uuid = None, # optional grid uuid, required if more than one grid in model; todo: handle list of grids?
ext_uuid = None, # if None, hdf5 file holding grid geometry will be used
ab_property_list = None): # list of (file_name, keyword, property_kind, facet_type, facet, uom, time_index, null_value,
# discrete, realization)
"""Process a list of pure binary property array files, adding as parts of model, related to grid (hdf5 file is appended to)."""
assert ab_property_list, 'property list is empty or missing'
model = rq.Model(epc_file = epc_file)
grid_node = model.root_for_ijk_grid(uuid = grid_uuid) # will raise an exception if uuid is None and Model has more than 1 grid
assert grid_node is not None, 'grid not found in model'
grid = grr.any_grid(parent_model = model, grid_root = grid_node, find_properties = False)
if ext_uuid is None:
ext_node = rqet.find_nested_tags(grid.geometry_root, ['Points', 'Coordinates', 'HdfProxy', 'UUID'])
if ext_node is not None: ext_uuid = bu.uuid_from_string(ext_node.text.strip())
# ab_property_list: list of (filename, keyword, property_kind, | |
# -*- coding: utf-8 -*-
"""Part 3 - Personalized Diagnosis.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1XEb3sev4HFHW_lk33aAbP2RbMD9HALTw
#Network and Personalized Diagnosis
## Import Packages
First we import all required Python packages that we need during the project.
"""
# the pandas package provides tools for storing and manipulating dataframes
import pandas as pd
# numpy package for working with arrays of numbers
import numpy as np
# package for functions to plot numeric data
import matplotlib.pyplot as plt
# package for using filesystem
import os
# the networkx package provides methods for generating and processing networks
import networkx as nx
# datetime package provides methods to process date and time strings
import datetime as dt
# geopy package provides methods to process gps location data
from geopy.distance import geodesic
# import module preprocessing from package sklearn
from sklearn import preprocessing
"""## Step 1 - Networked Data and Model
Consider the location recordings stored in the file "PeopleLocations.csv" whose first line contains the column headers:
`ID; Latidute; Longitude; Date; Time; Covid19`
Each row contains entries for these columns separated by a semicolon `;`. Each row represents an individual with the column `ID` holding a string which is the identifier of the corresponding individual. The columns `Latitude` and `Longitude` are GPS coordinates of the individual at the time of recording. The fields `Date` and `Time` are strings representing the date and time of the recording. For each row, the last column `Covid19` contains `"y"` if the individual has been found Covid-19 positive at the time of recording. Otherwise the field `Covid19` contains `"n"`.
We will also use features of audio recordings for the individuals listed in "PeopleLocations.csv". These features are stored in the file "PeopleLocationsFeatures.csv" whose first line contains the column headers:
`ID;x1;x2;x3;x4;x5;x6`
Each row contains entries for these columns separated by a semicolon `;`. Each row represents an individual with the column `ID` holding a string which is the identifier of the corresponding individual. The columns `x1`,...,`x6` are numeric features extracted from an audio recording of the individual.
This step requires constructing a `networkx` object `G`. The object `G` represents an undirected contact network whose nodes are the individuals listed in "PeopleLocations.csv". The network `G` has an edge between nodes that represent individuals for which there are location recordings in "PeopleLocations.csv" within a geodesic distance of 2 meters.
Each node in `G` should have the attributes
- "y": which is equal to 1 if the corresponding location recording indicates an infection and equal to 0 otherwise.
- "x": six features x1,...,x6 of an audio recording for this individual. These features are stored in "PeopleLocationsFeatures.csv".
- "w": this attribute contains the weight vector w=(w1,...,w6)^{T} for a linear classifier
- "b": the intercept term for a linear classifier
"""
# read the location recordings from PeopleLocations.csv into dataframe "df"
df = pd.read_csv('PeopleLocations.csv',sep=';')
# read the audio features from PeopleLocationsFeatures.csv into dataframe "df_features"
df_features = pd.read_csv('PeopleLocationsFeatures.csv',sep=';')
# number of audio features x1,...,x6 per individual
nrfeatures=6
# store the name of the first column of dataframe df (the identifier column) in "IDcol"
IDcol = df.columns[0]
# list of distinct individuals with at least one record in the csv file
uniquepart = df[IDcol].unique()
# number of distinct individuals; this will be the number of nodes in the contact network
nrnodes = len(uniquepart)
# create networkx object `G`; one node will be added per individual below
G = nx.Graph()
# label encoder used to transform the Covid19 values 'y'/'n' to 1 and 0
le = preprocessing.LabelEncoder()
le.fit(["n", "y"])
# iterate over individuals represented by network nodes indexed by nodeidx=0,1,...
for nodeidx in range(nrnodes):
    # identifier of this individual, taken from the list `uniquepart`
    personid = uniquepart[nodeidx]
    # all location rows for this individual (rows of `df` with `ID` == personid)
    dmydf = pd.DataFrame(df.loc[df['ID'] == personid].copy())
    # all feature rows for this individual (rows of `df_features` with `ID` == personid)
    dmydf_features = pd.DataFrame(df_features.loc[df_features['ID'] == personid].copy())
    # reset index of dataframe dmydf so the first recording is at label 0
    dmydf.reset_index(drop=True, inplace=True)
    # reset index of dataframe dmydf_features likewise
    dmydf_features.reset_index(drop=True, inplace=True)
    # latitude of the first location recording in `dmydf`
    latitude=dmydf.loc[0,['Lat']][0]
    # longitude of the first location recording in `dmydf`
    longitude=dmydf.loc[0,['Lon']][0]
    # Covid19 infection status ('y'/'n') of the first recording in `dmydf`
    valtmp=dmydf.loc[0,['Covid19']][0]
    # map the infection status `valtmp` as `y`->1 and `n`->0
    infected=le.transform([valtmp])
    # date of the recording, parsed as day-month-year
    date_tmp = dt.datetime.strptime(dmydf.loc[0,['Date']][0], '%d-%m-%Y').date()
    # time of the recording, parsed as hour:minute:second
    time_tmp = dt.datetime.strptime(dmydf.loc[0,['Time']][0], '%H:%M:%S').time()
    # combine date and time of the location recording using `datetime.combine()`
    mydatetime = dt.datetime.combine(date_tmp, time_tmp)
    # add a node with index `nodeidx`
    G.add_node(nodeidx)
    # node attribute "name": identifier string of the individual
    G.nodes[nodeidx]['name']= personid
    # node attribute "coords": numpy array [latitude, longitude]
    G.nodes[nodeidx]['coords']= np.array([latitude,longitude])
    # node attribute "timestamp": datetime of the recording
    G.nodes[nodeidx]['timestamp'] = mydatetime
    # node attribute "y": 1 if reported Covid-19 infected, 0 otherwise
    G.nodes[nodeidx]['y'] = infected[0]
    # node attribute "w": zero-initialized weight vector of the linear classifier
    G.nodes[nodeidx]['w'] = np.zeros(nrfeatures)
    # node attribute "b": intercept term of the linear classifier, initialized to 0.0
    G.nodes[nodeidx]['b'] = 0.0
    # gather the features x1,...,x6 from "dmydf_features" into numpy array "dmyvec"
    dmyvec = np.zeros(nrfeatures)
    for iterfeature in range(nrfeatures):
        keytmp = "x%d"% (iterfeature+1)
        dmyvec[iterfeature]=dmydf_features.loc[0,[keytmp]][0]
    # node attribute "x": the feature vector of this individual
    G.nodes[nodeidx]['x'] = dmyvec
"""To build the contact network we add an edge between nodes representing individuals for which we can find location recording which are closer than 2 meters. """
# connect individuals whose recorded locations are within two meters of each
# other; since the graph is undirected, it suffices to visit each unordered
# pair (nodeidx1, nodeidx2) once, which halves the geodesic computations
# compared with looping over all ordered pairs
for nodeidx1 in range(nrnodes):
    for nodeidx2 in range(nodeidx1 + 1, nrnodes):
        # geodesic distance between the two recorded locations, in meters
        nodedist = geodesic(G.nodes[nodeidx1]['coords'], G.nodes[nodeidx2]['coords']).meters
        # if distance is below two meters connect the individuals by an edge
        if nodedist < 2:
            G.add_edge(nodeidx1, nodeidx2)
# build graph object "SubGraph" using G.subgraph() consisting of nodes 0,1,2,3,4
SubGraph = G.subgraph([0,1,2,3,4])
# read out node attribute `b` from all nodes in "SubGraph" and use as node labels
labels = nx.get_node_attributes(SubGraph, 'b')
# plot "SubGraph" using nx.draw_networkx() with "labels" as node labels
nx.draw_networkx(SubGraph,labels = labels )
"""## Step 2 - Personalized Diagnosiss
This step requires you to learn personalized predictors for a Covid-19 infection. To this end you will combine the gradient descent algorithm for logistic regression with a network averaging method for aggregating local gradients computed for each individual.
More formally, we assign each invidiual $i$ a linear classifier with weight vector $\mathbf{w}^{(i)}=\big(w^{(i)}_{1},\ldots,w^{(i)}_{6}\big)^{T}$ and intercept (bias) term $b^{(i)}$. Given an individual $i$ with features $\mathbf{x}^{(i)}$ (extracted from an audio recording) we diagnose a Covid-19 infection if $\mathbf{w}^{T} \mathbf{x}^{(i)} +b^{(i)} \geq0$. To learn the weight vector and intercept term for the node $i$ that belongs to the component $\mathcal{C}$ of the contact network, we use a sufficient number of gradient descent steps
$$ \mathbf{w}^{(k+1)} = \mathbf{w}^{(k)} - \alpha \mathbf{g}^{(k)} \mbox{ with } \mathbf{g}^{(k)}= (1/|\mathcal{C}|) \sum_{j \in \mathcal{C}} \big(h\big(\big(\mathbf{w}^{(k)}\big)^{T} \mathbf{x}^{(j)}\big) - y^{(j)}\big) \mathbf{x}^{(j)} $$
and
$$ b^{(k+1)} = b^{(k)} - \alpha v^{(k)} \mbox{ with } v^{(k)}= (1/|\mathcal{C}|) \sum_{j \in \mathcal{C}} \big(h\big(\big(\mathbf{w}^{(k)}\big)^{T} \mathbf{x}^{(j)}\big) - y^{(j)}\big) $$.
We will estimate the gradients $\mathbf{g}^{(k)}$ and $v^{(k)}$ using the averaging algorithm that we used in part 2 for computing the average infection rates.
The code snippet below implements the sigmoid function which maps the output of a linear predictor to an estimate for the probability of having a Covid-19 infection
"""
def sigmoid(X, theta):
    """Logistic (sigmoid) activation of an affine score.

    ``theta[0]`` is the intercept and ``theta[1:]`` the weight vector;
    returns ``1 / (1 + exp(-(X . theta[1:] + theta[0])))``, i.e. an
    estimate of the probability of the positive class.
    """
    intercept = theta[0]
    weights = theta[1:]
    score = np.dot(X, weights) + intercept
    return 1.0 / (1.0 + np.exp(-score))
weights_tmp_dic=nx.get_node_attributes(G,'w')
weights_tmp | |
-> Optional[pulumi.Input['ClusterAutoscaleArgs']]:
return pulumi.get(self, "autoscale")
    @autoscale.setter
    def autoscale(self, value: Optional[pulumi.Input['ClusterAutoscaleArgs']]):
        # Auto-generated setter: forwards to pulumi's internal property storage.
        pulumi.set(self, "autoscale", value)
    @property
    @pulumi.getter(name="autoterminationMinutes")
    def autotermination_minutes(self) -> Optional[pulumi.Input[int]]:
        # Auto-generated accessor pair for the "autotermination_minutes" state field.
        return pulumi.get(self, "autotermination_minutes")

    @autotermination_minutes.setter
    def autotermination_minutes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "autotermination_minutes", value)
    @property
    @pulumi.getter(name="awsAttributes")
    def aws_attributes(self) -> Optional[pulumi.Input['ClusterAwsAttributesArgs']]:
        # Auto-generated accessor pair for the "aws_attributes" state field.
        return pulumi.get(self, "aws_attributes")

    @aws_attributes.setter
    def aws_attributes(self, value: Optional[pulumi.Input['ClusterAwsAttributesArgs']]):
        pulumi.set(self, "aws_attributes", value)
    @property
    @pulumi.getter(name="azureAttributes")
    def azure_attributes(self) -> Optional[pulumi.Input['ClusterAzureAttributesArgs']]:
        # Auto-generated accessor pair for the "azure_attributes" state field.
        return pulumi.get(self, "azure_attributes")

    @azure_attributes.setter
    def azure_attributes(self, value: Optional[pulumi.Input['ClusterAzureAttributesArgs']]):
        pulumi.set(self, "azure_attributes", value)
    @property
    @pulumi.getter(name="clusterId")
    def cluster_id(self) -> Optional[pulumi.Input[str]]:
        # Auto-generated accessor pair for the "cluster_id" state field.
        return pulumi.get(self, "cluster_id")

    @cluster_id.setter
    def cluster_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_id", value)
    @property
    @pulumi.getter(name="clusterLogConf")
    def cluster_log_conf(self) -> Optional[pulumi.Input['ClusterClusterLogConfArgs']]:
        # Auto-generated accessor pair for the "cluster_log_conf" state field.
        return pulumi.get(self, "cluster_log_conf")

    @cluster_log_conf.setter
    def cluster_log_conf(self, value: Optional[pulumi.Input['ClusterClusterLogConfArgs']]):
        pulumi.set(self, "cluster_log_conf", value)
    @property
    @pulumi.getter(name="clusterName")
    def cluster_name(self) -> Optional[pulumi.Input[str]]:
        # Auto-generated accessor pair for the "cluster_name" state field.
        return pulumi.get(self, "cluster_name")

    @cluster_name.setter
    def cluster_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_name", value)
    @property
    @pulumi.getter(name="customTags")
    def custom_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        # Auto-generated accessor pair for the "custom_tags" state field.
        return pulumi.get(self, "custom_tags")

    @custom_tags.setter
    def custom_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "custom_tags", value)
    @property
    @pulumi.getter(name="dataSecurityMode")
    def data_security_mode(self) -> Optional[pulumi.Input[str]]:
        # Auto-generated accessor pair for the "data_security_mode" state field.
        return pulumi.get(self, "data_security_mode")

    @data_security_mode.setter
    def data_security_mode(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "data_security_mode", value)
    @property
    @pulumi.getter(name="defaultTags")
    def default_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        # Auto-generated accessor pair for the "default_tags" state field
        # (provider-computed output; set to None at creation time below).
        return pulumi.get(self, "default_tags")

    @default_tags.setter
    def default_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "default_tags", value)
    @property
    @pulumi.getter(name="dockerImage")
    def docker_image(self) -> Optional[pulumi.Input['ClusterDockerImageArgs']]:
        # Auto-generated accessor pair for the "docker_image" state field.
        return pulumi.get(self, "docker_image")

    @docker_image.setter
    def docker_image(self, value: Optional[pulumi.Input['ClusterDockerImageArgs']]):
        pulumi.set(self, "docker_image", value)
    @property
    @pulumi.getter(name="driverInstancePoolId")
    def driver_instance_pool_id(self) -> Optional[pulumi.Input[str]]:
        # Auto-generated accessor pair for the "driver_instance_pool_id" state field.
        return pulumi.get(self, "driver_instance_pool_id")

    @driver_instance_pool_id.setter
    def driver_instance_pool_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "driver_instance_pool_id", value)
    @property
    @pulumi.getter(name="driverNodeTypeId")
    def driver_node_type_id(self) -> Optional[pulumi.Input[str]]:
        # Auto-generated accessor pair for the "driver_node_type_id" state field.
        return pulumi.get(self, "driver_node_type_id")

    @driver_node_type_id.setter
    def driver_node_type_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "driver_node_type_id", value)
    @property
    @pulumi.getter(name="enableElasticDisk")
    def enable_elastic_disk(self) -> Optional[pulumi.Input[bool]]:
        # Auto-generated accessor pair for the "enable_elastic_disk" state field.
        return pulumi.get(self, "enable_elastic_disk")

    @enable_elastic_disk.setter
    def enable_elastic_disk(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_elastic_disk", value)
    @property
    @pulumi.getter(name="enableLocalDiskEncryption")
    def enable_local_disk_encryption(self) -> Optional[pulumi.Input[bool]]:
        # Auto-generated accessor pair for the "enable_local_disk_encryption" state field.
        return pulumi.get(self, "enable_local_disk_encryption")

    @enable_local_disk_encryption.setter
    def enable_local_disk_encryption(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_local_disk_encryption", value)
    @property
    @pulumi.getter(name="gcpAttributes")
    def gcp_attributes(self) -> Optional[pulumi.Input['ClusterGcpAttributesArgs']]:
        # Auto-generated accessor pair for the "gcp_attributes" state field.
        return pulumi.get(self, "gcp_attributes")

    @gcp_attributes.setter
    def gcp_attributes(self, value: Optional[pulumi.Input['ClusterGcpAttributesArgs']]):
        pulumi.set(self, "gcp_attributes", value)
    @property
    @pulumi.getter(name="idempotencyToken")
    def idempotency_token(self) -> Optional[pulumi.Input[str]]:
        # Auto-generated accessor pair for the "idempotency_token" state field.
        return pulumi.get(self, "idempotency_token")

    @idempotency_token.setter
    def idempotency_token(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "idempotency_token", value)
    @property
    @pulumi.getter(name="initScripts")
    def init_scripts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterInitScriptArgs']]]]:
        # Auto-generated accessor pair for the "init_scripts" state field.
        return pulumi.get(self, "init_scripts")

    @init_scripts.setter
    def init_scripts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterInitScriptArgs']]]]):
        pulumi.set(self, "init_scripts", value)
    @property
    @pulumi.getter(name="instancePoolId")
    def instance_pool_id(self) -> Optional[pulumi.Input[str]]:
        # Auto-generated accessor pair for the "instance_pool_id" state field.
        return pulumi.get(self, "instance_pool_id")

    @instance_pool_id.setter
    def instance_pool_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "instance_pool_id", value)
    @property
    @pulumi.getter(name="isPinned")
    def is_pinned(self) -> Optional[pulumi.Input[bool]]:
        # Auto-generated accessor pair for the "is_pinned" state field.
        return pulumi.get(self, "is_pinned")

    @is_pinned.setter
    def is_pinned(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_pinned", value)
    @property
    @pulumi.getter
    def libraries(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterLibraryArgs']]]]:
        # Auto-generated accessor pair for the "libraries" state field.
        return pulumi.get(self, "libraries")

    @libraries.setter
    def libraries(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterLibraryArgs']]]]):
        pulumi.set(self, "libraries", value)
    @property
    @pulumi.getter(name="nodeTypeId")
    def node_type_id(self) -> Optional[pulumi.Input[str]]:
        # Auto-generated accessor pair for the "node_type_id" state field.
        return pulumi.get(self, "node_type_id")

    @node_type_id.setter
    def node_type_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "node_type_id", value)
    @property
    @pulumi.getter(name="numWorkers")
    def num_workers(self) -> Optional[pulumi.Input[int]]:
        # Auto-generated accessor pair for the "num_workers" state field.
        return pulumi.get(self, "num_workers")

    @num_workers.setter
    def num_workers(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "num_workers", value)
    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[pulumi.Input[str]]:
        # Auto-generated accessor pair for the "policy_id" state field.
        return pulumi.get(self, "policy_id")

    @policy_id.setter
    def policy_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy_id", value)
    @property
    @pulumi.getter(name="singleUserName")
    def single_user_name(self) -> Optional[pulumi.Input[str]]:
        # Auto-generated accessor pair for the "single_user_name" state field.
        return pulumi.get(self, "single_user_name")

    @single_user_name.setter
    def single_user_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "single_user_name", value)
    @property
    @pulumi.getter(name="sparkConf")
    def spark_conf(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        # Auto-generated accessor pair for the "spark_conf" state field.
        return pulumi.get(self, "spark_conf")

    @spark_conf.setter
    def spark_conf(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "spark_conf", value)
    @property
    @pulumi.getter(name="sparkEnvVars")
    def spark_env_vars(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        # Auto-generated accessor pair for the "spark_env_vars" state field.
        return pulumi.get(self, "spark_env_vars")

    @spark_env_vars.setter
    def spark_env_vars(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "spark_env_vars", value)
    @property
    @pulumi.getter(name="sparkVersion")
    def spark_version(self) -> Optional[pulumi.Input[str]]:
        # Auto-generated accessor pair for the "spark_version" state field
        # (required at resource creation — see the check in _internal_init).
        return pulumi.get(self, "spark_version")

    @spark_version.setter
    def spark_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "spark_version", value)
    @property
    @pulumi.getter(name="sshPublicKeys")
    def ssh_public_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        # Auto-generated accessor pair for the "ssh_public_keys" state field.
        return pulumi.get(self, "ssh_public_keys")

    @ssh_public_keys.setter
    def ssh_public_keys(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ssh_public_keys", value)
    @property
    @pulumi.getter
    def state(self) -> Optional[pulumi.Input[str]]:
        # Auto-generated accessor pair for the "state" field
        # (provider-computed output; set to None at creation time).
        return pulumi.get(self, "state")

    @state.setter
    def state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "state", value)
    @property
    @pulumi.getter
    def url(self) -> Optional[pulumi.Input[str]]:
        # Auto-generated accessor pair for the "url" field
        # (provider-computed output; set to None at creation time).
        return pulumi.get(self, "url")

    @url.setter
    def url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "url", value)
class Cluster(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 autoscale: Optional[pulumi.Input[pulumi.InputType['ClusterAutoscaleArgs']]] = None,
                 autotermination_minutes: Optional[pulumi.Input[int]] = None,
                 aws_attributes: Optional[pulumi.Input[pulumi.InputType['ClusterAwsAttributesArgs']]] = None,
                 azure_attributes: Optional[pulumi.Input[pulumi.InputType['ClusterAzureAttributesArgs']]] = None,
                 cluster_id: Optional[pulumi.Input[str]] = None,
                 cluster_log_conf: Optional[pulumi.Input[pulumi.InputType['ClusterClusterLogConfArgs']]] = None,
                 cluster_name: Optional[pulumi.Input[str]] = None,
                 custom_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 data_security_mode: Optional[pulumi.Input[str]] = None,
                 docker_image: Optional[pulumi.Input[pulumi.InputType['ClusterDockerImageArgs']]] = None,
                 driver_instance_pool_id: Optional[pulumi.Input[str]] = None,
                 driver_node_type_id: Optional[pulumi.Input[str]] = None,
                 enable_elastic_disk: Optional[pulumi.Input[bool]] = None,
                 enable_local_disk_encryption: Optional[pulumi.Input[bool]] = None,
                 gcp_attributes: Optional[pulumi.Input[pulumi.InputType['ClusterGcpAttributesArgs']]] = None,
                 idempotency_token: Optional[pulumi.Input[str]] = None,
                 init_scripts: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterInitScriptArgs']]]]] = None,
                 instance_pool_id: Optional[pulumi.Input[str]] = None,
                 is_pinned: Optional[pulumi.Input[bool]] = None,
                 libraries: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterLibraryArgs']]]]] = None,
                 node_type_id: Optional[pulumi.Input[str]] = None,
                 num_workers: Optional[pulumi.Input[int]] = None,
                 policy_id: Optional[pulumi.Input[str]] = None,
                 single_user_name: Optional[pulumi.Input[str]] = None,
                 spark_conf: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 spark_env_vars: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 spark_version: Optional[pulumi.Input[str]] = None,
                 ssh_public_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        Create a Cluster resource with the given unique name, props, and options.

        This is the typing-only @overload for the property-by-property call
        form; the real work is done in _internal_init.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ClusterArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Create a Cluster resource with the given unique name, props, and options.

        This is the typing-only @overload for the bundled-args call form;
        the real work is done in _internal_init.

        :param str resource_name: The name of the resource.
        :param ClusterArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatcher for the two @overload signatures above:
        # either (name, args=ClusterArgs, opts=...) or (name, opts=..., **properties).
        resource_args, opts = _utilities.get_resource_args_opts(ClusterArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            # Bundled-args form: expand the ClusterArgs fields into keyword args.
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            # Property-by-property form: forward everything unchanged.
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 autoscale: Optional[pulumi.Input[pulumi.InputType['ClusterAutoscaleArgs']]] = None,
                 autotermination_minutes: Optional[pulumi.Input[int]] = None,
                 aws_attributes: Optional[pulumi.Input[pulumi.InputType['ClusterAwsAttributesArgs']]] = None,
                 azure_attributes: Optional[pulumi.Input[pulumi.InputType['ClusterAzureAttributesArgs']]] = None,
                 cluster_id: Optional[pulumi.Input[str]] = None,
                 cluster_log_conf: Optional[pulumi.Input[pulumi.InputType['ClusterClusterLogConfArgs']]] = None,
                 cluster_name: Optional[pulumi.Input[str]] = None,
                 custom_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 data_security_mode: Optional[pulumi.Input[str]] = None,
                 docker_image: Optional[pulumi.Input[pulumi.InputType['ClusterDockerImageArgs']]] = None,
                 driver_instance_pool_id: Optional[pulumi.Input[str]] = None,
                 driver_node_type_id: Optional[pulumi.Input[str]] = None,
                 enable_elastic_disk: Optional[pulumi.Input[bool]] = None,
                 enable_local_disk_encryption: Optional[pulumi.Input[bool]] = None,
                 gcp_attributes: Optional[pulumi.Input[pulumi.InputType['ClusterGcpAttributesArgs']]] = None,
                 idempotency_token: Optional[pulumi.Input[str]] = None,
                 init_scripts: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterInitScriptArgs']]]]] = None,
                 instance_pool_id: Optional[pulumi.Input[str]] = None,
                 is_pinned: Optional[pulumi.Input[bool]] = None,
                 libraries: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterLibraryArgs']]]]] = None,
                 node_type_id: Optional[pulumi.Input[str]] = None,
                 num_workers: Optional[pulumi.Input[int]] = None,
                 policy_id: Optional[pulumi.Input[str]] = None,
                 single_user_name: Optional[pulumi.Input[str]] = None,
                 spark_conf: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 spark_env_vars: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 spark_version: Optional[pulumi.Input[str]] = None,
                 ssh_public_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 __props__=None):
        """Shared implementation behind both __init__ overloads: validate
        options, collect all inputs into a ClusterArgs bag, and register
        the resource with the Pulumi engine."""
        # Normalize and validate resource options.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: __props__ must not be pre-supplied.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ClusterArgs.__new__(ClusterArgs)
            __props__.__dict__["autoscale"] = autoscale
            __props__.__dict__["autotermination_minutes"] = autotermination_minutes
            __props__.__dict__["aws_attributes"] = aws_attributes
            __props__.__dict__["azure_attributes"] = azure_attributes
            __props__.__dict__["cluster_id"] = cluster_id
            __props__.__dict__["cluster_log_conf"] = cluster_log_conf
            __props__.__dict__["cluster_name"] = cluster_name
            __props__.__dict__["custom_tags"] = custom_tags
            __props__.__dict__["data_security_mode"] = data_security_mode
            __props__.__dict__["docker_image"] = docker_image
            __props__.__dict__["driver_instance_pool_id"] = driver_instance_pool_id
            __props__.__dict__["driver_node_type_id"] = driver_node_type_id
            __props__.__dict__["enable_elastic_disk"] = enable_elastic_disk
            __props__.__dict__["enable_local_disk_encryption"] = enable_local_disk_encryption
            __props__.__dict__["gcp_attributes"] = gcp_attributes
            __props__.__dict__["idempotency_token"] = idempotency_token
            __props__.__dict__["init_scripts"] = init_scripts
            __props__.__dict__["instance_pool_id"] = instance_pool_id
            __props__.__dict__["is_pinned"] = is_pinned
            __props__.__dict__["libraries"] = libraries
            __props__.__dict__["node_type_id"] = node_type_id
            __props__.__dict__["num_workers"] = num_workers
            __props__.__dict__["policy_id"] = policy_id
            __props__.__dict__["single_user_name"] = single_user_name
            __props__.__dict__["spark_conf"] = spark_conf
            __props__.__dict__["spark_env_vars"] = spark_env_vars
            # spark_version is the only required input (unless rehydrating via URN).
            if spark_version is None and not opts.urn:
                raise TypeError("Missing required property 'spark_version'")
            __props__.__dict__["spark_version"] = spark_version
            __props__.__dict__["ssh_public_keys"] = ssh_public_keys
            # Output-only fields are provider-computed; start them as None.
            __props__.__dict__["default_tags"] = None
            __props__.__dict__["state"] = None
            __props__.__dict__["url"] = None
        super(Cluster, __self__).__init__(
            'databricks:databricks/cluster:Cluster',
            resource_name,
            __props__,
            opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
autoscale: Optional[pulumi.Input[pulumi.InputType['ClusterAutoscaleArgs']]] = None,
autotermination_minutes: Optional[pulumi.Input[int]] = None,
aws_attributes: Optional[pulumi.Input[pulumi.InputType['ClusterAwsAttributesArgs']]] = None,
azure_attributes: Optional[pulumi.Input[pulumi.InputType['ClusterAzureAttributesArgs']]] = None,
cluster_id: Optional[pulumi.Input[str]] = None,
cluster_log_conf: Optional[pulumi.Input[pulumi.InputType['ClusterClusterLogConfArgs']]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
custom_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
data_security_mode: Optional[pulumi.Input[str]] = None,
default_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
docker_image: Optional[pulumi.Input[pulumi.InputType['ClusterDockerImageArgs']]] = None,
driver_instance_pool_id: Optional[pulumi.Input[str]] = None,
driver_node_type_id: Optional[pulumi.Input[str]] = None,
enable_elastic_disk: Optional[pulumi.Input[bool]] = None,
enable_local_disk_encryption: Optional[pulumi.Input[bool]] = None,
gcp_attributes: Optional[pulumi.Input[pulumi.InputType['ClusterGcpAttributesArgs']]] = None,
idempotency_token: Optional[pulumi.Input[str]] = None,
init_scripts: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterInitScriptArgs']]]]] = None,
instance_pool_id: Optional[pulumi.Input[str]] = None,
is_pinned: Optional[pulumi.Input[bool]] = None,
libraries: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterLibraryArgs']]]]] = None,
node_type_id: Optional[pulumi.Input[str]] = None,
num_workers: Optional[pulumi.Input[int]] = None,
policy_id: Optional[pulumi.Input[str]] = None,
single_user_name: Optional[pulumi.Input[str]] = None,
spark_conf: Optional[pulumi.Input[Mapping[str, Any]]] = None,
spark_env_vars: Optional[pulumi.Input[Mapping[str, Any]]] = None,
spark_version: Optional[pulumi.Input[str]] = None,
ssh_public_keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
state: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None) -> 'Cluster':
"""
Get an existing Cluster resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ClusterState.__new__(_ClusterState)
__props__.__dict__["autoscale"] = autoscale
__props__.__dict__["autotermination_minutes"] = autotermination_minutes
__props__.__dict__["aws_attributes"] = aws_attributes
__props__.__dict__["azure_attributes"] = azure_attributes
__props__.__dict__["cluster_id"] = cluster_id
__props__.__dict__["cluster_log_conf"] = cluster_log_conf
__props__.__dict__["cluster_name"] = cluster_name
__props__.__dict__["custom_tags"] = custom_tags
__props__.__dict__["data_security_mode"] | |
"""
Tiny Web - pretty simple and powerful web server for tiny platforms like ESP8266 / ESP32
MIT license
(C) <NAME> 2017-2018
"""
import logging
import uasyncio as asyncio
import uasyncio.core
import ujson as json
import gc
import uos as os
import sys
import uerrno as errno
import usocket as socket
# Module-level logger for the web server.
log = logging.getLogger('WEB')

# Type object of a generator; used to detect when a handler returned a
# generator (so its output can be streamed as a chunked response).
type_gen = type((lambda: (yield))())

# uasyncio v3 is shipped with MicroPython 1.13, and contains some subtle
# but breaking changes. See also https://github.com/peterhinch/micropython-async/blob/master/v3/README.md
IS_UASYNCIO_V3 = hasattr(asyncio, "__version__") and asyncio.__version__ >= (3,)
def urldecode_plus(s):
    """Decode a URL-encoded string, treating '+' as a space.

    Percent escapes ('%41' -> 'A') are expanded; a '%' followed by fewer
    than two characters is passed through as-is.

    Returns the decoded string.
    """
    fragments = s.replace('+', ' ').split('%')
    decoded = [fragments[0]]
    for fragment in fragments[1:]:
        if len(fragment) >= 2:
            # First two chars after '%' are a hex byte; the rest is literal.
            decoded.append(chr(int(fragment[:2], 16)))
            decoded.append(fragment[2:])
        elif fragment:
            # Lone character after '%': keep it, dropping the '%'.
            decoded.append(fragment)
        else:
            # Trailing / doubled '%': keep the '%' itself.
            decoded.append('%')
    return ''.join(decoded)
def parse_query_string(s):
    """Parse a URL-encoded query string into a dict.

    Keys without '=' map to the empty string. Both keys and values are
    run through urldecode_plus().

    Returns dict
    """
    result = {}
    for pair in s.split('&'):
        key, _sep, value = pair.partition('=')
        # When '=' is absent, partition() yields value == '' — same result
        # as the original "key only" branch.
        result[urldecode_plus(key)] = urldecode_plus(value)
    return result
class HTTPException(Exception):
    """HTTP protocol error carrying the response status code.

    :param code: HTTP status code to send to the client (default 400).
    """

    def __init__(self, code=400):
        # Pass the code to Exception so str()/repr() and e.args are
        # informative instead of empty (the original swallowed it).
        super().__init__(code)
        self.code = code
class request:
    """A single parsed HTTP request.

    Attributes (filled in as the request is read):
        reader       - async stream reader for the client connection
        headers      - dict of saved header name -> stripped value (bytes)
        method       - HTTP method, e.g. b'GET'
        path         - request path, e.g. b'/index.html'
        query_string - raw query string (bytes), b'' when absent

    NOTE(review): read_parse_form_data() reads ``self.params`` which is not
    set here — it appears to be injected by the server before dispatch;
    confirm against the webserver class.
    """

    def __init__(self, _reader):
        self.reader = _reader
        self.headers = {}
        self.method = b''
        self.path = b''
        self.query_string = b''

    async def read_request_line(self):
        """Read and parse the first line (AKA HTTP Request Line).

        Function is generator.

        Request line is something like:
        GET /something/script?param1=val1 HTTP/1.1

        Raises HTTPException(400) on a malformed request line.
        """
        while True:
            rl = await self.reader.readline()
            # skip empty lines before the request line
            if rl == b'\r\n' or rl == b'\n':
                continue
            break
        rl_frags = rl.split()
        if len(rl_frags) != 3:
            raise HTTPException(400)
        self.method = rl_frags[0]
        # Split path from the optional query string at the first '?'.
        url_frags = rl_frags[1].split(b'?', 1)
        self.path = url_frags[0]
        if len(url_frags) > 1:
            self.query_string = url_frags[1]

    async def read_headers(self, save_headers=None):
        """Read and parse HTTP headers until \r\n\r\n.

        Optional argument 'save_headers' controls which headers to save.
        This is done mostly to deal with memory constraints.

        Function is generator.

        HTTP headers could be like:
        Host: google.com
        Content-Type: blah
        \r\n

        Raises HTTPException(400) on a malformed header line.
        """
        # Avoid the mutable-default-argument pitfall: build a fresh empty
        # list per call instead of sharing one across all calls.
        if save_headers is None:
            save_headers = []
        while True:
            gc.collect()
            line = await self.reader.readline()
            if line == b'\r\n':
                break
            frags = line.split(b':', 1)
            if len(frags) != 2:
                raise HTTPException(400)
            # Only keep headers the caller asked for (memory constraint).
            if frags[0] in save_headers:
                self.headers[frags[0]] = frags[1].strip()

    async def read_parse_form_data(self):
        """Read HTTP form data (payload), if any.

        Function is generator.

        Returns:
            - dict of key / value pairs (empty when no/unknown payload)
            - None when the Content-Type is present but unsupported

        Raises HTTPException(413) when the payload exceeds
        params['max_body_size'], HTTPException(400) on malformed data.
        """
        # TODO: Probably there is better solution how to handle
        # request body, at least for simple urlencoded forms - by processing
        # chunks instead of accumulating payload.
        gc.collect()
        if b'Content-Length' not in self.headers:
            return {}
        # Without a Content-Type we cannot parse the body — return empty.
        if b'Content-Type' not in self.headers:
            return {}
        size = int(self.headers[b'Content-Length'])
        if size > self.params['max_body_size'] or size < 0:
            raise HTTPException(413)
        data = await self.reader.readexactly(size)
        # Use only string before ';', e.g:
        # application/x-www-form-urlencoded; charset=UTF-8
        ct = self.headers[b'Content-Type'].split(b';', 1)[0]
        try:
            if ct == b'application/json':
                return json.loads(data)
            elif ct == b'application/x-www-form-urlencoded':
                return parse_query_string(data.decode())
        except ValueError:
            # Re-generate exception for malformed form data
            raise HTTPException(400)
class response:
    """HTTP Response class.

    Wraps an async stream writer and accumulates status code / headers
    before sending them in one packet.

    NOTE(review): add_access_control_headers() reads ``self.params`` which
    is not set in __init__ — it appears to be injected by the web server;
    confirm against the webserver class.
    """

    def __init__(self, _writer, buffer_size=128):
        self.writer = _writer
        self.buffer_size = buffer_size
        # awrite is uasyncio's async write; aliased so handlers can call
        # `await resp.send(...)` directly.
        self.send = _writer.awrite
        self.code = 200
        self.version = '1.0'
        self.headers = {}

    async def _send_headers(self):
        """Compose and send:
        - HTTP request line
        - HTTP headers following by \r\n.
        This function is generator.

        P.S.
        Because usually we have only a few HTTP headers (2-5) it doesn't make sense
        to send them separately - sometimes it could increase latency.
        So combining headers together and sending them as a single "packet".
        """
        # Request line
        hdrs = 'HTTP/{} {} MSG\r\n'.format(self.version, self.code)
        # Headers
        for k, v in self.headers.items():
            hdrs += '{}: {}\r\n'.format(k, v)
        hdrs += '\r\n'
        # Collect garbage after small mallocs
        gc.collect()
        await self.send(hdrs)

    async def error(self, code, msg=None):
        """Generate HTTP error response
        This function is generator.

        Arguments:
            code - HTTP response code
            msg  - optional body text

        Example:
            # Not enough permissions. Send HTTP 403 - Forbidden
            await resp.error(403)
        """
        self.code = code
        if msg:
            self.add_header('Content-Length', len(msg))
        await self._send_headers()
        if msg:
            await self.send(msg)

    async def redirect(self, location, msg=None):
        """Generate HTTP redirect response to 'location'.
        Basically it will generate HTTP 302 with 'Location' header

        Arguments:
            location - URL to redirect to
            msg      - optional body text

        Example:
            # Redirect to /something
            await resp.redirect('/something')
        """
        self.code = 302
        self.add_header('Location', location)
        if msg:
            self.add_header('Content-Length', len(msg))
        await self._send_headers()
        if msg:
            await self.send(msg)

    def add_header(self, key, value):
        """Add HTTP response header

        Arguments:
            key - header name
            value - header value

        Example:
            resp.add_header('Content-Encoding', 'gzip')
        """
        self.headers[key] = value

    def add_access_control_headers(self):
        """Add Access Control related HTTP response headers.
        This is required when working with RestApi (JSON requests)
        """
        # self.params is expected to be injected externally (see class note).
        self.add_header('Access-Control-Allow-Origin', self.params['allowed_access_control_origins'])
        self.add_header('Access-Control-Allow-Methods', self.params['allowed_access_control_methods'])
        self.add_header('Access-Control-Allow-Headers', self.params['allowed_access_control_headers'])

    async def start_html(self):
        """Start response with HTML content type.
        This function is generator.

        Example:
            await resp.start_html()
            await resp.send('<html><h1>Hello, world!</h1></html>')
        """
        self.add_header('Content-Type', 'text/html')
        await self._send_headers()

    async def send_file(self, filename, content_type=None, content_encoding=None, max_age=2592000):
        """Send local file as HTTP response.
        This function is generator.

        Arguments:
            filename - Name of file which exists in local filesystem
        Keyword arguments:
            content_type - Filetype. By default - None means auto-detect.
            content_encoding - Optional Content-Encoding header value (e.g. 'gzip').
            max_age - Cache control. How long browser can keep this file on disk.
                      By default - 30 days
                      Set to 0 - to disable caching.

        Example 1: Default use case:
            await resp.send_file('images/cat.jpg')
        Example 2: Disable caching:
            await resp.send_file('static/index.html', max_age=0)
        Example 3: Override content type:
            await resp.send_file('static/file.bin', content_type='application/octet-stream')
        """
        try:
            # Get file size (index 6 of the stat tuple is st_size)
            stat = os.stat(filename)
            slen = str(stat[6])
            self.add_header('Content-Length', slen)
            # Find content type
            if content_type:
                self.add_header('Content-Type', content_type)
            # Add content-encoding, if any
            if content_encoding:
                self.add_header('Content-Encoding', content_encoding)
            # Since this is static content it totally makes sense
            # to tell browser to cache it, however, you can always
            # override it by setting max_age to zero
            self.add_header('Cache-Control', 'max-age={}, public'.format(max_age))
            # NOTE(review): file is opened in text mode yet read with
            # readinto() into a bytearray and sent with awrite(sz=...) —
            # this relies on MicroPython stream semantics; confirm before
            # porting to CPython.
            with open(filename) as f:
                await self._send_headers()
                gc.collect()
                buf = bytearray(self.buffer_size)
                while True:
                    size = f.readinto(buf)
                    if size == 0:
                        break
                    await self.send(buf, sz=size)
        except OSError as e:
            # special handling for ENOENT / EACCESS
            if e.args[0] in (errno.ENOENT, errno.EACCES):
                raise HTTPException(404)
            else:
                raise
async def restful_resource_handler(req, resp, param=None):
    """Handler for RESTful API endpoints.

    Gathers the request payload and query-string parameters, dispatches to
    the user handler registered for the HTTP method, then serializes the
    result as JSON (plain or chunked).

    NOTE(review): req.params['_callmap'] is expected to be injected by the
    route-registration code; confirm against the webserver class.
    """
    # Gather data - query string, JSON in request body...
    data = await req.read_parse_form_data()
    # Add parameters from URI query string as well
    # This one is actually for simple development of RestAPI
    if req.query_string != b'':
        data.update(parse_query_string(req.query_string.decode()))
    # Call actual handler
    _handler, _kwargs = req.params['_callmap'][req.method]
    # Collect garbage before / after handler execution
    gc.collect()
    if param:
        res = _handler(data, param, **_kwargs)
    else:
        res = _handler(data, **_kwargs)
    gc.collect()
    # Handler result could be:
    # 1. generator - in case of large payload
    # 2. string - just string :)
    # 3. dict - meaning client wants tinyweb to convert it to JSON
    # it can also return error code together with str / dict
    # res = {'blah': 'blah'}
    # res = {'blah': 'blah'}, 201
    if isinstance(res, type_gen):
        # Result is generator, use chunked response
        # NOTICE: HTTP 1.0 by itself does not support chunked responses, so, making workaround:
        # Response is HTTP/1.1 with Connection: close
        resp.version = '1.1'
        resp.add_header('Connection', 'close')
        resp.add_header('Content-Type', 'application/json')
        resp.add_header('Transfer-Encoding', 'chunked')
        resp.add_access_control_headers()
        await resp._send_headers()
        # Drain generator: each yielded chunk is sent with its hex length
        # prefix per the HTTP/1.1 chunked framing.
        for chunk in res:
            chunk_len = len(chunk.encode('utf-8'))
            await resp.send('{:x}\r\n'.format(chunk_len))
            await resp.send(chunk)
            await resp.send('\r\n')
            gc.collect()
        # Zero-length chunk terminates the chunked stream.
        await resp.send('0\r\n\r\n')
    else:
        # Unpack optional (result, status_code) tuple.
        if type(res) == tuple:
            resp.code = res[1]
            res = res[0]
        elif res is None:
            raise Exception('Result expected')
        # Send response
        if type(res) is dict:
            res_str = json.dumps(res)
        else:
            res_str = res
        resp.add_header('Content-Type', 'application/json')
        resp.add_header('Content-Length', str(len(res_str)))
        resp.add_access_control_headers()
        await resp._send_headers()
        await resp.send(res_str)
class webserver:
def __init__(self, request_timeout=3, max_concurrency=3, backlog=16, buffer_size=128, debug=False):
"""Tiny Web Server class.
Keyword arguments:
request_timeout - Time for client to send complete request
after that connection will be closed.
max_concurrency - How many connections can be processed concurrently.
It is very important to limit this number because of
memory constrain.
Default value depends on platform
backlog - Parameter to socket.listen() function. Defines size of
pending to be accepted connections queue.
Must be greater than max_concurrency
debug - Whether send exception info (text + backtrace)
to client together with HTTP 500 or not.
"""
self.loop | |
# Tests invocation of the interpreter with various command line arguments
# Most tests are executed with environment variables ignored
# See test_cmd_line_script.py for testing of script execution
zaimportuj test.support, unittest
zaimportuj os
zaimportuj shutil
zaimportuj sys
zaimportuj subprocess
zaimportuj tempfile
z test.support zaimportuj script_helper
z test.support.script_helper zaimportuj (spawn_python, kill_python, assert_python_ok,
assert_python_failure)
# XXX (ncoghlan): Move to script_helper and make consistent with run_python
def _kill_python_and_exit_code(p):
    """Terminate the spawned interpreter *p*; return (captured output, exit code)."""
    data = kill_python(p)
    returncode = p.wait()
    zwróć data, returncode
klasa CmdLineTest(unittest.TestCase):
    def test_directories(self):
        # Passing a directory (or shell-redirection junk) as the script
        # argument must make the interpreter exit with a failure.
        assert_python_failure('.')
        assert_python_failure('< .')
    def verify_valid_flag(self, cmd_line):
        """Run the interpreter with *cmd_line* and check it exits cleanly
        without printing a traceback on stdout or stderr."""
        rc, out, err = assert_python_ok(*cmd_line)
        self.assertPrawda(out == b'' albo out.endswith(b'\n'))
        self.assertNotIn(b'Traceback', out)
        self.assertNotIn(b'Traceback', err)
    def test_optimize(self):
        # -O / -OO (optimization levels) must be accepted.
        self.verify_valid_flag('-O')
        self.verify_valid_flag('-OO')
    def test_site_flag(self):
        # -S (skip 'import site') must be accepted.
        self.verify_valid_flag('-S')
    def test_usage(self):
        # -h must print the usage message.
        rc, out, err = assert_python_ok('-h')
        self.assertIn(b'usage', out)
    def test_version(self):
        # -V / --version must print the version on stdout, not stderr.
        version = ('Python %d.%d' % sys.version_info[:2]).encode("ascii")
        dla switch w '-V', '--version':
            rc, out, err = assert_python_ok(switch)
            self.assertNieprawda(err.startswith(version))
            self.assertPrawda(out.startswith(version))
    def test_verbose(self):
        # -v causes imports to write to stderr. If the write to
        # stderr itself causes an import to happen (for the output
        # codec), a recursion loop can occur.
        rc, out, err = assert_python_ok('-v')
        self.assertNotIn(b'stack overflow', err)
        rc, out, err = assert_python_ok('-vv')
        self.assertNotIn(b'stack overflow', err)
    def test_xoptions(self):
        # -X options must land in sys._xoptions as {name: True-or-value}.
        def get_xoptions(*args):
            # use subprocess module directly because test.support.script_helper adds
            # "-X faulthandler" to the command line
            args = (sys.executable, '-E') + args
            args += ('-c', 'zaimportuj sys; print(sys._xoptions)')
            out = subprocess.check_output(args)
            opts = eval(out.splitlines()[0])
            zwróć opts
        opts = get_xoptions()
        self.assertEqual(opts, {})
        opts = get_xoptions('-Xa', '-Xb=c,d=e')
        self.assertEqual(opts, {'a': Prawda, 'b': 'c,d=e'})
def test_showrefcount(self):
def run_python(*args):
# this jest similar to assert_python_ok but doesn't strip
# the refcount z stderr. It can be replaced once
# assert_python_ok stops doing that.
cmd = [sys.executable]
cmd.extend(args)
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
p.stdout.close()
p.stderr.close()
rc = p.returncode
self.assertEqual(rc, 0)
zwróć rc, out, err
code = 'zaimportuj sys; print(sys._xoptions)'
# normally the refcount jest hidden
rc, out, err = run_python('-c', code)
self.assertEqual(out.rstrip(), b'{}')
self.assertEqual(err, b'')
# "-X showrefcount" shows the refcount, but only w debug builds
rc, out, err = run_python('-X', 'showrefcount', '-c', code)
self.assertEqual(out.rstrip(), b"{'showrefcount': Prawda}")
jeżeli hasattr(sys, 'gettotalrefcount'): # debug build
self.assertRegex(err, br'^\[\d+ refs, \d+ blocks\]')
inaczej:
self.assertEqual(err, b'')
def test_run_module(self):
    # Test expected operation of the '-m' switch
    # Switch needs an argument
    assert_python_failure('-m')
    # Check we get an error for a nonexistent module
    assert_python_failure('-m', 'fnord43520xyz')
    # Check the runpy module also gives an error for
    # a nonexistent module
    assert_python_failure('-m', 'runpy', 'fnord43520xyz')
    # All good if module is located and run successfully
    assert_python_ok('-m', 'timeit', '-n', '1')
def test_run_module_bug1764407(self):
    # -m and -i need to play well together
    # Runs the timeit module and checks the __main__
    # namespace has been populated appropriately
    p = spawn_python('-i', '-m', 'timeit', '-n', '1')
    p.stdin.write(b'Timer\n')
    p.stdin.write(b'exit()\n')
    data = kill_python(p)
    self.assertPrawda(data.find(b'1 loop') != -1)
    self.assertPrawda(data.find(b'__main__.Timer') != -1)
def test_run_code(self):
    # Test expected operation of the '-c' switch
    # Switch needs an argument
    assert_python_failure('-c')
    # Check we get an error for an uncaught exception
    assert_python_failure('-c', 'raise Exception')
    # All good if execution is successful
    assert_python_ok('-c', 'pass')
@unittest.skipUnless(test.support.FS_NONASCII, 'need support.FS_NONASCII')
def test_non_ascii(self):
    # Test handling of non-ascii data
    command = ("assert(ord(%r) == %s)"
               % (test.support.FS_NONASCII, ord(test.support.FS_NONASCII)))
    assert_python_ok('-c', command)
# On Windows, passing bytes to subprocess doesn't test how Python decodes the
# command line, but how subprocess does decode bytes to unicode.  Python
# doesn't decode the command line because Windows provides directly the
# arguments as unicode (using wmain() instead of main()).
@unittest.skipIf(sys.platform == 'win32',
                 'Windows has a native unicode API')
def test_undecodable_code(self):
    undecodable = b"\xff"
    env = os.environ.copy()
    # Use C locale to get ascii for the locale encoding
    env['LC_ALL'] = 'C'
    code = (
        b'zaimportuj locale; '
        b'print(ascii("' + undecodable + b'"), '
        b'locale.getpreferredencoding())')
    p = subprocess.Popen(
        [sys.executable, "-c", code],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
        env=env)
    stdout, stderr = p.communicate()
    jeżeli p.returncode == 1:
        # _Py_char2wchar() decoded b'\xff' as '\udcff' (b'\xff' is not
        # decodable from ASCII) and run_command() failed on
        # PyUnicode_AsUTF8String().  This is the expected behaviour on
        # Linux.
        pattern = b"Unable to decode the command z the command line:"
    albo_inaczej p.returncode == 0:
        # _Py_char2wchar() decoded b'\xff' as '\xff' even if the locale is
        # C and the locale encoding is ASCII.  It occurs on FreeBSD,
        # Solaris and Mac OS X.
        pattern = b"'\\xff' "
        # The output is followed by the encoding name, an alias to ASCII.
        # Examples: "US-ASCII" or "646" (ISO 646, on Solaris).
    inaczej:
        podnieś AssertionError("Unknown exit code: %s, output=%a" % (p.returncode, stdout))
    jeżeli nie stdout.startswith(pattern):
        podnieś AssertionError("%a doesn't start przy %a" % (stdout, pattern))
@unittest.skipUnless(sys.platform == 'darwin', 'test specific to Mac OS X')
def test_osx_utf8(self):
    def check_output(text):
        decoded = text.decode('utf-8', 'surrogateescape')
        expected = ascii(decoded).encode('ascii') + b'\n'
        env = os.environ.copy()
        # C locale gives ASCII locale encoding, but Python uses UTF-8
        # to parse the command line arguments on Mac OS X
        env['LC_ALL'] = 'C'
        p = subprocess.Popen(
            (sys.executable, "-c", "zaimportuj sys; print(ascii(sys.argv[1]))", text),
            stdout=subprocess.PIPE,
            env=env)
        stdout, stderr = p.communicate()
        self.assertEqual(stdout, expected)
        self.assertEqual(p.returncode, 0)
    # test valid utf-8
    text = 'e:\xe9, euro:\u20ac, non-bmp:\U0010ffff'.encode('utf-8')
    check_output(text)
    # test invalid utf-8
    text = (
        b'\xff'          # invalid byte
        b'\xc3\xa9'      # valid utf-8 character
        b'\xc3\xff'      # invalid byte sequence
        b'\xed\xa0\x80'  # lone surrogate character (invalid)
    )
    check_output(text)
def test_unbuffered_output(self):
    # Test expected operation of the '-u' switch
    dla stream w ('stdout', 'stderr'):
        # Binary is unbuffered
        code = ("zaimportuj os, sys; sys.%s.buffer.write(b'x'); os._exit(0)"
                % stream)
        rc, out, err = assert_python_ok('-u', '-c', code)
        data = err jeżeli stream == 'stderr' inaczej out
        self.assertEqual(data, b'x', "binary %s nie unbuffered" % stream)
        # Text is line-buffered
        code = ("zaimportuj os, sys; sys.%s.write('x\\n'); os._exit(0)"
                % stream)
        rc, out, err = assert_python_ok('-u', '-c', code)
        data = err jeżeli stream == 'stderr' inaczej out
        self.assertEqual(data.strip(), b'x',
                         "text %s nie line-buffered" % stream)
def test_unbuffered_input(self):
    # sys.stdin still works with '-u'
    code = ("zaimportuj sys; sys.stdout.write(sys.stdin.read(1))")
    p = spawn_python('-u', '-c', code)
    p.stdin.write(b'x')
    p.stdin.flush()
    data, rc = _kill_python_and_exit_code(p)
    self.assertEqual(rc, 0)
    self.assertPrawda(data.startswith(b'x'), data)
def test_large_PYTHONPATH(self):
    # A very long PYTHONPATH must survive intact into sys.path.
    path1 = "ABCDE" * 100
    path2 = "FGHIJ" * 100
    path = path1 + os.pathsep + path2
    code = """jeżeli 1:
        zaimportuj sys
        path = ":".join(sys.path)
        path = path.encode("ascii", "backslashreplace")
        sys.stdout.buffer.write(path)"""
    rc, out, err = assert_python_ok('-S', '-c', code,
                                    PYTHONPATH=path)
    self.assertIn(path1.encode('ascii'), out)
    self.assertIn(path2.encode('ascii'), out)
def test_empty_PYTHONPATH_issue16309(self):
    # On Posix, it is documented that setting PATH to the
    # empty string is equivalent to not setting PATH at all,
    # which is an exception to the rule that in a string like
    # "/bin::/usr/bin" the empty string in the middle gets
    # interpreted as '.'
    code = """jeżeli 1:
        zaimportuj sys
        path = ":".join(sys.path)
        path = path.encode("ascii", "backslashreplace")
        sys.stdout.buffer.write(path)"""
    rc1, out1, err1 = assert_python_ok('-c', code, PYTHONPATH="")
    rc2, out2, err2 = assert_python_ok('-c', code, __isolated=Nieprawda)
    # regarding to Posix specification, outputs should be equal
    # for empty and unset PYTHONPATH
    self.assertEqual(out1, out2)
def test_displayhook_unencodable(self):
    dla encoding w ('ascii', 'latin-1', 'utf-8'):
        # We are testing a PYTHON environment variable here, so we can't
        # use -E, -I, or script_helper (which uses them).  So instead we do
        # poor-man's isolation by deleting the PYTHON vars from env.
        env = {key:value dla (key,value) w os.environ.copy().items()
               jeżeli nie key.startswith('PYTHON')}
        env['PYTHONIOENCODING'] = encoding
        p = subprocess.Popen(
            [sys.executable, '-i'],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            env=env)
        # non-ascii, surrogate, non-BMP printable, non-BMP unprintable
        text = "a=\xe9 b=\uDC80 c=\U00010000 d=\U0010FFFF"
        p.stdin.write(ascii(text).encode('ascii') + b"\n")
        p.stdin.write(b'exit()\n')
        data = kill_python(p)
        escaped = repr(text).encode(encoding, 'backslashreplace')
        self.assertIn(escaped, data)
def check_input(self, code, expected):
    # Helper: feed b'abc<linesep>def' to a child interpreter running *code*
    # on stdin and compare its stripped stdout against *expected*.
    przy tempfile.NamedTemporaryFile("wb+") jako stdin:
        sep = os.linesep.encode('ASCII')
        stdin.write(sep.join((b'abc', b'def')))
        stdin.flush()
        stdin.seek(0)
        przy subprocess.Popen(
                (sys.executable, "-c", code),
                stdin=stdin, stdout=subprocess.PIPE) jako proc:
            stdout, stderr = proc.communicate()
        self.assertEqual(stdout.rstrip(), expected)
def test_stdin_readline(self):
    # Issue #11272: check that sys.stdin.readline() replaces '\r\n' by '\n'
    # on Windows (sys.stdin is opened in binary mode)
    self.check_input(
        "zaimportuj sys; print(repr(sys.stdin.readline()))",
        b"'abc\\n'")
def test_builtin_input(self):
    # Issue #11272: check that input() strips newlines ('\n' or '\r\n')
    self.check_input(
        "print(repr(input()))",
        b"'abc'")
def test_output_newline(self):
# Issue 13119 Newline dla print() should be \r\n on Windows.
code = """jeżeli 1:
zaimportuj sys
print(1)
print(2)
print(3, file=sys.stderr)
print(4, file=sys.stderr)"""
rc, out, err = assert_python_ok('-c', | |
<reponame>acordonez/e3sm_diags
from __future__ import print_function
import os
import cartopy.crs as ccrs
import cdutil
import matplotlib
import numpy as np
import numpy.ma as ma
from cartopy.mpl.ticker import LatitudeFormatter, LongitudeFormatter
from numpy.polynomial.polynomial import polyfit
from e3sm_diags.derivations.default_regions import regions_specs
from e3sm_diags.driver.utils.general import get_output_dir
from e3sm_diags.logger import custom_logger
from e3sm_diags.plot import get_colormap
matplotlib.use("Agg")
import matplotlib.colors as colors # isort:skip # noqa: E402
import matplotlib.pyplot as plt # isort:skip # noqa: E402
# Module-level logger for this plotter.
logger = custom_logger(__name__)
# Font settings for the centre title and the left/right side titles.
plotTitle = {"fontsize": 11.5}
plotSideTitle = {"fontsize": 9.5}
# Position and sizes of subplot axes in page coordinates (0 to 1)
# Each tuple is (left, bottom, width, height) for one of the three panels.
panel = [
    (0.1691, 0.6810, 0.6465, 0.2258),
    (0.1691, 0.3961, 0.6465, 0.2258),
    (0.1691, 0.1112, 0.6465, 0.2258),
]
# Border padding relative to subplot axes for saving individual panels
# (left, bottom, right, top) in page coordinates
border = (-0.06, -0.03, 0.13, 0.03)
def add_cyclic(var):
    """Re-select *var* over a full 360-degree longitude window.

    The "coe" (cyclic) longitude selector makes the first longitude appear
    on both edges, so a global contour plot has no seam.
    """
    start = var.getLongitude()[0]
    return var(longitude=(start, start + 360.0, "coe"))
def get_ax_size(fig, ax):
    """Return the (width, height) of axes *ax* in display pixels.

    The window extent is transformed into figure-inch coordinates and then
    scaled by the figure DPI.
    """
    extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
    return extent.width * fig.dpi, extent.height * fig.dpi
def determine_tick_step(degrees_covered):
    """Pick an axis tick spacing (in degrees) suited to the span covered.

    Spans above 180 deg get 60-deg ticks, above 60 deg get 30, above 20
    get 10, and anything smaller gets 1.
    """
    for threshold, step in ((180, 60), (60, 30), (20, 10)):
        if degrees_covered > threshold:
            return step
    return 1
def plot_panel_map(
    n, fig, proj, var, clevels, cmap, title, parameter, conf=None, stats={}
):
    """Draw one filled-contour map panel (index *n*) onto figure *fig*.

    Renders *var* on projection *proj* with contour levels *clevels* and
    colormap *cmap*, adds left/centre/right titles from the *title* triple,
    a colorbar, optional significance hatching from *conf*, and the stats
    printed beside the panel.

    NOTE(review): ``stats={}`` is a mutable default argument; it is only
    read here (never mutated), so it is harmless, but ``stats=None`` with a
    fallback would be the safer idiom.
    """
    # Wrap longitudes so the global field has no seam, then strip the
    # cdms2 wrapper down to a plain masked array for matplotlib.
    var = add_cyclic(var)
    lon = var.getLongitude()
    lat = var.getLatitude()
    var = ma.squeeze(var.asma())
    # Contour levels: pad with huge sentinels so "extend" bins catch
    # out-of-range values.
    levels = None
    norm = None
    if len(clevels) > 0:
        levels = [-1.0e8] + clevels + [1.0e8]
        norm = colors.BoundaryNorm(boundaries=levels, ncolors=256)
    # Contour plot
    ax = fig.add_axes(panel[n], projection=proj)
    region_str = parameter.regions[0]
    region = regions_specs[region_str]
    if "domain" in region.keys():  # type: ignore
        # Get domain to plot
        domain = region["domain"]  # type: ignore
    else:
        # Assume global domain
        domain = cdutil.region.domain(latitude=(-90.0, 90, "ccb"))
    kargs = domain.components()[0].kargs
    # Default to a whole-globe extent unless the region narrows it.
    lon_west, lon_east, lat_south, lat_north = (0, 360, -90, 90)
    if "longitude" in kargs:
        lon_west, lon_east, _ = kargs["longitude"]
    if "latitude" in kargs:
        lat_south, lat_north, _ = kargs["latitude"]
    lon_covered = lon_east - lon_west
    lon_step = determine_tick_step(lon_covered)
    xticks = np.arange(lon_west, lon_east, lon_step)
    # Subtract 0.50 to get 0 W to show up on the right side of the plot.
    # If less than 0.50 is subtracted, then 0 W will overlap 0 E on the left side of the plot.
    # If a number is added, then the value won't show up at all.
    xticks = np.append(xticks, lon_east - 0.50)
    lat_covered = lat_north - lat_south
    lat_step = determine_tick_step(lat_covered)
    yticks = np.arange(lat_south, lat_north, lat_step)
    yticks = np.append(yticks, lat_north)
    ax.set_extent([lon_west, lon_east, lat_south, lat_north], crs=proj)
    cmap = get_colormap(cmap, parameter)
    contours = ax.contourf(
        lon,
        lat,
        var,
        transform=ccrs.PlateCarree(),
        norm=norm,
        levels=levels,
        cmap=cmap,
        extend="both",
    )
    if conf is not None:
        conf = add_cyclic(conf)
        conf = ma.squeeze(conf.asma())
        # Values in conf will be either 0 or 1. Thus, there are only two levels -
        # represented by the no-hatching and hatching levels.
        ax.contourf(
            lon,
            lat,
            conf,
            2,
            transform=ccrs.PlateCarree(),
            norm=norm,
            colors="none",
            extend="both",
            hatches=[None, "//"],
        )
    # Full world would be aspect 360/(2*180) = 1
    ax.set_aspect((lon_east - lon_west) / (2 * (lat_north - lat_south)))
    ax.coastlines(lw=0.3)
    # title is (left, centre, right); any entry may be None to skip it.
    if title[0] is not None:
        ax.set_title(title[0], loc="left", fontdict=plotSideTitle)
    if title[1] is not None:
        ax.set_title(title[1], fontdict=plotTitle)
    if title[2] is not None:
        ax.set_title(title[2], loc="right", fontdict=plotSideTitle)
    ax.set_xticks(xticks, crs=ccrs.PlateCarree())
    ax.set_yticks(yticks, crs=ccrs.PlateCarree())
    lon_formatter = LongitudeFormatter(zero_direction_label=True, number_format=".0f")
    lat_formatter = LatitudeFormatter()
    ax.xaxis.set_major_formatter(lon_formatter)
    ax.yaxis.set_major_formatter(lat_formatter)
    ax.tick_params(labelsize=8.0, direction="out", width=1)
    ax.xaxis.set_ticks_position("bottom")
    ax.yaxis.set_ticks_position("left")
    # Place a vertical line in the middle of the plot - i.e. 180 degrees
    ax.axvline(x=0.5, color="k", linewidth=0.5)
    # Color bar: positioned relative to this panel's page coordinates.
    cbax = fig.add_axes((panel[n][0] + 0.6635, panel[n][1] + 0.0115, 0.0326, 0.1792))
    cbar = fig.colorbar(contours, cax=cbax)
    w, h = get_ax_size(fig, cbax)
    if levels is None:
        cbar.ax.tick_params(labelsize=9.0, length=0)
    else:
        # Choose a tick-label format (and matching padding) based on the
        # magnitude of the largest level, skipping the sentinel endpoints.
        maxval = np.amax(np.absolute(levels[1:-1]))
        if maxval < 1.0:
            fmt = "%5.3f"
            pad = 30
        elif maxval < 10.0:
            fmt = "%5.2f"
            pad = 25
        elif maxval < 100.0:
            fmt = "%5.1f"
            pad = 25
        else:
            fmt = "%6.1f"
            pad = 30
        cbar.set_ticks(levels[1:-1])
        labels = [fmt % level for level in levels[1:-1]]
        cbar.ax.set_yticklabels(labels, ha="right")
        cbar.ax.tick_params(labelsize=9.0, pad=pad, length=0)
    # Display stats (max/min/mean/std above the colorbar, rmse/corr below).
    if stats:
        top_stats = (stats["max"], stats["min"], stats["mean"], stats["std"])
        top_text = "Max\nMin\nMean\nSTD"
        fig.text(
            panel[n][0] + 0.6635,
            panel[n][1] + 0.2107,
            top_text,
            ha="left",
            fontdict=plotSideTitle,
        )
        fig.text(
            panel[n][0] + 0.7635,
            panel[n][1] + 0.2107,
            "%.2f\n%.2f\n%.2f\n%.2f" % top_stats,
            ha="right",
            fontdict=plotSideTitle,
        )
        if "rmse" in stats.keys():
            bottom_stats = (stats["rmse"], stats["corr"])
            bottom_text = "RMSE\nCORR"
            fig.text(
                panel[n][0] + 0.6635,
                panel[n][1] - 0.0205,
                bottom_text,
                ha="left",
                fontdict=plotSideTitle,
            )
            fig.text(
                panel[n][0] + 0.7635,
                panel[n][1] - 0.0205,
                "%.2f\n%.2f" % bottom_stats,
                ha="right",
                fontdict=plotSideTitle,
            )
    # Hatch text
    if conf is not None:
        hatch_text = "Hatched when pvalue < 0.05"
        fig.text(
            panel[n][0] + 0.25,
            panel[n][1] - 0.0355,
            hatch_text,
            ha="right",
            fontdict=plotSideTitle,
        )
def plot_map(
    reference,
    test,
    diff,
    metrics_dict,
    ref_confidence_levels,
    test_confidence_levels,
    parameter,
):
    """Render and save the three-panel (test / reference / diff) map figure.

    Draws each panel via :func:`plot_panel_map`, then writes the full figure
    in every requested ``output_format`` and each panel separately in every
    requested ``output_format_subplot``. Does nothing for non-matplotlib
    backends.
    """
    if parameter.backend not in ["cartopy", "mpl", "matplotlib"]:
        return
    # Create figure, projection
    fig = plt.figure(figsize=parameter.figsize, dpi=parameter.dpi)
    # Use 179.99 as central longitude due to https://github.com/SciTools/cartopy/issues/946
    # proj = ccrs.PlateCarree(central_longitude=180)
    proj = ccrs.PlateCarree(central_longitude=179.99)
    # Use non-regridded test and ref for stats,
    # so we have the original stats displayed
    # First panel
    plot_panel_map(
        0,
        fig,
        proj,
        test,
        parameter.contour_levels,
        parameter.test_colormap,
        (parameter.test_name_yrs, parameter.test_title, test.units),
        parameter,
        conf=test_confidence_levels,
        stats=metrics_dict["test"],
    )
    # Second panel
    plot_panel_map(
        1,
        fig,
        proj,
        reference,
        parameter.contour_levels,
        parameter.reference_colormap,
        (parameter.ref_name_yrs, parameter.reference_title, reference.units),
        parameter,
        conf=ref_confidence_levels,
        stats=metrics_dict["ref"],
    )
    # Third panel
    plot_panel_map(
        2,
        fig,
        proj,
        diff,
        parameter.diff_levels,
        parameter.diff_colormap,
        (None, parameter.diff_title, test.units),
        parameter,
        stats=metrics_dict["diff"],
    )
    # Figure title
    fig.suptitle(parameter.main_title, x=0.5, y=0.97, fontsize=15)
    # Prepare to save figure
    # get_output_dir => {parameter.results_dir}/{set_name}/{parameter.case_id}
    # => {parameter.results_dir}/enso_diags/{parameter.case_id}
    output_dir = get_output_dir(parameter.current_set, parameter)
    if parameter.print_statements:
        logger.info("Output dir: {}".format(output_dir))
    # get_output_dir => {parameter.orig_results_dir}/{set_name}/{parameter.case_id}
    # => {parameter.orig_results_dir}/enso_diags/{parameter.case_id}
    # NOTE(review): this call is identical to the one above, so
    # original_output_dir always equals output_dir here; the comment suggests
    # a different (original) results dir was intended — confirm upstream.
    original_output_dir = get_output_dir(parameter.current_set, parameter)
    if parameter.print_statements:
        logger.info("Original output dir: {}".format(original_output_dir))
    # parameter.output_file is defined in e3sm_diags/driver/enso_diags_driver.py
    # {parameter.results_dir}/enso_diags/{parameter.case_id}/{parameter.output_file}
    file_path = os.path.join(output_dir, parameter.output_file)
    # {parameter.orig_results_dir}/enso_diags/{parameter.case_id}/{parameter.output_file}
    original_file_path = os.path.join(original_output_dir, parameter.output_file)
    # Save figure
    for f in parameter.output_format:
        # Normalize "png" / ".png" / "image.png" style entries to an extension.
        f = f.lower().split(".")[-1]
        plot_suffix = "." + f
        plot_file_path = file_path + plot_suffix
        plt.savefig(plot_file_path)
        # Get the filename that the user has passed in and display that.
        original_plot_file_path = original_file_path + plot_suffix
        logger.info(f"Plot saved in: {original_plot_file_path}")
    # Save individual subplots
    for f in parameter.output_format_subplot:
        page = fig.get_size_inches()
        i = 0
        for p in panel:
            # Extent of subplot: convert (left, bottom, width, height) page
            # fractions to absolute corner coordinates, pad by `border`,
            # then scale to inches.
            subpage = np.array(p).reshape(2, 2)
            subpage[1, :] = subpage[0, :] + subpage[1, :]
            subpage = subpage + np.array(border).reshape(2, 2)
            subpage = list(((subpage) * page).flatten())
            extent = matplotlib.transforms.Bbox.from_extents(*subpage)
            # Save subplot
            subplot_suffix = ".%i." % (i) + f
            subplot_file_path = file_path + subplot_suffix
            plt.savefig(subplot_file_path, bbox_inches=extent)
            # Get the filename that the user has passed in and display that.
            original_subplot_file_path = original_file_path + subplot_suffix
            logger.info(f"Sub-plot saved in: {original_subplot_file_path}")
            i += 1
    plt.close()
def plot_scatter(x, y, parameter):
fig = plt.figure(figsize=parameter.figsize, dpi=parameter.dpi)
test_color = "black"
ref_color = "red"
test_title = "Test" if parameter.test_title == "" else parameter.test_title
if parameter.test_name_yrs:
test_title += " : {}".format(parameter.test_name_yrs)
ref_title = (
"Reference" if parameter.reference_title == "" else parameter.reference_title
)
if parameter.ref_name_yrs:
ref_title += " : {}".format(parameter.ref_name_yrs)
# https://stackoverflow.com/questions/14827650/pyplot-scatter-plot-marker-size
plt.scatter(
x["test"],
y["test"],
label=test_title,
color=test_color,
marker="s",
s=8,
)
plt.scatter(x["ref"], y["ref"], label=ref_title, color=ref_color, marker="o", s=8)
for value_type in ["test", "ref"]:
if value_type == "test":
type_str = "Test"
type_color = test_color
x_range = (min(x["test"]), max(x["test"]))
else:
type_str = "Reference"
type_color = ref_color
x_range = (min(x["ref"]), max(x["ref"]))
# https://stackoverflow.com/questions/35091879/merge-2-arrays-vertical-to-tuple-numpy
# Two parallel lists of values
values = np.array((x[value_type], y[value_type]))
# Zip the values together (i.e., list of (x,y) instead of (list of x, list of y)
values = values.T
if y["var"] == "TAUX":
value_strs = [""]
else:
value_strs = ["positive ", "negative "]
for value_str in value_strs:
# https://stackoverflow.com/questions/24219841/numpy-where-on-a-2d-matrix
if value_str == "positive ":
# For all rows (x,y), choose the rows where x > 0
rows = np.where(values[:, 0] > 0)
smaller_x_range = (0, x_range[1])
linestyle = "-"
elif value_str == "negative ":
# For all rows (x,y), choose the rows where x < 0
rows = np.where(values[:, 0] < 0)
smaller_x_range = (x_range[0], 0)
linestyle = "--"
elif value_str == "":
rows = None
smaller_x_range = x_range
linestyle = "-"
if rows:
# Get the filtered zip (list of (x,y) where x > 0 or x < 0)
filtered_values = values[rows]
else:
filtered_values = values
# Get the filtered parallel lists (i.e., (list of x, list of y))
filtered_values = filtered_values.T
# https://stackoverflow.com/questions/19068862/how-to-overplot-a-line-on-a-scatter-plot-in-python
b, m = polyfit(filtered_values[0], filtered_values[1], 1)
label = "Linear fit for %sTS anomalies: %s (slope = %.2f)" % (
value_str,
type_str,
m,
| |
<filename>foundrytoencounter.py
# vim: set tabstop=8 softtabstop=0 expandtab shiftwidth=4 smarttab : #
import xml.etree.cElementTree as ET
import json
import re
import sys
import os
import tempfile
import shutil
import argparse
import uuid
from slugify import slugify
import zipfile
import urllib.parse
import urllib.request
import math
import PIL.Image
import PIL.ImageOps
import random
import html
import magic
# Argument Parser
# Command-line interface; when no source file is given on macOS/Windows the
# tool falls back to the graphical interface instead.
parser = argparse.ArgumentParser(
    description="Converts Foundry Modules/Worlds to EncounterPlus Modules")
parser.add_argument(
    '-o',
    dest="output",
    action='store',
    default=None,
    help="output into given output (default: [name].module)")
parser.add_argument(
    '-c',
    dest="compendium",
    action='store_const',
    const=True,
    default=False,
    help="create compendium content with actors and items")
parser.add_argument(
    '-j',
    dest="jpeg",
    action='store_const',
    const=".jpg",
    default=".png",
    help="convert WebP to JPG instead of PNG")
# The positional source file and -gui are mutually exclusive.
parserg = parser.add_mutually_exclusive_group()
parserg.add_argument(
    dest="srcfile",
    action='store',
    default=False,
    nargs='?',
    help="foundry file to convert")
parserg.add_argument(
    '-gui',
    dest="gui",
    action='store_const',
    default=False,
    const=True,
    help="use graphical interface")
args = parser.parse_args()
if not args.srcfile and not args.gui:
    # No input at all: default to the GUI on desktop platforms, otherwise
    # print usage and exit.
    if sys.platform in ['darwin','win32']:
        args.gui = True
    else:
        parser.print_help()
        exit()
# Word forms for small integers.
numbers = ['zero','one','two','three','four']
# D&D ability and skill display names keyed by Foundry's abbreviations.
stats = {"str":"Strength","dex":"Dexterity","con":"Constitution","int":"Intelligence","wis":"Wisdom","cha":"Charisma"}
skills = {
    "acr": "Acrobatics",
    "ani": "Animal Handling",
    "arc": "Arcana",
    "ath": "Athletics",
    "dec": "Deception",
    "his": "History",
    "ins": "Insight",
    "inv": "Investigation",
    "itm": "Intimidation",
    "med": "Medicine",
    "nat": "Nature",
    "per": "Persuasion",
    "prc": "Perception",
    "prf": "Performance",
    "rel": "Religion",
    "slt": "Sleight of Hand",
    "ste": "Stealth",
    "sur": "Survival"
}
def indent(elem, level=0):
    """Pretty-print *elem* in place by inserting newline/indent whitespace
    into the ``text``/``tail`` fields of the tree, then return it."""
    child_pad = "\n" + level * " "
    close_pad = "\n" + (level - 1) * " "
    has_children = len(elem) > 0
    if has_children:
        if not elem.text or not elem.text.strip():
            # Open-tag newline so the first child starts on its own line.
            elem.text = child_pad + " "
        if not elem.tail or not elem.tail.strip():
            elem.tail = child_pad
        for child in elem:
            indent(child, level + 1)
        # After recursing, line the closing tag up with the parent level.
        if not elem.tail or not elem.tail.strip():
            elem.tail = close_pad
    elif level and (not elem.tail or not elem.tail.strip()):
        elem.tail = close_pad
    return elem
def fixRoll(m):
    """Rewrite a dice-roll regex match as an EncounterPlus /roll hyperlink.

    Group 1 is the dice expression; optional group 2 is a modifier that, if
    present, becomes an extra path segment in the link target.
    """
    dice = m.group(1)
    modifier = m.group(2)
    if not modifier:
        return '<a href="/roll/{0}">{0}</a>'.format(dice)
    return '<a href="/roll/{0}/{1}">{0}</a>'.format(dice, modifier)
def convert(args=args,worker=None):
def createMap(map,mapgroup):
if "padding" in map:
map["offsetX"] = math.ceil((map["padding"]*map["width"])/map["grid"])*map["grid"]
map["offsetY"] = math.ceil((map["padding"]*map["height"])/map["grid"])*map["grid"]
else:
map["offsetX"] = (map["width"] + math.ceil(0.5 * map["width"] / (map["grid"] * 2)) * (map["grid"] * 2) - map["width"]) * 0.5
map["offsetY"] = (map["height"] + math.ceil(0.5 * map["height"] / (map["grid"] * 2)) * (map["grid"] * 2) - map["height"]) * 0.5
mapbaseslug = slugify(map['name'])
mapslug = mapbaseslug + str(len([i for i in slugs if mapbaseslug in i]))
slugs.append(mapslug)
if not map["img"] and map["tiles"][0]["width"] >= map["width"] and map["tiles"][0]["height"] >= map["height"]:
bg = map["tiles"].pop(0)
bg["img"] = urllib.parse.unquote(bg["img"])
imgext = os.path.splitext(os.path.basename(urllib.parse.urlparse(bg["img"]).path))[1]
if not imgext:
imgext = args.jpeg
if imgext == ".webp":
PIL.Image.open(bg["img"]).save(os.path.join(tempdir,os.path.splitext(bg["img"])[0]+args.jpeg))
os.remove(bg["img"])
map["img"] = os.path.splitext(bg["img"])[0]+args.jpeg
else:
map["img"] = bg["img"]
map["shiftX"] = bg["x"]-map["offsetX"]
map["shiftY"] = bg["y"]-map["offsetY"]
map["width"] = bg["width"]
map["height"] = bg["height"]
map["rescale"] = 1.0
if map["width"] > 8192 or map["height"] > 8192:
map["rescale"] = 8192.0/map["width"] if map["width"] >= map["height"] else 8192.0/map["height"]
map["grid"] = round(map["grid"]*map["rescale"])
map["width"] *= map["rescale"]
map["height"] *= map["rescale"]
map['shiftX'] *= map["rescale"]
map['shiftY'] *= map["rescale"]
mapentry = ET.SubElement(module,'map',{'id': map['_id'],'parent': mapgroup,'sort': str(int(map["sort"]))})
ET.SubElement(mapentry,'name').text = map['name']
ET.SubElement(mapentry,'slug').text = mapslug
ET.SubElement(mapentry,'gridSize').text = str(round(map["grid"]))#*(5.0/map["gridDistance"])))
ET.SubElement(mapentry,'gridScale').text = str(round(map["gridDistance"]))#*((5.0/map["gridDistance"]))))
ET.SubElement(mapentry,'gridUnits').text = str(map["gridUnits"])
ET.SubElement(mapentry,'gridVisible').text = "YES" if map['gridAlpha'] > 0 else "NO"
ET.SubElement(mapentry,'gridColor').text = map['gridColor']
ET.SubElement(mapentry,'gridOffsetX').text = str(round(map['shiftX']))
ET.SubElement(mapentry,'gridOffsetY').text = str(round(map['shiftY']))
if map["img"]:
map["img"] = urllib.parse.unquote(map["img"])
imgext = os.path.splitext(os.path.basename(map["img"]))[1]
if imgext == ".webp":
ET.SubElement(mapentry,'image').text = os.path.splitext(map["img"])[0]+args.jpeg
else:
ET.SubElement(mapentry,'image').text = map["img"]
with PIL.Image.open(map["img"]) as img:
if img.width > 8192 or img.height > 8192:
scale = 8192/img.width if img.width>=img.height else 8192/img.height
if args.gui:
worker.outputLog(" - Resizing map from {}x{} to {}x{}".format(img.width,img.height,round(img.width*scale),round(img.height*scale)))
img = img.resize((round(img.width*scale),round(img.height*scale)))
if imgext == ".webp":
if args.gui:
worker.outputLog(" - Converting map from .webp to " + args.jpeg)
img.save(os.path.join(tempdir,os.path.splitext(map["img"])[0]+args.jpeg))
os.remove(map["img"])
else:
img.save(os.path.join(tempdir,map["img"]))
elif imgext == ".webp":
if args.gui:
worker.outputLog(" - Converting map from .webp to " + args.jpeg)
img.save(os.path.join(tempdir,os.path.splitext(map["img"])[0]+args.jpeg))
os.remove(map["img"])
if map["height"] != img.height or map["width"] != img.width:
map["scale"] = map["width"]/img.width if map["width"]/img.width >= map["height"]/img.height else map["height"]/img.height
else:
map["scale"] = 1.0
else:
print(" |> Map Error NO BG FOR: {}".format(map["name"]),file=sys.stderr,end='')
map["scale"] = 1.0
with PIL.Image.new('1', (map["width"], map["height"]), color = 'black') as img:
if img.width > 8192 or img.height > 8192:
scale = 8192/img.width if img.width>=img.height else 8192/img.height
if args.gui:
worker.outputLog(" - Resizing map from {}x{} to {}x{}".format(img.width,img.height,round(img.width*scale),round(img.height*scale)))
img = img.resize((round(img.width*scale),round(img.height*scale)))
img.save(os.path.join(tempdir,mapslug+"_bg.png"))
if map["height"] != img.height or map["width"] != img.width:
map["scale"] = map["width"]/img.width if map["width"]/img.width >= map["height"]/img.height else map["height"]/img.height
else:
map["scale"] = 1.0
ET.SubElement(mapentry,'image').text = mapslug+"_bg.png"
if 'thumb' in map and map["thumb"]:
imgext = os.path.splitext(os.path.basename(map["img"]))[1]
if imgext == ".webp":
ET.SubElement(mapentry,'snapshot').text = os.path.splitext(map["thumb"])[0]+args.jpeg
PIL.Image.open(map["thumb"]).save(os.path.join(tempdir,os.path.splitext(map["thumb"])[0]+args.jpeg))
os.remove(map["thumb"])
else:
ET.SubElement(mapentry,'snapshot').text = map["thumb"]
ET.SubElement(mapentry,'scale').text = str(map["scale"])
if "walls" in map and len(map["walls"])>0:
ET.SubElement(mapentry,'lineOfSight').text = "YES"
for i in range(len(map["walls"])):
p = map["walls"][i]
print("\rwall {}".format(i),file=sys.stderr,end='')
pathlist = [
(p["c"][0]-map["offsetX"])*map["rescale"],
(p["c"][1]-map["offsetY"])*map["rescale"],
(p["c"][2]-map["offsetX"])*map["rescale"],
(p["c"][3]-map["offsetY"])*map["rescale"]
]
isConnected = False
for pWall in mapentry.iter('wall'):
lastpath = pWall.find('data')
pWallID=pWall.get('id')
if lastpath != None and lastpath.text.endswith(",{:.1f},{:.1f}".format(pathlist[0],pathlist[1])):
wType = pWall.find('type')
if p['door'] > 0:
if p['door'] == 1 and wType.text != 'door':
continue
if p['door'] == 2 and wType.text != 'secretDoor':
continue
if p['ds'] > 0:
door = pWall.find('door')
if door == None:
continue
elif p['ds'] == 1 and door.text != 'open':
continue
elif p['ds'] == 2 and door.text != 'locked':
continue
elif wType.text in ['door','secretDoor']:
continue
elif p['move'] == 0 and p['sense'] == 1 and wType.text != 'ethereal':
continue
elif p['move'] == 1 and p['sense'] == 0 and wType.text != 'invisible':
continue
elif p['move'] == 1 and p['sense'] == 2 and wType.text != 'terrain':
continue
if 'dir' in p:
wSide = pWall.find('side')
if wSide == None and p['dir'] > 0:
continue
if p['dir'] == 1 and wSide.text != 'left':
continue
if p['dir'] == 2 and wSide.text != 'right':
continue
isConnected = True
pWall.set('id',pWallID+' '+p['_id'])
lastpath.text += ','+','.join("{:.1f}".format(x) for x in pathlist)
break
if not isConnected:
wall = ET.SubElement(mapentry,'wall',{'id': p['id'] if 'id' in p else p['_id'] })
lastpath = ET.SubElement(wall,'data')
lastpath.text = ','.join("{:.1f}".format(x) for x in pathlist)
if not isConnected:
if 'door' in p and p['door'] == 1:
ET.SubElement(wall,'type').text = 'door'
ET.SubElement(wall,'color').text = '#00ffff'
if p['ds'] > 0:
ET.SubElement(wall,'door').text = 'locked' if p['ds'] == 2 else 'open'
elif p['door'] == 2:
ET.SubElement(wall,'type').text = 'secretDoor'
ET.SubElement(wall,'color').text = '#00ffff'
if p['ds'] > 0:
ET.SubElement(wall,'door').text = 'locked' if p['ds'] == 2 else 'open'
elif p['move'] == 0 and p['sense'] == 1:
ET.SubElement(wall,'type').text = 'ethereal'
ET.SubElement(wall,'color').text = '#7f007f'
elif p['move'] == 1 and p['sense'] == 0:
ET.SubElement(wall,'type').text = 'invisible'
ET.SubElement(wall,'color').text = '#ff00ff'
elif p['move'] == 1 and p['sense'] == 2:
ET.SubElement(wall,'type').text = 'terrain'
ET.SubElement(wall,'color').text = '#ffff00'
else:
ET.SubElement(wall,'type').text = 'normal'
ET.SubElement(wall,'color').text = '#ff7f00'
if 'dir' in p and p['dir'] > 0:
ET.SubElement(wall,'side').text = 'left' if p['dir'] == 1 else 'right'
if 'door' in p and p['door'] > 0:
p["stroke"] = '#00ffff'
else:
p["stroke"] = '#ff7f00'
p["stroke_width"] = 5
p["layer"] = "walls"
ET.SubElement(wall,'generated').text = 'YES'
if 'tiles' in map:
for i in range(len(map["tiles"])):
image = map["tiles"][i]
image["img"] = urllib.parse.unquote(image["img"])
print("\rtiles [{}/{}]".format(i,len(map["tiles"])),file=sys.stderr,end='')
tile = ET.SubElement(mapentry,'tile')
ET.SubElement(tile,'x').text = str(round((image["x"]-map["offsetX"]+(image["width"]*image["scale"]/2))*map["rescale"]))
ET.SubElement(tile,'y').text = str(round((image["y"]-map["offsetY"]+(image["height"]*image["scale"]/2))*map["rescale"]))
ET.SubElement(tile,'zIndex').text = str(image["z"])
ET.SubElement(tile,'width').text = str(round(image["width"]*image["scale"]*map["rescale"]))
ET.SubElement(tile,'height').text = str(round(image["height"]*image["scale"]*map["rescale"]))
ET.SubElement(tile,'opacity').text = "1.0"
ET.SubElement(tile,'rotation').text = str(image["rotation"])
ET.SubElement(tile,'locked').text = "YES" if image["locked"] else "NO"
ET.SubElement(tile,'layer').text = "object"
ET.SubElement(tile,'hidden').text = "YES" if image["hidden"] else "NO"
asset = ET.SubElement(tile,'asset')
ET.SubElement(asset,'name').text = os.path.splitext(os.path.basename(image["img"]))[0]
ET.SubElement(asset,'type').text = "image"
imgext = os.path.splitext(os.path.basename(image["img"]))[1]
if image["img"].startswith("http"):
urllib.request.urlretrieve(image["img"],os.path.basename(image["img"]))
image["img"] = os.path.basename(image["img"])
img = PIL.Image.open(image["img"])
if imgext == ".webp":
ET.SubElement(asset,'resource').text = os.path.splitext(image["img"])[0]+".png"
if img.width > 4096 or img.height > 4096:
scale = 4095/img.width if img.width>=img.height else 4095/img.height
img = img.resize((round(img.width*scale),round(img.height*scale)))
if args.gui:
worker.outputLog(" - Converting tile from webp to png")
img.save(os.path.join(tempdir,os.path.splitext(image["img"])[0]+".png"))
os.remove(image["img"])
else:
ET.SubElement(asset,'resource').text = image["img"]
if img.width > 4096 or img.height > 4096:
scale = 4095/img.width if img.width>=img.height else 4095/img.height
img = img.resize((round(img.width*scale),round(img.height*scale)))
img.save(os.path.join(tempdir,image["img"]))
if 'lights' in map:
for i in range(len(map["lights"])):
print("\rlights [{}/{}]".format(i,len(map["lights"])),file=sys.stderr,end='')
light = map["lights"][i]
tile = ET.SubElement(mapentry,'tile')
ET.SubElement(tile,'x').text = str(round((light["x"]-map["offsetX"])))
ET.SubElement(tile,'y').text = str(round((light["y"]-map["offsetY"])))
ET.SubElement(tile,'zIndex').text = str(0)
ET.SubElement(tile,'width').text = str(round(50*map["rescale"]))
ET.SubElement(tile,'height').text = str(round(50*map["rescale"]))
ET.SubElement(tile,'opacity').text = "1.0"
ET.SubElement(tile,'rotation').text = str(0)
ET.SubElement(tile,'locked').text = "YES"
ET.SubElement(tile,'layer').text = "dm"
ET.SubElement(tile,'hidden').text = "YES"
asset = ET.SubElement(tile,'asset', {'id': str(uuid.uuid5(moduuid,mapslug+"/lights/"+str(i)))})
ET.SubElement(asset,'name').text = "Light {}".format(i+1)
lightel = ET.SubElement(tile,'light', {'id': str(uuid.uuid5(moduuid,mapslug+"/lights/"+str(i)+"light"))})
ET.SubElement(lightel,'radiusMax').text = str(light["dim"]*map["gridDistance"])
ET.SubElement(lightel,'radiusMin').text = str(light["bright"]*map["gridDistance"])
ET.SubElement(lightel,'color').text = light["tintColor"] if "tintColor" in light and light["tintColor"] else "#ffffff"
ET.SubElement(lightel,'opacity').text = str(light["tintAlpha"])
ET.SubElement(lightel,'alwaysVisible').text = "YES" if light["t"] == "u" else "NO"
if 'tokens' in map and len(map['tokens']) > 0:
encentry = ET.SubElement(module,'encounter',{'id': str(uuid.uuid5(moduuid,mapslug+"/encounter")),'parent': map['_id']})
ET.SubElement(encentry,'name').text = map['name'] + " Encounter"
ET.SubElement(encentry,'slug').text = slugify(map['name'] + " Encounter")
for token in map['tokens']:
combatant = ET.SubElement(encentry,'combatant')
ET.SubElement(combatant,'name').text = token['name']
ET.SubElement(combatant,'role').text = "hostile" if token['disposition'] < 0 else "friendly" if token['disposition'] > 0 else "neutral"
ET.SubElement(combatant,'x').text = | |
<gh_stars>100-1000
##########################################################################
#
# Copyright (c) 2012, <NAME>. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import subprocess
import unittest
import imath
import IECore
import IECoreScene
import IECoreImage
import IECoreArnold
import Gaffer
import GafferOSL
import GafferTest
import GafferImage
import GafferScene
import GafferSceneTest
import GafferArnold
# Decorator that executes in a subprocess with our test metadata file on the
# `ARNOLD_PLUGIN_PATH`.
def withMetadata( func ) :
    """Decorator that re-executes the decorated test in a subprocess with the
    test metadata directory appended to `ARNOLD_PLUGIN_PATH`.

    If the metadata directory is already on the plugin path (i.e. we *are*
    the subprocess), the test runs directly instead.
    """

    def wrapper( self ) :

        metadataPath = os.path.join( os.path.dirname( __file__ ), "metadata" )
        # Use `.get()` so a missing `ARNOLD_PLUGIN_PATH` doesn't raise
        # KeyError before we've had a chance to add our directory to it.
        pluginPath = os.environ.get( "ARNOLD_PLUGIN_PATH", "" )
        if metadataPath not in pluginPath.split( ":" ) :
            env = os.environ.copy()
            env["ARNOLD_PLUGIN_PATH"] = pluginPath + ":" + metadataPath
            try :
                subprocess.check_output(
                    [ "gaffer", "test", "GafferArnoldTest.ArnoldShaderTest." + func.__name__ ],
                    env = env, stderr = subprocess.STDOUT
                )
            except subprocess.CalledProcessError as e :
                self.fail( e.output )
        else :
            func( self )

    return wrapper
class ArnoldShaderTest( GafferSceneTest.SceneTestCase ) :
def test( self ) :
    """Loading a shader by name should succeed without error."""

    shader = GafferArnold.ArnoldShader()
    shader.loadShader( "noise" )
def testAttributes( self ) :
    """`attributes()` should expose a single-shader ShaderNetwork under `ai:surface`."""

    shader = GafferArnold.ArnoldShader()
    shader.loadShader( "utility" )

    network = shader.attributes()["ai:surface"]
    self.assertIsInstance( network, IECoreScene.ShaderNetwork )
    self.assertEqual( len( network ), 1 )
    self.assertEqual( network.outputShader().name, "utility" )
def testParameterRepresentation( self ) :
    """Arnold parameter types should map to the expected Gaffer plug types."""

    node = GafferArnold.ArnoldShader()
    node.loadShader( "wireframe" )

    expectedPlugTypes = {
        "line_width" : Gaffer.FloatPlug,
        "fill_color" : Gaffer.Color3fPlug,
        "line_color" : Gaffer.Color3fPlug,
        "raster_space" : Gaffer.BoolPlug,
        "edge_type" : Gaffer.StringPlug,
    }
    for parameterName, plugType in expectedPlugTypes.items() :
        self.assertIsInstance( node["parameters"][parameterName], plugType )

    # The `name` parameter is the node name, not a shader parameter.
    self.assertNotIn( "name", node["parameters"] )
def testParameterUse( self ) :
    """Values set on parameter plugs should appear in the generated shader."""

    node = GafferArnold.ArnoldShader()
    node.loadShader( "wireframe" )

    node["parameters"]["line_width"].setValue( 10 )
    node["parameters"]["fill_color"].setValue( imath.Color3f( .25, .5, 1 ) )
    node["parameters"]["raster_space"].setValue( False )
    node["parameters"]["edge_type"].setValue( "polygons" )

    shader = node.attributes()["ai:surface"].outputShader()
    expectedParameters = {
        "line_width" : IECore.FloatData( 10 ),
        "fill_color" : IECore.Color3fData( imath.Color3f( .25, .5, 1 ) ),
        # `line_color` is never set, so should carry its default.
        "line_color" : IECore.Color3fData( imath.Color3f( 0 ) ),
        "raster_space" : IECore.BoolData( False ),
        "edge_type" : IECore.StringData( "polygons" ),
    }
    for parameterName, expectedValue in expectedParameters.items() :
        self.assertEqual( shader.parameters[parameterName], expectedValue )
def testSerialisation( self ) :
    """Plug types should survive a serialise/execute round trip."""

    script = Gaffer.ScriptNode()
    script["n"] = GafferArnold.ArnoldShader()
    script["n"].loadShader( "wireframe" )

    serialisation = script.serialise()

    script = Gaffer.ScriptNode()
    script.execute( serialisation )

    for parameterName, plugType in [
        ( "line_width", Gaffer.FloatPlug ),
        ( "fill_color", Gaffer.Color3fPlug ),
        ( "line_color", Gaffer.Color3fPlug ),
        ( "raster_space", Gaffer.BoolPlug ),
        ( "edge_type", Gaffer.StringPlug ),
    ] :
        self.assertIsInstance( script["n"]["parameters"][parameterName], plugType )
def testHash( self ) :
    """`attributesHash()` should change when the shader or its values change."""

    node = GafferArnold.ArnoldShader()
    emptyHash = node.attributesHash()

    node.loadShader( "noise" )
    loadedHash = node.attributesHash()
    self.assertNotEqual( emptyHash, loadedHash )

    node["parameters"]["octaves"].setValue( 10 )
    editedHash = node.attributesHash()
    self.assertNotEqual( loadedHash, editedHash )
def testShaderNetwork( self ) :
    """Connected upstream shaders should appear in the output ShaderNetwork,
    with one connection per destination parameter."""

    s = GafferArnold.ArnoldShader( "surface" )
    s.loadShader( "standard_surface" )

    n = GafferArnold.ArnoldShader( "noise" )
    n.loadShader( "noise" )

    # Drive two parameters from the same upstream shader.
    s["parameters"]["base_color"].setInput( n["out"] )
    s["parameters"]["specular_color"].setInput( n["out"] )

    network = s.attributes()["ai:surface"]
    self.assertEqual( len( network ), 2 )

    self.assertEqual( network.getShader( "noise" ).type, "ai:shader" )
    self.assertEqual( network.getShader( "noise" ).name, "noise" )

    self.assertEqual( network.getShader( "surface" ).type, "ai:surface" )
    self.assertEqual( network.getShader( "surface" ).name, "standard_surface" )

    # One connection per destination parameter, even though the source is shared.
    self.assertEqual(
        network.inputConnections( "surface" ),
        [
            network.Connection( ( "noise", "" ), ( "surface", "base_color" ) ),
            network.Connection( ( "noise", "" ), ( "surface", "specular_color" ) ),
        ]
    )
def testShaderNetworkRender( self ) :
    """Rendering with a parameter-to-parameter connection should bake the
    upstream colour into the rendered image."""

    f = GafferArnold.ArnoldShader()
    f.loadShader( "flat" )
    f["parameters"]["color"].setValue( imath.Color3f( 1, 1, 0 ) )

    s = GafferArnold.ArnoldShader()
    s.loadShader( "utility" )
    s["parameters"]["color"].setInput( f["parameters"]["color"] )

    r = GafferScene.Private.IECoreScenePreview.Renderer.create(
        "Arnold",
        GafferScene.Private.IECoreScenePreview.Renderer.RenderType.Batch
    )

    # Render to an in-memory display driver that we can query below.
    r.output(
        "test",
        IECoreScene.Output(
            "test",
            "ieDisplay",
            "rgba",
            {
                "driverType" : "ImageDisplayDriver",
                "handle" : "test"
            }
        )
    )

    mesh = r.object(
        "mesh",
        IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -1 ), imath.V2f( 1 ) ) ),
        r.attributes( s.attributes() )
    )
    mesh.transform( imath.M44f().translate( imath.V3f( 0, 0, -5 ) ) )

    r.render()

    imagePrimitive = IECoreImage.ImageDisplayDriver.removeStoredImage( "test" )

    # Centre pixel: column 320 of row 240 — assumes the default 640x480
    # output resolution (TODO confirm).
    pixelPos = 320 + 240 * 640
    self.assertAlmostEqual( imagePrimitive["R"][pixelPos], 1, 5 )
    self.assertAlmostEqual( imagePrimitive["G"][pixelPos], 1, 5 )
    self.assertEqual( imagePrimitive["B"][pixelPos], 0 )
def testShaderNetworkHash( self ) :
    """Changes anywhere in a shader network should change the downstream hash."""

    surface = GafferArnold.ArnoldShader()
    surface.loadShader( "standard_surface" )
    baseHash = surface.attributesHash()

    noise = GafferArnold.ArnoldShader()
    noise.loadShader( "noise" )
    surface["parameters"]["base_color"].setInput( noise["out"] )
    connectedHash = surface.attributesHash()
    self.assertNotEqual( baseHash, connectedHash )

    # Editing the upstream node must also change the downstream hash.
    noise["parameters"]["octaves"].setValue( 3 )
    editedHash = surface.attributesHash()
    self.assertNotEqual( editedHash, connectedHash )
    self.assertNotEqual( editedHash, baseHash )
def testShaderNetworkHashWithNonShaderInputs( self ) :
    """Non-shader inputs (here a Random node driven by the context) should
    still affect the attributes hash."""

    s = GafferArnold.ArnoldShader()
    s.loadShader( "standard_surface" )

    n = GafferArnold.ArnoldShader()
    n.loadShader( "noise" )
    s["parameters"]["base_color"].setInput( n["out"] )

    r = Gaffer.Random()
    r["contextEntry"].setValue( "a" )

    n["parameters"]["amplitude"].setInput( r["outFloat"] )

    c = Gaffer.Context()
    with c :
        # Different context entries give the Random node different values,
        # which must propagate to different hashes downstream.
        c["a"] = "/one/two/1"
        h1 = s.attributesHash()
        c["a"] = "/one/two/2"
        h2 = s.attributesHash()
        self.assertNotEqual( h1, h2 )
def testStandardShaderAcceptsImageInputs( self ) :
    """Colour parameters should accept image shader outputs, connecting at
    the top level and component-wise."""

    surface = GafferArnold.ArnoldShader()
    surface.loadShader( "standard_surface" )

    texture = GafferArnold.ArnoldShader()
    texture.loadShader( "image" )

    surface["parameters"]["emission_color"].setInput( texture["out"] )

    self.assertTrue( surface["parameters"]["emission_color"].getInput().isSame( texture["out"] ) )
    for component in range( 0, 3 ) :
        self.assertTrue( surface["parameters"]["emission_color"][component].getInput().isSame( texture["out"][component] ) )
def testDirtyPropagationThroughNetwork( self ) :
    """Editing a shader two nodes upstream should dirty the downstream `out`."""

    s = GafferArnold.ArnoldShader()
    s.loadShader( "standard_surface" )

    n1 = GafferArnold.ArnoldShader()
    n1.loadShader( "noise" )

    n2 = GafferArnold.ArnoldShader()
    n2.loadShader( "noise" )

    # Chain : n2 -> n1 -> s
    s["parameters"]["base_color"].setInput( n1["out"] )
    n1["parameters"]["color1"].setInput( n2["out"] )

    cs = GafferTest.CapturingSlot( s.plugDirtiedSignal() )

    n2["parameters"]["amplitude"].setValue( 20 )

    self.assertTrue( "ArnoldShader.out" in [ x[0].fullName() for x in cs ] )
def testConnectionsBetweenParameters( self ) :
    """A parameter-to-parameter connection on the same node should be baked
    into the destination parameter's value."""

    addShader = GafferArnold.ArnoldShader()
    addShader.loadShader( "add" )

    addShader["parameters"]["input1"].setValue( imath.Color3f( 0.1, 0.2, 0.3 ) )
    addShader["parameters"]["input2"].setInput( addShader["parameters"]["input1"] )

    shader = addShader.attributes()["ai:surface"].outputShader()

    expected = imath.Color3f( 0.1, 0.2, 0.3 )
    self.assertEqual( shader.parameters["input1"].value, expected )
    self.assertEqual( shader.parameters["input2"].value, expected )
def testDisabling( self ) :
    """Disabling a shader node should remove its attributes entirely."""

    s = GafferArnold.ArnoldShader()
    s.loadShader( "standard_surface" )

    attributesHash = s.attributesHash()
    attributes = s.attributes()
    self.assertEqual( len( attributes ), 1 )
    self.assertEqual( attributes["ai:surface"].outputShader().name, "standard_surface" )

    self.assertTrue( s["enabled"].isSame( s.enabledPlug() ) )

    s["enabled"].setValue( False )

    # Disabling must change the hash and yield empty attributes.
    attributesHash2 = s.attributesHash()
    self.assertNotEqual( attributesHash2, attributesHash )

    attributes2 = s.attributes()
    self.assertEqual( len( attributes2 ), 0 )
def testDisablingInNetwork( self ) :
    """Disabling an upstream shader should leave the downstream shader's
    unconnected parameters untouched."""

    s = GafferArnold.ArnoldShader( "s" )
    s.loadShader( "standard_surface" )

    f = GafferArnold.ArnoldShader( "f" )
    f.loadShader( "flat" )

    s["parameters"]["specular_color"].setInput( f["out"] )

    attributesHash = s.attributesHash()
    attributes = s.attributes()
    self.assertEqual( len( attributes ), 1 )
    self.assertEqual( attributes["ai:surface"].getShader( "s" ).name, "standard_surface" )
    self.assertEqual( attributes["ai:surface"].getShader( "f" ).name, "flat" )

    self.assertTrue( s["enabled"].isSame( s.enabledPlug() ) )

    f["enabled"].setValue( False )

    attributesHash2 = s.attributesHash()
    self.assertNotEqual( attributesHash2, attributesHash )

    attributes2 = s.attributes()
    self.assertEqual( len( attributes2 ), 1 )

    # Every parameter except the previously-connected `specular_color`
    # must be unaffected by the disable.
    for key in attributes["ai:surface"].getShader( "s" ).parameters.keys() :
        if key != "specular_color" :
            self.assertEqual(
                attributes["ai:surface"].getShader( "s" ).parameters[key],
                attributes2["ai:surface"].getShader( "s" ).parameters[key]
            )
def testAssignmentAttributeName( self ) :
    """ShaderAssignment should assign surface shaders as `ai:surface`."""

    plane = GafferScene.Plane()

    shader = GafferArnold.ArnoldShader()
    shader.loadShader( "standard_surface" )

    assignment = GafferScene.ShaderAssignment()
    assignment["in"].setInput( plane["out"] )
    assignment["shader"].setInput( shader["out"] )

    self.assertEqual( assignment["out"].attributes( "/plane" ).keys(), [ "ai:surface"] )
def testLightFilterAssignmentAttributeName( self ) :
    """Light filter shaders should be assigned under a suffixed
    `ai:lightFilter:<suffix>` attribute name."""

    p = GafferScene.Plane()

    s = GafferArnold.ArnoldShader( "light_blocker" )
    s.loadShader( "light_blocker" ) # metadata sets type to ai:lightFilter

    a = GafferScene.ShaderAssignment()
    a["in"].setInput( p["out"] )
    a["shader"].setInput( s["out"] )

    self.assertEqual( s["attributeSuffix"].getValue(), "light_blocker" )
    self.assertEqual( a["out"].attributes( "/plane" ).keys(), [ "ai:lightFilter:light_blocker"] )
def testDirtyPropagationThroughShaderAssignment( self ) :
    """Editing an assigned shader should dirty exactly the assignment's
    shader input and output attributes, in order."""

    n = GafferArnold.ArnoldShader()
    n.loadShader( "flat" )

    p = GafferScene.Plane()
    a = GafferScene.ShaderAssignment()
    a["in"].setInput( p["out"] )
    a["shader"].setInput( n["out"] )

    cs = GafferTest.CapturingSlot( a.plugDirtiedSignal() )

    n["parameters"]["color"]["r"].setValue( 0.25 )

    # The exact dirty set (and its order) is part of the contract under test.
    self.assertEqual(
        [ c[0] for c in cs ],
        [
            a["shader"],
            a["out"]["attributes"],
            a["out"],
        ],
    )
def testByteParameters( self ) :
    """Byte parameters should load as IntPlugs clamped to [0, 255]."""

    node = GafferArnold.ArnoldShader()
    node.loadShader( "image" )

    plug = node["parameters"]["start_channel"]
    self.assertIsInstance( plug, Gaffer.IntPlug )
    self.assertEqual( plug.minValue(), 0 )
    self.assertEqual( plug.maxValue(), 255 )
def testMeshLight( self ) :
    """Light shaders should load with type `ai:light` and a generic out plug."""

    n = GafferArnold.ArnoldShader()
    n.loadShader( "mesh_light" )

    self.assertEqual( n["name"].getValue(), "mesh_light" )
    self.assertEqual( n["type"].getValue(), "ai:light" )

    self.assertIn( "exposure", n["parameters"] )
    # The original called `assertTrue( a, b )`, where the second argument is
    # only a failure message — it asserted nothing about `b`. `assertEqual`
    # performs the intended comparison.
    self.assertEqual( n["out"].typeId(), Gaffer.Plug.staticTypeId() )
def testColorParameterMetadata( self ) :
    """Without metadata overrides, all `ray_switch` parameters load as RGBA."""

    n = GafferArnold.ArnoldShader()
    n.loadShader( "ray_switch" )

    for p in n["parameters"] :
        self.assertTrue( isinstance( p, Gaffer.Color4fPlug ) )

    # Runs the companion check in a subprocess with the test metadata on
    # `ARNOLD_PLUGIN_PATH` (see the `withMetadata` decorator).
    self._testColorParameterMetadata()
@withMetadata
def _testColorParameterMetadata( self ) :
    """With the test metadata loaded, some `ray_switch` parameters are
    declared RGB instead of RGBA."""

    n = GafferArnold.ArnoldShader()
    n.loadShader( "ray_switch" )

    for name in [ "camera", "shadow", "diffuse_transmission" ] :
        self.assertTrue( isinstance( n["parameters"][name], Gaffer.Color3fPlug ) )

    for name in [ "diffuse_reflection", "specular_transmission", "specular_reflection", "volume" ] :
        self.assertTrue( isinstance( n["parameters"][name], Gaffer.Color4fPlug ) )
def testFloatParameterMetadata( self ) :
    """Without metadata, `gobo.slidemap` loads as a colour plug."""

    n = GafferArnold.ArnoldShader()
    n.loadShader( "gobo" )

    self.assertTrue( isinstance( n["parameters"]["slidemap"], Gaffer.Color3fPlug ) )

    # Companion check runs in a subprocess with the test metadata loaded.
    self._testFloatParameterMetadata()
@withMetadata
def _testFloatParameterMetadata( self ) :
    """With the test metadata loaded, `gobo.slidemap` is overridden to a float."""

    n = GafferArnold.ArnoldShader()
    n.loadShader( "gobo" )

    self.assertTrue( isinstance( n["parameters"]["slidemap"], Gaffer.FloatPlug ) )
def testEmptyPlugTypeMetadata( self ) :
    """Without metadata, `diffuse_roughness` is a regular parameter plug."""

    n = GafferArnold.ArnoldShader()
    n.loadShader( "standard_surface" )
    self.assertTrue( "diffuse_roughness" in n["parameters"] )

    # Companion check runs in a subprocess with the test metadata loaded.
    self._testEmptyPlugTypeMetadata()
@withMetadata
def _testEmptyPlugTypeMetadata( self ) :
    """With an empty `gaffer.plugType` declaration, the parameter is suppressed."""

    n = GafferArnold.ArnoldShader()
    n.loadShader( "standard_surface" )
    self.assertTrue( "diffuse_roughness" not in n["parameters"] )

    # Loading a second node repeats the check — presumably to exercise a
    # cached shader-loading path; confirm intent.
    n = GafferArnold.ArnoldShader()
    n.loadShader( "standard_surface" )
    self.assertTrue( "diffuse_roughness" not in n["parameters"] )
def testDefaultOverrideMetadata( self ) :
    """Without metadata, `image` parameters keep Arnold's stock defaults."""

    n = GafferArnold.ArnoldShader()
    n.loadShader( "image" )

    self.assertEqual( n["parameters"]["single_channel"].defaultValue(), False )
    self.assertEqual( n["parameters"]["mipmap_bias"].defaultValue(), 0 )
    self.assertEqual( n["parameters"]["start_channel"].defaultValue(), 0 )
    self.assertEqual( n["parameters"]["sscale"].defaultValue(), 1.0 )
    self.assertEqual( n["parameters"]["multiply"].defaultValue(), imath.Color3f( 1.0 ) )
    self.assertEqual( n["parameters"]["missing_texture_color"].defaultValue(), imath.Color4f( 0.0 ) )
    self.assertEqual( n["parameters"]["uvcoords"].defaultValue(), imath.V2f( 0.0 ) )
    self.assertEqual( n["parameters"]["filename"].defaultValue(), "" )
    self.assertEqual( n["parameters"]["filter"].defaultValue(), "smart_bicubic" )

    # Companion check runs in a subprocess with the test metadata loaded.
    self._testDefaultOverrideMetadata()
@withMetadata
def _testDefaultOverrideMetadata( self ) :
    """With the test metadata loaded, `gaffer.default` declarations override
    the stock defaults for every supported parameter type."""

    n = GafferArnold.ArnoldShader()
    n.loadShader( "image" )

    self.assertEqual( n["parameters"]["single_channel"].defaultValue(), True )
    self.assertEqual( n["parameters"]["mipmap_bias"].defaultValue(), 42 )
    self.assertEqual( n["parameters"]["start_channel"].defaultValue(), 42 )
    self.assertAlmostEqual( n["parameters"]["sscale"].defaultValue(), 42.42, places = 5 )
    self.assertEqual( n["parameters"]["multiply"].defaultValue(), imath.Color3f( 1.2, 3.4, 5.6 ) )
    # RGBA metadata support added in Arnold 5.3. Need to wait until we standardise on that
    # to add this declaration to the test metadata
    #self.assertEqual( n["parameters"]["missing_texture_color"].defaultValue(), imath.Color4f( 1.2, 3.4, 5.6, 7.8 ) )
    self.assertEqual( n["parameters"]["uvcoords"].defaultValue(), imath.V2f( 1.2, 3.4 ) )
    self.assertEqual( n["parameters"]["filename"].defaultValue(), "overrideDefault" )
    self.assertEqual( n["parameters"]["filter"].defaultValue(), "closest" )
def testMixAndMatchWithOSLShaders( self ) :
    """Arnold and OSL shaders should connect freely in both directions.

    This only verifies that the connections can be made; there are no
    assertions beyond the absence of exceptions.
    """

    utility = GafferArnold.ArnoldShader()
    utility.loadShader( "utility" )

    colorToFloat = GafferOSL.OSLShader()
    colorToFloat.loadShader( "Conversion/ColorToFloat" )
    colorToFloat["parameters"]["c"].setInput( utility["out"] )

    colorSpline = GafferOSL.OSLShader()
    colorSpline.loadShader( "Pattern/ColorSpline" )
    colorSpline["parameters"]["x"].setInput( colorToFloat["out"]["r"] )

    flat = GafferArnold.ArnoldShader()
    flat.loadShader( "flat" )
    flat["parameters"]["color"].setInput( colorSpline["out"]["c"] )
def testReload( self ) :
image = GafferArnold.ArnoldShader()
image.loadShader( "image" )
image["parameters"]["swap_st"].setValue( True )
image["parameters"]["uvcoords"].setValue( imath.V2f( 0.5, 1 | |
<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: entities.py
#
# Copyright 2020 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
"""
Main code for entities.
.. _Google Python Style Guide:
http://google.github.io/styleguide/pyguide.html
"""
import logging
import json
from awsapilib.authentication import LoggerMixin
__author__ = '''<NAME> <<EMAIL>>, <NAME> <<EMAIL>>'''
__docformat__ = '''google'''
__date__ = '''18-05-2020'''
__copyright__ = '''Copyright 2020, S<NAME>, Costas Tyfoxylos'''
__credits__ = ["<NAME>", "<NAME>"]
__license__ = '''MIT'''
__maintainer__ = '''<NAME>, <NAME>'''
__email__ = '''<<EMAIL>>, <<EMAIL>>'''
__status__ = '''Development''' # "Prototype", "Development", "Production".
# This is the main prefix used for logging
LOGGER_BASENAME = '''entities'''
LOGGER = logging.getLogger(LOGGER_BASENAME)
LOGGER.addHandler(logging.NullHandler())
class Entity(LoggerMixin):  # pylint: disable=too-few-public-methods
    """The core entity: wraps a raw data payload returned by the SSO APIs."""

    def __init__(self, sso_instance, data):
        self._sso = sso_instance
        self._data = self._parse_data(data)

    def _parse_data(self, data):
        """Return the payload when it is a dict, otherwise log and fall back to {}."""
        if isinstance(data, dict):
            return data
        self.logger.error(f'Invalid data received :{data}')
        return {}
class Group(Entity):
    """Models the group object of AWS SSO."""

    def __init__(self, sso_instance, data):
        super().__init__(sso_instance, data)
        # Group membership is served by the userpool API, not the control API.
        self.url = f'{sso_instance.api_url}/userpool'

    @property
    def id(self):  # pylint: disable=invalid-name
        """The id of the group.

        Returns:
            id (str): The id of the group, or None if not present

        """
        return self._data.get('GroupId')

    @property
    def name(self):
        """The name of the group.

        Returns:
            name (str): The name of the group, or '' if not set

        """
        return self._data.get('GroupName', '')

    @property
    def description(self):
        """The description of the group.

        Returns:
            description (str): The description of the group, or '' if not set

        """
        return self._data.get('Description', '')

    @property
    def users(self):
        """The users in the group.

        Yields:
            User: The users that are members of the group

        """
        content_payload = {'GroupId': self.id,
                           'MaxResults': 100}
        target = 'com.amazonaws.swbup.service.SWBUPService.ListMembersInGroup'
        for user in self._sso._get_paginated_results(content_payload=content_payload,  # pylint: disable=protected-access
                                                     path='userpool',
                                                     target='ListMembersInGroup',
                                                     amz_target=target,
                                                     object_group='Members',
                                                     url=self.url):
            yield self._sso.get_user_by_id(user.get('Id'))
class Account(Entity):
    """Models the Account object of AWS SSO."""

    @property
    def url(self):
        """Url for the account.

        Returns:
            url (str): The url of the account (the SSO control endpoint)

        """
        return self._sso.endpoint_url

    @property
    def name(self):
        """The name of the account.

        Returns:
            name (str): The name of the account, or None if not present

        """
        return self._data.get('Name')

    @property
    def email(self):
        """The email attached to the account.

        Returns:
            email (str): The email attached to the account, or None if not present

        """
        return self._data.get('Email')

    @property
    def id(self):  # pylint: disable=invalid-name
        """The id of the account.

        Returns:
            id (str): The id of the account, or None if not present

        """
        return self._data.get('Id')

    @property
    def arn(self):
        """The arn of the account.

        Returns:
            arn (str): The arn of the account, or None if not present

        """
        return self._data.get('Arn')

    @property
    def status(self):
        """The status of the account.

        Returns:
            status (str): The status of the account, or None if not present

        """
        return self._data.get('Status')

    def provision_saml_provider(self):
        """Creates the SAML provider for the account.

        Returns:
            dict: The service response on success, an empty dict on failure
                (the error is logged)

        """
        target = 'com.amazon.switchboard.service.SWBService.ProvisionSAMLProvider'
        payload = self._sso.get_api_payload(content_string={'applicationInstanceId': self.instance_id
                                                            },
                                            target='ProvisionSAMLProvider',
                                            path='/control/',
                                            x_amz_target=target)
        self.logger.debug('Trying to create saml provider for aws account with payload: %s', payload)
        response = self._sso.session.post(self.url, json=payload)
        if not response.ok:
            self.logger.error(response.text)
            return {}
        return response.json()

    @property
    def instance_id(self):
        """The application instance id of the account.

        Looked up first; provisioned on demand when no instance exists yet.

        Returns:
            instance_id (str): The instance id of the account, or None if
                both the lookup and the provisioning fail

        """
        instance_id = self._retrieve_instance_id()
        if not instance_id:
            instance_id = self._provision_application_instance_for_aws_account()
        return instance_id

    def _provision_application_instance_for_aws_account(self):
        # Provisions a new application instance for this account and returns
        # its id, or None on failure (the error is logged).
        target = 'com.amazon.switchboard.service.SWBService.ProvisionApplicationInstanceForAWSAccount'
        payload = self._sso.get_api_payload(content_string={'accountId': self.id,
                                                            'accountEmail': self.email,
                                                            'accountName': self.name
                                                            },
                                            target='ProvisionApplicationInstanceForAWSAccount',
                                            path='/control/',
                                            x_amz_target=target)
        # NOTE(review): log message says "get" but this call provisions.
        self.logger.debug('Trying to get instance id for aws account with payload: %s', payload)
        response = self._sso.session.post(self.url, json=payload)
        if not response.ok:
            self.logger.error(response.text)
            return None
        return response.json().get('applicationInstance', {}).get('instanceId', None)

    def _retrieve_instance_id(self):
        # Looks up the existing application instance id for this account,
        # returning None on failure (the error is logged).
        account_id = self.id
        target = 'com.amazon.switchboard.service.SWBService.GetApplicationInstanceForAWSAccount'
        payload = self._sso.get_api_payload(content_string={'awsAccountId': account_id},
                                            target='GetApplicationInstanceForAWSAccount',
                                            path='/control/',
                                            x_amz_target=target)
        self.logger.debug('Trying to get instance id for aws account with payload: %s', payload)
        response = self._sso.session.post(self.url, json=payload)
        if not response.ok:
            self.logger.error(response.text)
            return None
        return response.json().get('applicationInstance', {}).get('instanceId', None)

    @property
    def associated_profiles(self):
        """The profiles associated with the account.

        Returns:
            associated_profiles (list): The profiles associated with the
                account; an empty list on failure (the error is logged)

        """
        target = 'com.amazon.switchboard.service.SWBService.ListAWSAccountProfiles'
        payload = self._sso.get_api_payload(content_string={'instanceId': self.instance_id},
                                            target='ListAWSAccountProfiles',
                                            path='/control/',
                                            x_amz_target=target)
        # NOTE(review): log message says "provision" but this call lists profiles.
        self.logger.debug('Trying to provision application profile for aws account with payload: %s', payload)
        response = self._sso.session.post(self.url, json=payload)
        if not response.ok:
            self.logger.error(response.text)
            return []
        return response.json().get('profileList', [])
class User(Entity):
    """Models the user object of SSO."""

    @property
    def url(self):
        """Url for the user.

        Returns:
            url (str): The url for the user (the userpool API endpoint)

        """
        return f'{self._sso.api_url}/userpool'

    @property
    def status(self):
        """The activation state of the user.

        Returns:
            status: The value of the user's 'Active' attribute, or None if
                not present

        """
        return self._data.get('Active')

    @property
    def created_at(self):
        """The date and time of the user's creation.

        Returns:
            created_at: The 'CreatedAt' metadata of the user

        """
        return self._data.get('Meta', {}).get('CreatedAt')

    @property
    def updated_at(self):
        """The date and time of the user's last update.

        Returns:
            updated_at: The 'UpdatedAt' metadata of the user

        """
        return self._data.get('Meta', {}).get('UpdatedAt')

    @property
    def emails(self):
        """The emails of the user.

        Returns:
            emails: The email entries of the user, '' if none are set

        """
        return self._data.get('UserAttributes').get('emails', {}).get('ComplexListValue', '')

    @property
    def _name(self):
        # The complex 'name' attribute holding the given and family names.
        return self._data.get('UserAttributes').get('name', {}).get('ComplexValue', {})

    @property
    def first_name(self):
        """The first name of the user.

        Returns:
            first_name (str): The first name of the user, or '' if not set

        """
        return self._name.get('givenName', {}).get('StringValue', '')

    @property
    def last_name(self):
        """The last name of the user.

        Returns:
            last_name (str): The last name of the user, or '' if not set

        """
        return self._name.get('familyName', {}).get('StringValue', '')

    @property
    def id(self):  # pylint: disable=invalid-name
        """The id of the user.

        Returns:
            id (str): The id of the user, or None if not present

        """
        return self._data.get('UserId')

    @property
    def name(self):
        """The username of the user.

        Returns:
            name (str): The username of the user, or None if not present

        """
        return self._data.get('UserName')

    @property
    def display_name(self):
        """The display name of the user.

        Returns:
            display_name (str): The display name of the user

        """
        return self._data.get('UserAttributes', {}).get('displayName', {}).get('StringValue')

    @property
    def groups(self):
        """The groups associated with the user.

        Yields:
            Group: The groups the user belongs to

        """
        content_payload = {'UserId': self.id,
                           'MaxResults': 100}
        target = 'com.amazonaws.swbup.service.SWBUPService.ListGroupsForUser'
        for group in self._sso._get_paginated_results(content_payload=content_payload,  # pylint: disable=protected-access
                                                      path='userpool',
                                                      target='ListGroupsForUser',
                                                      amz_target=target,
                                                      object_group='Groups',
                                                      url=self.url):
            yield self._sso.get_group_by_id(group.get('GroupId'))
class PermissionSet(Entity):
"""Models the permission set object of SSO."""
@property
def url(self):
    """Url of the permission set.

    Returns:
        url (str): The url of the permission set (the SSO control endpoint)

    """
    return self._sso.endpoint_url
@property
def description(self):
    """The description of the permission set.

    Returns:
        description (str): The description, or None if not present

    """
    return self._data.get('Description')
@property
def id(self):  # pylint: disable=invalid-name
    """The id of the permission set.

    Returns:
        id (str): The id of the permission set, or None if not present

    """
    return self._data.get('Id')
@property
def name(self):
    """The name of the permission set.

    Returns:
        name (str): The name of the permission set, or None if not present

    """
    return self._data.get('Name')
@property
def ttl(self):
    """The ttl of the permission set.

    Returns:
        ttl (str): The ttl of the permission set, or None if not present

    """
    return self._data.get('ttl')
@property
def creation_date(self):
    """The creation date of the permission set.

    Returns:
        creation_date (str): The creation date, or None if not present

    """
    return self._data.get('CreationDate')
@property
def relay_state(self):
    """The relay state of the permission set.

    Returns:
        relay_state (str): The relayState, or None if not present

    """
    return self._data.get('relayState')
@property
def permission_policy(self):
    """The permission policy of the permission set.

    Returns:
        permission_policy (dict): The permission policy, or None when the
            service call fails (the error is logged)

    """
    target = 'com.amazon.switchboard.service.SWBService.GetPermissionsPolicy'
    content_string = {'permissionSetId': self.id}
    payload = self._sso.get_api_payload(content_string=content_string,
                                        target='GetPermissionsPolicy',
                                        path='/control/',
                                        x_amz_target=target)
    self.logger.debug('Getting permission policy for permission_set with payload of %s:', payload)
    response = self._sso.session.post(self.url, json=payload)
    if not response.ok:
        self.logger.error(response.text)
        return None
    return response.json()
@property
def provisioned_accounts(self):
    """The accounts provisioned with the permission set.

    Yields:
        Account: Accounts provisioned with the permission set

    """
    content_payload = {'permissionSetId': self.id,
                       'onlyOutOfSync': 'false'}
    target = 'com.amazon.switchboard.service.SWBService.ListAccountsWithProvisionedPermissionSet'
    for account_id in self._sso._get_paginated_results(content_payload=content_payload,  # pylint: disable=protected-access
                                                       path='control',
                                                       target='ListAccountsWithProvisionedPermissionSet',
                                                       amz_target=target,
                                                       object_group='accountIds',
                                                       next_token_marker='marker',
                                                       url=self._sso.endpoint_url):
        yield self._sso.get_account_by_id(account_id)
def assign_custom_policy_to_permission_set(self, policy_document):
"""Assign | |
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype core validation classes.**
This private submodule defines the core low-level class hierarchy driving the
entire :mod:`beartype` validation ecosystem.
This private submodule is *not* intended for importation by downstream callers.
'''
# ....................{ TODO }....................
# All "FIXME:" comments for this submodule reside in this package's "__init__"
# submodule to improve maintainability and readability here.
# ....................{ IMPORTS }....................
from beartype.roar import BeartypeValeSubscriptionException
from beartype._util.cache.utilcachecall import callable_cached
from beartype._util.data.utildatadict import merge_mappings_two
from beartype._util.func.utilfuncarg import get_func_args_len_standard
from beartype._util.func.utilfuncscope import CallableScope
from beartype._util.func.utilfunctest import is_func_python
from beartype._util.text.utiltextrepr import represent_object
from typing import Any, Callable
# See the "beartype.cave" submodule for further commentary.
__all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL']
# ....................{ HINTS }....................
SubscriptedIsValidator = Callable[[Any], bool]
'''
PEP-compliant type hint matching a **validator**: a caller-defined callable
accepting exactly one arbitrary object and returning ``True`` if that object
satisfies some constraint, ``False`` otherwise.

Validators of this shape are suitable for subscripting the :class:`Is`
class factory.
'''
# ....................{ CLASSES ~ subscripted }....................
class _SubscriptedIs(object):
'''
**Beartype validator** (i.e., object encapsulating a caller-defined
validation callable returning ``True`` when an arbitrary object passed to
that callable satisfies an arbitrary constraint, suitable for subscripting
(indexing) `PEP 593`_-compliant :attr:`typing.Annotated` type hints
enforcing that validation on :mod:`beartype`-decorated callable parameters
and returns annotated by those hints).
Caveats
----------
**This low-level class is not intended to be externally instantiated**
(e.g., by calling the :meth:`__init__` constructor). This class is *only*
intended to be internally instantiated by subscripting (indexing) the
higher-level :class:`Is` class factory.
Attributes
----------
is_valid : Callable[[Any,], bool]
**Validator** (i.e., caller-defined callable accepting a single
arbitrary object and returning either ``True`` if that object satisfies
an arbitrary constraint *or* ``False`` otherwise).
_is_valid_code : str
**Validator code** (i.e., Python code snippet validating the
previously localized parameter or return value against the same
validation performed by the :meth:`is_valid` function). For efficiency,
callers validating data through dynamically generated code (e.g., the
:func:`beartype.beartype` decorator) rather than standard function
calls (e.g., the private :mod:`beartype._decor._hint._pep._error`
subpackage) should prefer :attr:`is_valid_code` to :meth:`is_valid`.
Despite performing the same validation as the :meth:`is_valid`
callable, this code avoids the additional stack frame imposed by
calling that callable and thus constitutes an optimization.
_is_valid_code_locals : CallableScope
**Validator code local scope** (i.e., dictionary mapping from the name
to value of each local attribute referenced in :attr:`code`) required
to dynamically compile this validator code into byte code at runtime.
_get_repr : Callable[[], str]
**Representer** (i.e., caller-defined callable accepting *no* arguments
returning a machine-readable representation of this validator).
Technically, that representation *could* be passed by the caller rather
than this callable dynamically generating that representation.
Pragmatically, generating that representation is sufficiently slow for
numerous types of validators that deferring their generation until
required by a call to the :meth:`__repr__` dunder method externally
called by a call to the :func:`repr` builtin on this validator is
effectively mandatory. Data validators whose representations are
particularly slow to generate include:
* The :class:`Is` class subscripted by a lambda rather than non-lambda
function. Generating the representation of that class subscripted by
a non-lambda function only requires introspecting the name of that
function and is thus trivially fast. However, lambda functions have
no names and are thus *only* distinguishable by their source code;
ergo, generating the representation of that class subscripted by a
lambda function requires parsing the source code of the file
declaring that lambda for the exact substring of that code declaring
that lambda and is thus non-trivially slow.
See Also
----------
:class:`Is`
Class docstring for further details.
.. _PEP 593:
https://www.python.org/dev/peps/pep-0593
'''
# ..................{ CLASS VARIABLES }..................
# Slot all instance variables defined on this object to minimize the time
# complexity of both reading and writing variables across frequently called
# cache dunder methods. Slotting has been shown to reduce read and write
# costs by approximately ~10%, which is non-trivial.
__slots__ = (
'is_valid',
'_is_valid_code',
'_is_valid_code_locals',
'_get_repr',
)
# ..................{ INITIALIZERS }..................
def __init__(
self,
is_valid: SubscriptedIsValidator,
is_valid_code: str,
is_valid_code_locals: CallableScope,
get_repr: Callable[[], str],
) -> None:
'''
Initialize this object with the passed validation callable, code, and
code local scope.
See the class docstring for usage instructions.
Parameters
----------
is_valid : Callable[[Any,], bool]
**Data is_valid** (i.e., caller-defined callable accepting a single
arbitrary object and returning either ``True`` if that object
satisfies an arbitrary constraint *or* ``False`` otherwise).
is_valid_code : str
**Validator code** (i.e., Python code snippet validating the
previously localized parameter or return value against the same
validation performed by the :func:`is_valid` function). This code:
* *Must* contain one or more ``"{obj}"`` substrings, which external
code generators (e.g., the :func:`beartype.beartype` decorator)
will globally replace at evaluation time with the actual test
subject object to be validated by this code.
* *May* contain one or more ``"{indent}"`` substrings, which such
code generators will globally replace at evaluation time with the
line-oriented indentation required to generate a
valid Python statement embedding this code. For consistency with
`PEP 8`_-compliant and well-established Python style guides, any
additional indentation hard-coded into this code should be
aligned to **four-space indentation.**
is_valid_code_locals : Optional[CallableScope]
**Validator code local scope** (i.e., dictionary mapping from the
name to value of each local attribute referenced in
:attr:`is_valid_code` code) required to dynamically compile this
validator code into byte code at runtime.
get_repr : Callable[[], str]
**Representer** (i.e., caller-defined callable accepting *no*
arguments returning a machine-readable representation of this
validator). Technically, that representation rather than this
callable dynamically generating that representation could be passed
by the caller. Pragmatically, generating that representation is
sufficiently slow for various types of validators that deferring
their generation until required by a call to the :meth:`__repr__`
dunder method externally called by a call to the :func:`repr`
builtin passed this validator is effectively mandatory. Data
validators whose representations are slow to generate include:
* The :class:`Is` class subscripted by a lambda rather than
non-lambda function. Generating the representation of that class
subscripted by a non-lambda function only requires introspecting
the name of that function and is thus trivially fast. However,
lambda functions have no names and are thus *only*
distinguishable by their source code; ergo, generating the
representation of that class subscripted by a lambda function
requires parsing the source code of the file declaring that
lambda for the exact substring of that code declaring that lambda
and is thus non-trivially slow.
Raises
----------
BeartypeValeSubscriptionException
If either:
* ``is_valid`` is either:
* *Not* callable.
* A C-based rather than pure-Python callable.
* A pure-Python callable accepting two or more arguments.
* ``is_valid_code`` is either:
* *Not* a string.
* A string either:
* Empty.
* Non-empty but **invalid** (i.e., *not* containing the test
subject substring ``{obj}``).
* ``is_valid_locals`` is *not* a dictionary.
* ``get_repr`` is either:
* *Not* callable.
* A C-based rather than pure-Python callable.
* A pure-Python callable accepting one or more arguments.
.. _PEP 8:
https://www.python.org/dev/peps/pep-0008
'''
# If this validator is uncallable, raise an exception.
if not callable(is_valid):
raise BeartypeValeSubscriptionException(
f'Class "beartype.vale.Is" subscripted argument '
f'{represent_object(is_valid)} not callable.'
)
# Else, this validator is callable.
#
# If this validator is C-based, raise an exception.
elif not is_func_python(is_valid):
raise BeartypeValeSubscriptionException(
f'Class "beartype.vale.Is" subscripted callable '
f'{repr(is_valid)} not pure-Python (e.g., C-based).'
)
# Else, this validator is pure-Python.
#
# If this validator does *NOT* accept exactly one argument, raise an
# exception.
elif get_func_args_len_standard(
func=is_valid,
exception_cls=BeartypeValeSubscriptionException,
) != 1:
raise BeartypeValeSubscriptionException(
f'Class "beartype.vale.Is" subscripted callable '
f'{repr(is_valid)} positional or keyword argument count '
f'{get_func_args_len_standard(is_valid)} != 1.'
)
# Else, this validator accepts exactly one argument. Since no further
# validation can be performed on this callable without unsafely calling
# that callable, we accept this callable as is for now.
#
# Note that we *COULD* technically inspect annotations if defined on
# this callable as well. Since this callable is typically defined as a
# lambda, annotations are typically *NOT* defined on this callable.
# If this code is *NOT* a string, raise an exception.
if | |
"iso2": "NG",
"admin_name": "Borno",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Amagunze",
"lat": "6.3306",
"lng": "7.6525",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Enugu",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Kaltungo",
"lat": "9.8200",
"lng": "11.3087",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Gombe",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Boh",
"lat": "9.7819",
"lng": "11.2788",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Gombe",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Iresa-Adu",
"lat": "8.0875",
"lng": "4.3926",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Oyo",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Gwandu",
"lat": "12.5020",
"lng": "4.6429",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kebbi",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Tambuwal",
"lat": "12.4059",
"lng": "4.6461",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Sokoto",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Kankia",
"lat": "12.5464",
"lng": "7.8225",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Katsina",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Ode-Irele",
"lat": "6.4942",
"lng": "4.8704",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Ondo",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Gantsa",
"lat": "11.6631",
"lng": "9.7264",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Jigawa",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Kwale",
"lat": "5.7077",
"lng": "6.4340",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Delta",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Igumale",
"lat": "6.7979",
"lng": "7.9679",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Benue",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Langtang",
"lat": "9.1416",
"lng": "9.7910",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Plateau",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Dass",
"lat": "10.0007",
"lng": "9.5160",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Bauchi",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Bajoga",
"lat": "10.8515",
"lng": "11.4317",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Gombe",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Agenebode",
"lat": "7.1051",
"lng": "6.6938",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Edo",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Ogoja",
"lat": "6.6584",
"lng": "8.7992",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Cross River",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Igueben",
"lat": "6.6018",
"lng": "6.2428",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Edo",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Silame",
"lat": "13.0392",
"lng": "4.8459",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Sokoto",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Owode",
"lat": "6.9485",
"lng": "3.5056",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Ogun",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Ikole-Ekiti",
"lat": "7.7915",
"lng": "5.5087",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Ekiti",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Kankara",
"lat": "11.9311",
"lng": "7.4111",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Katsina",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Mangu",
"lat": "9.5206",
"lng": "9.0977",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Plateau",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Argungu",
"lat": "12.7448",
"lng": "4.5251",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kebbi",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Birnin Magaji",
"lat": "12.5592",
"lng": "6.8946",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Zamfara",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Ogwashi-Uku",
"lat": "6.1781",
"lng": "6.5246",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Delta",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Kunchi",
"lat": "12.5026",
"lng": "8.2709",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kano",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Ezillo",
"lat": "6.4293",
"lng": "7.8179",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Ebonyi",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Ikem",
"lat": "6.7799",
"lng": "7.7148",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Enugu",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Dange",
"lat": "12.8531",
"lng": "5.3457",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Sokoto",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Egbema",
"lat": "5.5443",
"lng": "6.7609",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Imo",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Maigatari",
"lat": "12.8078",
"lng": "9.4452",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Jigawa",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Ipetumodu",
"lat": "7.5215",
"lng": "4.4448",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Osun",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Nembe",
"lat": "4.5367",
"lng": "6.4033",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Bayelsa",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Ekeremor",
"lat": "5.0581",
"lng": "5.7805",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Bayelsa",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Dambam",
"lat": "11.6789",
"lng": "10.7079",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Bauchi",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Jibia",
"lat": "13.0938",
"lng": "7.2262",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Katsina",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Kiyawa",
"lat": "11.7844",
"lng": "9.6069",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Jigawa",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Bunza",
"lat": "12.0882",
"lng": "4.0152",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kebbi",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Itas",
"lat": "11.8575",
"lng": "9.9639",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Bauchi",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Oju",
"lat": "6.8453",
"lng": "8.4191",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Benue",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Shinkafi",
"lat": "13.0730",
"lng": "6.5057",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Zamfara",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Lessel",
"lat": "7.1273",
"lng": "9.0198",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Benue",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Adikpo",
"lat": "6.8900",
"lng": "9.2335",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Benue",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Iloffa",
"lat": "8.0934",
"lng": "5.1423",
"country": "Nigeria",
"iso2": "NG", | |
dipole : float, optional
Dipole, [debye]
Psat : float or callable, optional
Vapor pressure at a given temperature, or callable for the same [Pa]
eos : object, optional
Equation of State object after :obj:`thermo.eos.GCEOS`
Notes
-----
A string holding each method's name is assigned to the following variables
in this module, intended as the most convenient way to refer to a method.
To iterate over all methods, use the lists stored in
:obj:`surface_tension_methods` and :obj:`volume_liquid_methods_P` for low
and high pressure methods respectively.
Low pressure methods:
**PERRYDIPPR**:
A simple polynomial as expressed in [1]_, with data available for
344 fluids. Temperature limits are available for all fluids. Believed
very accurate.
**VDI_PPDS**:
Coefficients for a equation form developed by the PPDS, published
openly in [3]_. Valid up to the critical temperature, and extrapolates
to very low temperatures well.
**MMSNM0FIT**:
Uses a fit coefficient for better accuracy in the :obj:`SNM0` method,
Coefficients available for 73 fluids from [2]_. Valid to the critical
point.
**HTCOSTALDFIT**:
A method with two fit coefficients to the :obj:`COSTALD` method.
Coefficients available for 192 fluids, from [3]_. Valid to the critical
point.
**RACKETTFIT**:
The :obj:`Racket` method, with a fit coefficient Z_RA. Data is
available for 186 fluids, from [3]_. Valid to the critical point.
**CRC_INORG_L**:
Single-temperature coefficient linear model in terms of mass density
for the density of inorganic liquids; converted to molar units
internally. Data is available for 177 fluids normally valid over a
narrow range above the melting point, from [4]_; described in
:obj:`CRC_inorganic`.
**MMSNM0**:
CSP method, described in :obj:`SNM0`.
**HTCOSTALD**:
CSP method, described in :obj:`COSTALD`.
**YEN_WOODS_SAT**:
CSP method, described in :obj:`Yen_Woods_saturation`.
**RACKETT**:
CSP method, described in :obj:`Rackett`.
**YAMADA_GUNN**:
CSP method, described in :obj:`Yamada_Gunn`.
**BHIRUD_NORMAL**:
CSP method, described in :obj:`Bhirud_normal`.
**TOWNSEND_HALES**:
CSP method, described in :obj:`Townsend_Hales`.
**CAMPBELL_THODOS**:
CSP method, described in :obj:`Campbell_Thodos`.
**COOLPROP**:
CoolProp external library; with select fluids from its library.
Range is limited to that of the equations of state it uses, as
described in [5]_. Very slow.
**CRC_INORG_L_CONST**:
Constant inorganic liquid densities, in [4]_.
**VDI_TABULAR**:
Tabular data in [6]_ along the saturation curve; interpolation is as
set by the user or the default.
High pressure methods:
**COSTALD_COMPRESSED**:
CSP method, described in :obj:`COSTALD_compressed`. Calculates a
low-pressure molar volume first, using `T_dependent_property`.
**COOLPROP**:
CoolProp external library; with select fluids from its library.
Range is limited to that of the equations of state it uses, as
described in [5]_. Very slow, but unparalleled in accuracy for pressure
dependence.
**EOS**:
Equation of state provided by user.
See Also
--------
Yen_Woods_saturation
Rackett
Yamada_Gunn
Townsend_Hales
Bhirud_normal
COSTALD
Campbell_Thodos
SNM0
CRC_inorganic
COSTALD_compressed
References
----------
.. [1] <NAME>, and <NAME>. Perry's Chemical Engineers' Handbook,
8E. McGraw-Hill Professional, 2007.
.. [2] <NAME>., <NAME>, <NAME>, and <NAME>.
"A Simplified Method for Calculating Saturated Liquid Densities."
Fluid Phase Equilibria 224, no. 2 (October 1, 2004): 157-67.
doi:10.1016/j.fluid.2004.06.054
.. [3] Hankinson, <NAME>., and <NAME>. "A New Correlation for
Saturated Densities of Liquids and Their Mixtures." AIChE Journal
25, no. 4 (1979): 653-663. doi:10.1002/aic.690250412
.. [4] <NAME>., <NAME>, and <NAME>. CRC Handbook of
Chemistry and Physics. [Boca Raton, FL]: CRC press, 2014.
.. [5] <NAME>., <NAME>, <NAME>, and <NAME>.
"Pure and Pseudo-Pure Fluid Thermophysical Property Evaluation and the
Open-Source Thermophysical Property Library CoolProp." Industrial &
Engineering Chemistry Research 53, no. 6 (February 12, 2014):
2498-2508. doi:10.1021/ie4033999. http://www.coolprop.org/
.. [6] Gesellschaft, <NAME>., ed. VDI Heat Atlas. 2nd edition.
Berlin; New York:: Springer, 2010.
'''
# Class-level configuration for the generic temperature/pressure-dependent
# property machinery: display metadata, interpolation hooks, sanity bounds
# on computed values, and the default method preference order.
name = 'Liquid molar volume'
# NOTE(review): was 'mol/m^3' — molar volume is volume per mole, i.e.
# m^3/mol, which is the only reading consistent with property_max = 2e-3
# below (a generous 2 L/mol upper bound).
units = 'm^3/mol'
interpolation_T = None
'''No interpolation transformation by default.'''
interpolation_P = None
'''No interpolation transformation by default.'''
interpolation_property = None
'''No interpolation transformation by default.'''
interpolation_property_inv = None
'''No interpolation transformation by default.'''
tabular_extrapolation_permitted = True
'''Allow tabular extrapolation by default.'''
property_min = 0
'''Minimum valid value of liquid molar volume. It should normally occur at the
triple point, and be well above this.'''
property_max = 2e-3
'''Maximum valid value of liquid molar volume. Generous limit.'''
ranked_methods = [PERRYDIPPR, VDI_PPDS, COOLPROP, MMSNM0FIT, VDI_TABULAR,
                  HTCOSTALDFIT, RACKETTFIT, CRC_INORG_L,
                  CRC_INORG_L_CONST, MMSNM0, HTCOSTALD,
                  YEN_WOODS_SAT, RACKETT, YAMADA_GUNN,
                  BHIRUD_NORMAL, TOWNSEND_HALES, CAMPBELL_THODOS, EOS]
'''Default rankings of the low-pressure methods.'''
ranked_methods_P = [COOLPROP, COSTALD_COMPRESSED, EOS]
'''Default rankings of the high-pressure methods.'''
def __init__(self, MW=None, Tb=None, Tc=None, Pc=None, Vc=None, Zc=None,
             omega=None, dipole=None, Psat=None, CASRN='', eos=None):
    '''Store the supplied chemical constants and discover usable methods.

    All parameters are optional and are stored as attributes verbatim;
    :obj:`load_all_methods` is then invoked to populate the method sets.
    '''
    # Caller-supplied chemical constants (see the class docstring for
    # meanings and units).
    self.CASRN = CASRN
    self.MW = MW
    self.Tb = Tb
    self.Tc = Tc
    self.Pc = Pc
    self.Vc = Vc
    self.Zc = Zc
    self.omega = omega
    self.dipole = dipole
    self.Psat = Psat
    self.eos = eos
    # Overall temperature limits across all methods; refined by
    # load_all_methods().
    self.Tmin = None  # no method can calculate the liquid molar volume below this
    self.Tmax = None  # no method can calculate the liquid molar volume above this
    # Tabular-data stores: (Ts, properties) and (Ts, Ps, properties)
    # tuples, indexed by provided or autogenerated name.
    self.tabular_data = {}
    self.tabular_data_interpolators = {}
    # Cached (extrapolator, spline) interp1d/interp2d pairs, keyed by
    # (name, interpolation settings) so that altering a transform
    # invalidates any previously built interpolator.
    self.tabular_data_P = {}
    self.tabular_data_interpolators_P = {}
    # Method bookkeeping filled in by the *_dependent_property machinery.
    self.sorted_valid_methods = []    # methods found valid at a specific T
    self.sorted_valid_methods_P = []  # methods found valid at a specific T, P
    self.user_methods = []            # user-ranked low-pressure methods
    self.user_methods_P = []          # user-ranked high-pressure methods
    # All methods available for this CASRN/properties; filled by
    # load_all_methods().
    self.all_methods = set()
    self.all_methods_P = set()
    self.load_all_methods()
def load_all_methods(self):
r'''Method which picks out coefficients for the specified chemical
from the various dictionaries and DataFrames storing it. All data is
stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`,
:obj:`all_methods` and obj:`all_methods_P` as a set of methods for
which the data exists for.
Called on initialization only. See the source code for the variables at
which the coefficients are stored. The coefficients can safely be
altered once the class is initialized. This method can be called again
to reset the parameters.
'''
methods = []
methods_P = []
Tmins, Tmaxs = [], []
if has_CoolProp and self.CASRN in coolprop_dict:
methods.append(COOLPROP); methods_P.append(COOLPROP)
self.CP_f = coolprop_fluids[self.CASRN]
Tmins.append(self.CP_f.Tt); Tmaxs.append(self.CP_f.Tc)
if self.CASRN in CRC_inorg_l_data.index:
methods.append(CRC_INORG_L)
_, self.CRC_INORG_L_MW, self.CRC_INORG_L_rho, self.CRC_INORG_L_k, self.CRC_INORG_L_Tm, self.CRC_INORG_L_Tmax = _CRC_inorg_l_data_values[CRC_inorg_l_data.index.get_loc(self.CASRN)].tolist()
Tmins.append(self.CRC_INORG_L_Tm); Tmaxs.append(self.CRC_INORG_L_Tmax)
if self.CASRN in Perry_l_data.index:
methods.append(PERRYDIPPR)
_, C1, C2, C3, C4, self.DIPPR_Tmin, self.DIPPR_Tmax = _Perry_l_data_values[Perry_l_data.index.get_loc(self.CASRN)].tolist()
self.DIPPR_coeffs = [C1, C2, C3, C4]
Tmins.append(self.DIPPR_Tmin); Tmaxs.append(self.DIPPR_Tmax)
if self.CASRN in VDI_PPDS_2.index:
methods.append(VDI_PPDS)
_, MW, Tc, rhoc, A, B, C, D = _VDI_PPDS_2_values[VDI_PPDS_2.index.get_loc(self.CASRN)].tolist()
self.VDI_PPDS_coeffs = [A, B, C, D]
self.VDI_PPDS_MW = MW
self.VDI_PPDS_Tc = Tc
self.VDI_PPDS_rhoc = rhoc
Tmaxs.append(self.VDI_PPDS_Tc)
if self.CASRN in _VDISaturationDict:
methods.append(VDI_TABULAR)
Ts, props = VDI_tabular_data(self.CASRN, 'Volume (l)')
self.VDI_Tmin = Ts[0]
self.VDI_Tmax = Ts[-1]
self.tabular_data[VDI_TABULAR] = (Ts, props)
Tmins.append(self.VDI_Tmin); Tmaxs.append(self.VDI_Tmax)
if self.Tc and self.CASRN in COSTALD_data.index:
methods.append(HTCOSTALDFIT)
self.COSTALD_Vchar = float(COSTALD_data.at[self.CASRN, 'Vchar'])
self.COSTALD_omega_SRK = float(COSTALD_data.at[self.CASRN, 'omega_SRK'])
Tmins.append(0); Tmaxs.append(self.Tc)
if self.Tc and self.Pc and self.CASRN in COSTALD_data.index and not np.isnan(COSTALD_data.at[self.CASRN, 'Z_RA']):
methods.append(RACKETTFIT)
self.RACKETT_Z_RA = float(COSTALD_data.at[self.CASRN, 'Z_RA'])
Tmins.append(0); Tmaxs.append(self.Tc)
if self.CASRN in CRC_inorg_l_const_data.index:
methods.append(CRC_INORG_L_CONST)
self.CRC_INORG_L_CONST_Vm = float(CRC_inorg_l_const_data.at[self.CASRN, 'Vm'])
# Roughly data at STP; not guaranteed however; not used for Trange
if all((self.Tc, self.Vc, self.Zc)):
methods.append(YEN_WOODS_SAT)
Tmins.append(0); Tmaxs.append(self.Tc)
if all((self.Tc, self.Pc, self.Zc)):
methods.append(RACKETT)
Tmins.append(0); Tmaxs.append(self.Tc)
if all((self.Tc, self.Pc, self.omega)):
methods.append(YAMADA_GUNN)
methods.append(BHIRUD_NORMAL)
| |
PLAN IOD': ['Patient'],
'CR IMAGE IOD': ['Patient'],
'RAW DATA IOD': ['Patient'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Patient'],
'ENHANCED MR IMAGE IOD': ['Patient'],
'UNIFIED PROCEDURE STEP IOD': ['Unified Procedure Step'],
'BASIC CARDIAC EP IOD': ['Patient'],
'RT TREATMENT SUMMARY RECORD IOD': ['Patient'],
'12-LEAD ECG IOD': ['Patient'],
'RESPIRATORY WAVEFORM IOD': ['Patient'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Patient'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Patient'],
'BASIC VOICE AUDIO IOD': ['Patient'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Patient'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Patient'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Patient'],
'SPECTACLE PRESCIPTION REPORT IOD': ['Patient'],
'BASIC TEXT SR IOD': ['Patient'],
'NM IMAGE IOD': ['Patient'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'LENSOMETRY MEASUREMENTS IOD': ['Patient'],
'MR SPECTROSCOPY IOD': ['Patient'],
'ENCAPSULATED PDF IOD': ['Patient'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'CHEST CAD SR IOD': ['Patient'],
'HEMODYNAMIC IOD': ['Patient'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Patient'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Patient'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Patient'],
'ENHANCED MR COLOR IMAGE IOD': ['Patient'],
'ENHANCED CT IMAGE IOD': ['Patient'],
'X-RAY RADIATION DOSE SR IOD': ['Patient'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Patient'],
'PROCEDURE LOG IOD': ['Patient'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Patient'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Patient'],
'STEREOMETRIC RELATIONSHIP IOD': ['Patient'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Patient'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Patient'],
'VL ENDOSCOPIC IMAGE IOD': ['Patient'],
'KERATOMETRY MEASUREMENTS IOD': ['Patient'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Patient'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Patient'],
'COMPREHENSIVE SR IOD': ['Patient'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Patient'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Patient'],
'SPATIAL FIDUCIALS IOD': ['Patient'],
'RT ION PLAN IOD': ['Patient'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'CT IMAGE IOD': ['Patient'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Patient'],
'RT ION BEAMS TREATMENT RECORD IOD': ['Patient'],
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Patient'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'RT DOSE IOD': ['Patient'],
'AMBULATORY ECG IOD': ['Patient'],
'SURFACE SEGMENTATION IOD': ['Patient'],
'MAMMOGRAPHY CAD SR IOD': ['Patient'],
'VL MICROSCOPIC IMAGE IOD': ['Patient'],
'RT BEAMS TREATMENT RECORD IOD': ['Patient'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Patient'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Patient'],
'RT IMAGE IOD': ['Patient'],
'SC IMAGE IOD': ['Patient'],
None: ['Patient', 'Unified Procedure Step'],
'SEGMENTATION IOD': ['Patient'],
'PET IMAGE IOD': ['Patient'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'DIGITAL X-RAY IMAGE IOD': ['Patient'],
'REAL WORLD VALUE MAPPING IOD': ['Patient'],
'SPATIAL REGISTRATION IOD': ['Patient'],
'COLON CAD SR IOD': ['Patient'],
'INTRAVASCULAR OCT IMAGE IOD': ['Patient'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'ENHANCED PET IMAGE IOD': ['Patient'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Patient'],
'US MULTI-FRAME IMAGE IOD': ['Patient'],
'ENHANCED X-RAY RF IMAGE IOD': ['Patient'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Patient'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Patient'],
'US IMAGE IOD': ['Patient'],
'GENERAL ECG IOD': ['Patient'],
'XRF IMAGE IOD': ['Patient'],
'ENCAPSULATED CDA IOD': ['Patient'],
'ENHANCED SR IOD': ['Patient'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Patient'],
'GENERAL AUDIO WAVEFORM IOD': ['Patient'],
'MR IMAGE IOD': ['Patient'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Patient'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Patient'],
'ARTERIAL PULSE WAVEFORM IOD': ['Patient'],
},
# SliceLocation
0x00201041L: {
'CT IMAGE IOD': ['Image'],
None: ['Image', 'Dose'],
'NM IMAGE IOD': ['Image'],
'RT DOSE IOD': ['Dose'],
'PET IMAGE IOD': ['Image'],
'MR IMAGE IOD': ['Image'],
},
# AcquisitionDeviceProcessingCode
0x00181401L: {
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'CR IMAGE IOD': ['Image'],
None: ['Image'],
},
# TransducerType
0x00186031L: {
'US MULTI-FRAME IMAGE IOD': ['Image'],
'US IMAGE IOD': ['Image'],
None: ['Image'],
},
# AcquisitionTimeSynchronized
0x00181800L: {
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Frame of Reference'],
None: ['Frame of Reference'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Frame of Reference'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Frame of Reference'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Frame of Reference'],
'12-LEAD ECG IOD': ['Frame of Reference'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Frame of Reference'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Frame of Reference'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Frame of Reference'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Frame of Reference'],
'BASIC CARDIAC EP IOD': ['Frame of Reference'],
'BASIC VOICE AUDIO IOD': ['Frame of Reference'],
'HEMODYNAMIC IOD': ['Frame of Reference'],
'US IMAGE IOD': ['Frame of Reference'],
'AMBULATORY ECG IOD': ['Frame of Reference'],
'GENERAL ECG IOD': ['Frame of Reference'],
'XRF IMAGE IOD': ['Frame of Reference'],
'RESPIRATORY WAVEFORM IOD': ['Frame of Reference'],
'X-RAY RADIATION DOSE SR IOD': ['Frame of Reference'],
'GENERAL AUDIO WAVEFORM IOD': ['Frame of Reference'],
'PROCEDURE LOG IOD': ['Frame of Reference'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Frame of Reference'],
'ARTERIAL PULSE WAVEFORM IOD': ['Frame of Reference'],
},
# ClinicalTrialProtocolName
0x00120021L: {
'BASIC STRUCTURED DISPLAY IOD': ['Patient'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Patient'],
'RT BRACHY TREATMENT RECORD IOD': ['Patient'],
'RT STRUCTURE SET IOD': ['Patient'],
'RT PLAN IOD': ['Patient'],
'CR IMAGE IOD': ['Patient'],
'RAW DATA IOD': ['Patient'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Patient'],
'ENHANCED MR IMAGE IOD': ['Patient'],
'BASIC CARDIAC EP IOD': ['Patient'],
'RT TREATMENT SUMMARY RECORD IOD': ['Patient'],
'12-LEAD ECG IOD': ['Patient'],
'RESPIRATORY WAVEFORM IOD': ['Patient'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Patient'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Patient'],
'BASIC VOICE AUDIO IOD': ['Patient'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Patient'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Patient'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Patient'],
'SPECTACLE PRESCIPTION REPORT IOD': ['Patient'],
'BASIC TEXT SR IOD': ['Patient'],
'NM IMAGE IOD': ['Patient'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'LENSOMETRY MEASUREMENTS IOD': ['Patient'],
'MR SPECTROSCOPY IOD': ['Patient'],
'ENCAPSULATED PDF IOD': ['Patient'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'CHEST CAD SR IOD': ['Patient'],
'HEMODYNAMIC IOD': ['Patient'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Patient'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Patient'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Patient'],
'ENHANCED MR COLOR IMAGE IOD': ['Patient'],
'ENHANCED CT IMAGE IOD': ['Patient'],
'X-RAY RADIATION DOSE SR IOD': ['Patient'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Patient'],
'PROCEDURE LOG IOD': ['Patient'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Patient'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Patient'],
'STEREOMETRIC RELATIONSHIP IOD': ['Patient'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Patient'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Patient'],
'VL ENDOSCOPIC IMAGE IOD': ['Patient'],
'KERATOMETRY MEASUREMENTS IOD': ['Patient'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Patient'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Patient'],
'COMPREHENSIVE SR IOD': ['Patient'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Patient'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Patient'],
'SPATIAL FIDUCIALS IOD': ['Patient'],
'RT ION PLAN IOD': ['Patient'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'CT IMAGE IOD': ['Patient'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Patient'],
'RT ION BEAMS TREATMENT RECORD IOD': ['Patient'],
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Patient'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'RT DOSE IOD': ['Patient'],
'AMBULATORY ECG IOD': ['Patient'],
'SURFACE SEGMENTATION IOD': ['Patient'],
'MAMMOGRAPHY CAD SR IOD': ['Patient'],
'VL MICROSCOPIC IMAGE IOD': ['Patient'],
'RT BEAMS TREATMENT RECORD IOD': ['Patient'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Patient'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Patient'],
'RT IMAGE IOD': ['Patient'],
'SC IMAGE IOD': ['Patient'],
None: ['Patient'],
'SEGMENTATION IOD': ['Patient'],
'PET IMAGE IOD': ['Patient'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'DIGITAL X-RAY IMAGE IOD': ['Patient'],
'REAL WORLD VALUE MAPPING IOD': ['Patient'],
'SPATIAL REGISTRATION IOD': ['Patient'],
'COLON CAD SR IOD': ['Patient'],
'INTRAVASCULAR OCT IMAGE IOD': ['Patient'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'ENHANCED PET IMAGE IOD': ['Patient'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Patient'],
'US MULTI-FRAME IMAGE IOD': ['Patient'],
'ENHANCED X-RAY RF IMAGE IOD': ['Patient'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Patient'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Patient'],
'US IMAGE IOD': ['Patient'],
'GENERAL ECG IOD': ['Patient'],
'XRF IMAGE IOD': ['Patient'],
'ENCAPSULATED CDA IOD': ['Patient'],
'ENHANCED SR IOD': ['Patient'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Patient'],
'GENERAL AUDIO WAVEFORM IOD': ['Patient'],
'MR IMAGE IOD': ['Patient'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Patient'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Patient'],
'ARTERIAL PULSE WAVEFORM IOD': ['Patient'],
},
# PercentPhaseFieldOfView
0x00180094L: {
'MR IMAGE IOD': ['Image'],
None: ['Image'],
},
# NumberOfSlices
0x00540081L: {
'NM IMAGE IOD': ['Image'],
'PET IMAGE IOD': ['Series'],
None: ['Image', 'Series'],
},
# GridFrameOffsetVector
0x3004000CL: {
'RT DOSE IOD': ['Dose'],
None: ['Dose'],
},
# VideoImageFormatAcquired
0x00181022L: {
'SC IMAGE IOD': ['Equipment'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Equipment'],
'ENCAPSULATED PDF IOD': ['Equipment'],
None: ['Equipment'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Equipment'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Equipment'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Equipment'],
'ENCAPSULATED CDA IOD': ['Equipment'],
},
# ProcedureStepProgressInformationSequence
0x00741002L: {
'UNIFIED PROCEDURE STEP IOD': ['Unified Procedure Step'],
None: ['Unified Procedure Step'],
},
# ContentDate
0x00080023L: {
'SC IMAGE IOD': ['Image'],
| |
<gh_stars>1-10
"""
Let's do this!
"""
from dis import (
opmap, HAVE_ARGUMENT,
hasjrel, hasjabs, haslocal, hasname, hasconst, hasfree,
get_instructions
)
import types
# CPython code-object flag bits (``CodeType.co_flags``), mirroring the
# CO_* constants exposed in ``inspect``.  Keys are looked up (upper-cased)
# by ``Assembler.assemble_flags`` to translate names in a ``.flags``
# section into the numeric flag word.
CO_FLAGS = {
    'OPTIMIZED': 0x1,
    'NEWLOCALS': 0x2,
    'VARARGS': 0x4,
    'VARKEYWORDS': 0x8,
    'NESTED': 0x10,
    'GENERATOR': 0x20,
    'NOFREE': 0x40,
    'COROUTINE': 0x80,
    'ITERABLE_COROUTINE': 0x100,
    'ASYNC_GENERATOR': 0x200
}
def asm(*args):
    """
    Decorator to assemble a function from a docstring in my imaginary asm
    format for python bytecode.

    Usable either bare (``@asm``) or with arguments (``@asm(a, b)``);
    any extra arguments are forwarded to the assembler, which exposes
    them as ``args`` to expressions in the ``.consts`` section.
    """
    if len(args) == 1 and callable(args[0]):
        # Bare ``@asm`` usage: forward NO extra assembler arguments.
        # (Previously an explicit ``()`` was passed here, so the bare form
        # produced args == ((),) while ``@asm()`` produced args == () —
        # the two spellings now behave identically.)
        return _asm(args[0])
    def decor(f):
        return _asm(f, *args)
    return decor
def _asm(f, *args):
    """
    Interior decorator: assemble the ``:::asm`` portion of *f*'s docstring
    into a code object and wrap it in a new function.

    The docstring is split on the ``:::asm`` marker: the text before it
    becomes the resulting function's docstring, the text after it is the
    assembly source.  Identity metadata (name, filename, argument counts)
    is copied from the original function; everything generated (bytecode,
    consts, names, varnames, stack size, flags, lnotab) comes from the
    assembler.
    """
    doc, source = f.__doc__.split(':::asm')
    co_in = f.__code__
    machine = Assembler(
        source,
        doc=doc,
        code=co_in,
        args=args
    )
    co_gen = machine.assemble()
    # NOTE(review): this is the 15-argument CodeType constructor of
    # CPython <= 3.7; 3.8 added ``posonlyargcount`` (and 3.11 added more),
    # so this module presumably targets Python 3.6/3.7 — confirm before
    # running on newer interpreters.
    co_out = types.CodeType(
        co_in.co_argcount,
        co_in.co_kwonlyargcount,
        co_gen.co_nlocals,
        co_gen.co_stacksize,
        co_gen.co_flags,
        co_gen.co_code,
        co_gen.co_consts,
        co_gen.co_names,
        co_gen.co_varnames,
        co_in.co_name,
        co_in.co_filename,
        co_gen.co_firstlineno,
        co_gen.co_lnotab,
        co_gen.co_freevars,
        co_gen.co_cellvars
    )
    # feel kinda iffy about this
    # If the assembled code has free variables it cannot be wrapped in a
    # plain FunctionType without a closure, so hand back the raw code
    # object and let the caller deal with it.
    if co_gen.co_freevars:
        return co_out
    result = types.FunctionType(
        code=co_out,
        globals=f.__globals__,
        argdefs=f.__defaults__,
        name=co_in.co_name,
    )
    result.__doc__ = doc
    return result
def preprocess(source):
    """
    Split assembly ``source`` text into named sections.

    Comments (everything after ``;``) are stripped and blank lines are
    dropped.  A line beginning with ``.`` opens a new section; any tokens
    following the section name on that same line become the section's
    first entry (e.g. ``.stacksize 4``).  Lines belonging to the ``code``
    section are stored as ``(line_number, text)`` tuples so source line
    numbers survive for lnotab generation; all other sections store plain
    strings.

    :param source: assembly source text
    :return: dict mapping section name to a list of lines; lines seen
             before any section header land in ``'unknown'``
    """
    lines = [line.split(';')[0].strip() for line in source.splitlines()]
    sections = {'unknown': []}
    current_section = 'unknown'
    for lno, line in enumerate(lines):
        if not line:
            continue
        if line.startswith('.'):
            tokens = line[1:].split()
            sections[tokens[0]] = []
            current_section = tokens[0]
            if len(tokens) > 1:
                # Re-join with single spaces: joining with '' fused
                # multi-token content such as ``.code LOAD_CONST 1`` into
                # ``LOAD_CONST1``, which could never match an opcode name.
                rest = ' '.join(tokens[1:])
                if current_section == 'code':
                    sections[tokens[0]].append((lno, rest))
                else:
                    sections[tokens[0]].append(rest)
            continue
        if current_section == 'code':
            sections[current_section].append((lno, line))
        else:
            sections[current_section].append(line)
    return sections
class Assembler:
    """
    Assembles preprocessed, section-based assembly source into a
    ``types.CodeType`` object.

    Sections may come from :func:`preprocess` (via *source*) or be set
    directly on :attr:`src` (mainly for testing convenience).
    """

    def __init__(self, source=None, doc=None, code=None, args=None):
        """
        Can be passed source to be preprocessed, or you can add sections
        manually via :attr:`src`.

        :param source: raw assembly text, or ``None`` to start empty
        :param doc: docstring to install as the first constant
        :param code: a code object to inherit first-line-number, flags
                     and parameter names from
        :param args: extra values exposed as ``args`` to ``.consts``
                     expressions
        """
        if source is not None:
            self.src = preprocess(source)
        else:
            self.src = {}
        self.flags = 0
        self.fl = 0  # co_firstlineno of the generated code
        self.varnames = ()
        if code is not None:
            # Inherit location, flags and parameters from the original.
            self.fl = code.co_firstlineno
            self.flags = code.co_flags
            self.varnames = code.co_varnames
        self.targets = {}  # jump-target label -> byte offset
        self.code = None
        self.argcount = len(self.varnames)
        self.locals = list(self.varnames)
        self.doc = doc
        self.args = args
        if doc is not None:
            self.lnodoc = len(doc.splitlines())
        else:
            self.lnodoc = 0

    def assemble(self):
        """
        Assemble source into a types.CodeType object and return it.
        """
        self.assemble_stacksize()
        self.assemble_flags()
        self.assemble_params()
        self.assemble_locals()
        self.assemble_names()
        self.assemble_freevars()
        self.assemble_cellvars()
        self.assemble_consts()
        self.assemble_code()
        self.assemble_lnotab()
        # NOTE(review): 15-argument CodeType signature (CPython <= 3.7);
        # 3.8+ added posonlyargcount — confirm target interpreter.
        return types.CodeType(
            self.argcount,
            0,
            len(self.varnames),
            self.stacksize,
            self.flags,
            self.code,
            self.consts,
            self.names,
            self.varnames,
            '',
            '',
            self.fl,
            self.lnotab,
            self.freevars,
            self.cellvars
        )

    def assemble_stacksize(self):
        """
        Read the required stack depth from the ``.stacksize`` section.
        (No validation yet — revisit when adding error messages.)
        """
        self.stacksize = int(self.src['stacksize'][0])

    def assemble_consts(self):
        """
        Consts must be in the .consts section one per line in
        the form ``name=expression`` to give an alias to the constant
        or just ``expression`` if you don't care to give it an alias
        and refer to it numerically in the assembly code.
        As with the CPython compiler, the first constant in the list
        will be the docstring. This will be given the name "__doc__".
        """
        consts = [self.doc]
        aliases = {'__doc__': 0}
        # Deliberately bound as a local so ``.consts`` expressions can
        # reference the decorator arguments as ``args`` inside eval().
        args = self.args
        for idx, line in enumerate(self.src.get('consts', ())):
            tokens = [t.strip() for t in line.split('=')]
            if len(tokens) == 1:
                consts.append(eval(tokens[0]))
            else:
                consts.append(eval(tokens[1]))
                aliases[tokens[0].lower()] = idx + 1
        self.consts = tuple(consts)
        self.consts_alias = aliases

    def assemble_params(self):
        """
        Function parameters. These can either be specified in a "params"
        directive or just taken from the initial function.
        These can be multiple on one line, comma separated.
        """
        for line in self.src.get('params', ()):
            self.locals.extend([s.strip() for s in line.split(',')])
        self.varnames = tuple(self.locals)
        self.argcount = len(self.varnames)

    def assemble_locals(self):
        """
        Local variables in addition to parameters.
        These can be multiple on one line, comma separated.
        """
        for line in self.src.get('locals', ()):
            self.locals.extend([s.strip() for s in line.split(',')])
        self.varnames = tuple(self.locals)

    def assemble_flags(self):
        """
        Flags can be given as the name of the flag (see ``CO_FLAGS``) or a
        hexadecimal literal, i.e. 0x4.
        These can be multiple on one line, comma separated.
        """
        if 'flags' not in self.src:
            return
        for line in self.src.get('flags', ()):
            flags = (s.strip() for s in line.split(','))
            for flagstr in flags:
                try:
                    flag = int(flagstr, 16)
                except ValueError:
                    flag = CO_FLAGS[flagstr.upper()]
                self.flags |= flag

    def assemble_names(self):
        """
        Global/attribute names (``co_names``).
        These can be multiple on one line, comma separated.
        """
        names = []
        for line in self.src.get('names', ()):
            names.extend([s.strip() for s in line.split(',')])
        self.names = tuple(names)

    def assemble_freevars(self):
        """
        Closure stuff (``co_freevars``).
        These can be multiple on one line, comma separated.
        """
        freevars = []
        for line in self.src.get('freevars', ()):
            freevars.extend([s.strip() for s in line.split(',')])
        self.freevars = tuple(freevars)

    def assemble_cellvars(self):
        """
        Closure stuff (``co_cellvars``).
        These can be multiple on one line, comma separated.
        """
        cellvars = []
        for line in self.src.get('cellvars', ()):
            cellvars.extend([s.strip() for s in line.split(',')])
        self.cellvars = tuple(cellvars)

    def assemble_code(self):
        """
        Assuming everything else has gone correctly, produce the bytecode.

        Every instruction occupies two bytes (3.6+ wordcode): opcode then
        argument (0 when the opcode takes none).  Symbolic arguments are
        left as strings here and resolved by :meth:`_fix_arguments`.
        """
        bytecode = []
        bytecode_lno = []
        pos = 0
        for lno, line in self.src['code']:
            line = self._extract_target(line, pos)
            if not line:
                # Line was only a jump-target label.
                continue
            tokens = line.split()
            op = tokens[0].upper()
            opcode = opmap[op]
            bytecode.append(opcode)
            bytecode_lno.append(lno)
            if opcode >= HAVE_ARGUMENT:
                arg = tokens[1]
                try:
                    arg = int(arg)
                except ValueError:
                    pass  # symbolic; resolved in _fix_arguments
                bytecode.append(arg)
            else:
                bytecode.append(0)
            pos += 2
        self.bytecode = bytecode
        self.bytecode_lno = bytecode_lno
        self._fix_arguments()
        self.code = bytes(bytecode)

    def _extract_target(self, line, pos):
        """
        Extract a target (if any) from the line and return what remains.
        Add the target position to the dict of targets.
        """
        # Split only on the first ':' so a stray colon later in the line
        # cannot blow up the 2-tuple unpacking below.
        tokens = line.split(':', 1)
        if len(tokens) == 1:
            return line
        target, ops = tokens
        self.targets[target] = pos
        return ops

    def _fix_arguments(self):
        """
        Replace target tuples in bytecode with correct positions or
        variable indices.
        """
        # first pass, replace non-int arguments with integer values
        for idx in range(0, len(self.bytecode), 2):
            arg = self.bytecode[idx+1]
            if not isinstance(arg, str):
                continue
            if self.bytecode[idx] in hasjabs:
                self.bytecode[idx+1] = self.targets[arg]
            elif self.bytecode[idx] in hasjrel:
                self.bytecode[idx+1] = (
                    self.targets[arg] - (idx + 2)
                )
            elif self.bytecode[idx] in haslocal:
                self.bytecode[idx+1] = self.locals.index(arg)
            elif self.bytecode[idx] in hasname:
                self.bytecode[idx+1] = self.names.index(arg)
            elif self.bytecode[idx] in hasconst:
                self.bytecode[idx+1] = self.consts_alias[arg]
            elif self.bytecode[idx] in hasfree:
                self.bytecode[idx+1] = self._find_freecell(arg)
        # second pass (quadratic), use EXTENDED_ARG ops as needed
        # to reduce down arguments to < 256
        reduced = False
        while not reduced:
            reduced = self._reduce_next_arg()

    def _find_freecell(self, arg):
        """
        Locate the free/cell index of the free/cell variable by name.
        Returns ``None`` when the name is in neither tuple.
        """
        if arg in self.cellvars:
            return self.cellvars.index(arg)
        if arg in self.freevars:
            return self.freevars.index(arg) + len(self.cellvars)

    def _reduce_next_arg(self):
        """
        Locate the first instruction with an argument over 255 and
        reduce it using EXTENDED_ARG.
        """
        reduced = True
        for idx in range(0, len(self.bytecode), 2):
            arg = self.bytecode[idx+1]
            if arg > 255:
                reduced = False
                # BUG FIX: the original compared the *index* ``idx-2`` to
                # the opcode value (``idx-2 == opmap['EXTENDED_ARG']``),
                # so the merge branch fired for the wrong instructions and
                # could corrupt an unrelated argument.  We must inspect
                # the opcode stored at that position instead.
                if idx > 0 and self.bytecode[idx-2] == opmap['EXTENDED_ARG']:
                    # Previous op is already an EXTENDED_ARG: fold the
                    # overflow into it — no insertion, no jump fix-ups.
                    self.bytecode[idx-1] += arg >> 8
                    self.bytecode[idx+1] %= 256
                else:
                    self.bytecode[idx+1] %= 256
                    self._insert_extended_arg(idx, arg >> 8)
                break
        return reduced

    def _insert_extended_arg(self, pos, val):
        """
        Insert an EXTENDED_ARG opcode at position pos with argument val.
        Then adjust jabs and rel values.
        """
        self.bytecode.insert(pos, val)
        self.bytecode.insert(pos, opmap['EXTENDED_ARG'])
        # Line-number table gets a filler entry for the synthetic op.
        self.bytecode_lno.insert(pos // 2, 0)
        # Relative jumps before the insertion point that jump across it
        # now have two more bytes to cover.
        for idx in range(0, pos, 2):
            if (self.bytecode[idx] in hasjrel and
                    self._get_full_arg(idx) + idx + 2 > pos
                    ):
                self.bytecode[idx+1] += 2
        # Absolute jump targets beyond the insertion point all shift by 2.
        for idx in range(0, len(self.bytecode), 2):
            if self.bytecode[idx] in hasjabs and self._get_full_arg(idx) > pos:
                self.bytecode[idx+1] += 2

    def _get_full_arg(self, pos):
        """
        Get the full argument value (augmented by previous EXTENDED_ARG
        instructions) of the instruction at position pos.
        """
        arg = self.bytecode[pos+1]
        mult = 1
        while pos > 0 and self.bytecode[pos-2] == opmap['EXTENDED_ARG']:
            mult *= 256
            arg += self.bytecode[pos-1] * mult
            pos -= 2
        return arg

    def assemble_lnotab(self):
        """
        Build the ``co_lnotab`` byte-pair table (bytecode-offset delta,
        line delta), splitting line deltas larger than 127 across entries.
        This is a hot mess.
        """
        lnotab = []
        current = 0
        dist = 2
        last_entry = 0
        for entry in self.bytecode_lno:
            if entry == 0:
                # Synthetic instruction (EXTENDED_ARG) — extends the span.
                dist += 2
                continue
            entry += self.lnodoc + 1
            more_entry = 0
            while entry - last_entry > 127:
                lnotab.append(0)
                lnotab.append(127)
                entry -= 127; more_entry += 127
            lnotab.append(current)
            lnotab.append(entry - last_entry)
            last_entry = entry + more_entry
            current = dist
            dist = 2
        self.lnotab = bytes(lnotab)
def dis(func):
"""
Disassemble a function into cpython_assembly format
"""
co = func.__code__
result = []
if func.__doc__:
| |
type=desc,
critical=cbit,
length=clen,
algorithm=_HI_ALGORITHM.get(_algo),
signature=_sign,
)
_plen = length - clen
if _plen:
self._read_fileng(_plen)
return hip_signature
def _read_para_echo_request_unsigned(self, code, cbit, clen, *, desc, length, version): # pylint: disable=unused-argument
"""Read HIP ``ECHO_REQUEST_UNSIGNED`` parameter.
Structure of HIP ``ECHO_REQUEST_UNSIGNED`` parameter [:rfc:`7401`]:
.. code-block:: text
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Opaque data (variable length) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Args:
code (int): parameter code
cbit (bool): critical bit
clen (int): length of contents
Keyword Args:
desc (pcapkit.const.hip.parameter.Parameter): parameter type
length (int): remaining packet length
version (Literal[1, 2]): HIP protocol version
Returns:
DataType_Param_Echo_Request_Unsigned: Parsed parameter data.
"""
_data = self._read_fileng(clen)
echo_request_unsigned = dict(
type=desc,
critical=cbit,
length=clen,
data=_data,
)
_plen = length - clen
if _plen:
self._read_fileng(_plen)
return echo_request_unsigned
def _read_para_echo_response_unsigned(self, code, cbit, clen, *, desc, length, version): # pylint: disable=unused-argument
"""Read HIP ``ECHO_RESPONSE_UNSIGNED`` parameter.
Structure of HIP ``ECHO_RESPONSE_UNSIGNED`` parameter [:rfc:`7401`]:
.. code-block:: text
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Opaque data (variable length) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Args:
code (int): parameter code
cbit (bool): critical bit
clen (int): length of contents
Keyword Args:
desc (pcapkit.const.hip.parameter.Parameter): parameter type
length (int): remaining packet length
version (Literal[1, 2]): HIP protocol version
Returns:
DataType_Param_Echo_Response_Unsigned: Parsed parameter data.
"""
_data = self._read_fileng(clen)
echo_response_unsigned = dict(
type=desc,
critical=cbit,
length=clen,
data=_data,
)
_plen = length - clen
if _plen:
self._read_fileng(_plen)
return echo_response_unsigned
def _read_para_relay_from(self, code, cbit, clen, *, desc, length, version): # pylint: disable=unused-argument
"""Read HIP ``RELAY_FROM`` parameter.
Structure of HIP ``RELAY_FROM`` parameter [:rfc:`5770`]:
.. code-block:: text
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Port | Protocol | Reserved |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
| Address |
| |
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Args:
code (int): parameter code
cbit (bool): critical bit
clen (int): length of contents
Keyword Args:
desc (pcapkit.const.hip.parameter.Parameter): parameter type
length (int): remaining packet length
version (Literal[1, 2]): HIP protocol version
Returns:
DataType_Param_Relay_From: Parsed parameter data.
Raises:
ProtocolError: If ``clen`` is **NOT** ``20``.
"""
if clen != 20:
raise ProtocolError(f'HIPv{version}: [ParamNo {code}] invalid format')
_port = self._read_unpack(2)
_ptcl = self._read_unpack(1)
_resv = self._read_fileng(1)
_addr = self._read_fileng(16)
relay_from = dict(
type=desc,
critical=cbit,
length=clen,
port=_port,
protocol=TP_PROTO.get(_ptcl),
ip=ipaddress.ip_address(_addr),
)
return relay_from
def _read_para_relay_to(self, code, cbit, clen, *, desc, length, version): # pylint: disable=unused-argument
"""Read HIP ``RELAY_TO`` parameter.
Structure of HIP ``RELAY_TO`` parameter [:rfc:`5770`]:
.. code-block:: text
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Port | Protocol | Reserved |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
| Address |
| |
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Args:
code (int): parameter code
cbit (bool): critical bit
clen (int): length of contents
Keyword Args:
desc (pcapkit.const.hip.parameter.Parameter): parameter type
length (int): remaining packet length
version (Literal[1, 2]): HIP protocol version
Returns:
DataType_Param_Relay_To: Parsed parameter data.
Raises:
ProtocolError: If ``clen`` is **NOT** ``20``.
"""
if clen != 20:
raise ProtocolError(f'HIPv{version}: [ParamNo {code}] invalid format')
_port = self._read_unpack(2)
_ptcl = self._read_unpack(1)
_resv = self._read_fileng(1)
_addr = self._read_fileng(16)
relay_to = dict(
type=desc,
critical=cbit,
length=clen,
port=_port,
protocol=TP_PROTO.get(_ptcl),
ip=ipaddress.ip_address(_addr),
)
return relay_to
def _read_para_overlay_ttl(self, code, cbit, clen, *, desc, length, version): # pylint: disable=unused-argument
"""Read HIP ``OVERLAY_TTL`` parameter.
Structure of HIP ``OVERLAY_TTL`` parameter [:rfc:`6078`]:
.. code-block:: text
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| TTL | Reserved |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Args:
code (int): parameter code
cbit (bool): critical bit
clen (int): length of contents
Keyword Args:
desc (pcapkit.const.hip.parameter.Parameter): parameter type
length (int): remaining packet length
version (Literal[1, 2]): HIP protocol version
Returns:
DataType_Param_Relay_To: Parsed parameter data.
Raises:
ProtocolError: If ``clen`` is **NOT** ``4``.
"""
if clen != 4:
raise ProtocolError(f'HIPv{version}: [ParamNo {code}] invalid format')
_ttln = self._read_unpack(2)
overlay_ttl = dict(
type=desc,
critical=cbit,
length=clen,
ttl=_ttln,
)
return overlay_ttl
def _read_para_route_via(self, code, cbit, clen, *, desc, length, version): # pylint: disable=unused-argument
"""Read HIP ``ROUTE_VIA`` parameter.
Structure of HIP ``ROUTE_VIA`` parameter [:rfc:`6028`]:
.. code-block:: text
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Flags | Reserved |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
| HIT #1 |
| |
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
. . .
. . .
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
| HIT #n |
| |
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Args:
code (int): parameter code
cbit (bool): critical bit
clen (int): length of contents
Keyword Args:
desc (pcapkit.const.hip.parameter.Parameter): parameter type
length (int): remaining packet length
version (Literal[1, 2]): HIP protocol version
Returns:
DataType_Param_Route_Via: Parsed parameter data.
Raises:
ProtocolError: If the parameter is malformed.
"""
if (clen - 4) % 16 != 0:
raise ProtocolError(f'HIPv{version}: [ParamNo {code}] invalid format')
_flag = self._read_binary(2)
_resv = self._read_fileng(2)
_addr = list()
for _ in range((clen - 4) // 16):
_addr.append(ipaddress.ip_address(self._read_fileng(16)))
route_via = dict(
type=desc,
critical=cbit,
length=clen,
flags=dict(
symmetric=bool(int(_flag[0], base=2)),
must_follow=bool(int(_flag[1], base=2)),
),
ip=tuple(_addr),
)
return route_via
def _read_para_from(self, code, cbit, clen, *, desc, length, version): # pylint: disable=unused-argument
"""Read HIP ``FROM`` parameter.
Structure of HIP ``FROM`` parameter [:rfc:`8004`]:
.. code-block:: text
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
| Address |
| |
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Args:
code (int): parameter code
cbit (bool): critical bit
clen (int): length of contents
Keyword Args:
desc (pcapkit.const.hip.parameter.Parameter): parameter type
length (int): remaining packet length
version (Literal[1, 2]): HIP protocol version
Returns:
DataType_Param_From: Parsed parameter data.
Raises:
ProtocolError: If ``clen`` is **NOT** ``16``.
"""
if clen != 16:
raise ProtocolError(f'HIPv{version}: [ParamNo {code}] invalid format')
_addr = self._read_fileng(16)
from_ = dict(
type=desc,
critical=cbit,
length=clen,
ip=ipaddress.ip_address(_addr),
)
return from_
def _read_para_rvs_hmac(self, code, cbit, clen, *, desc, length, version): # pylint: disable=unused-argument
"""Read HIP ``RVS_HMAC`` parameter.
Structure of HIP ``RVS_HMAC`` parameter [:rfc:`8004`]:
.. code-block:: text
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
| HMAC |
/ /
/ +-------------------------------+
| | Padding |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Args:
code (int): parameter code
cbit (bool): critical bit
clen (int): length of contents
Keyword Args:
desc (pcapkit.const.hip.parameter.Parameter): parameter type
length (int): remaining packet length
version (Literal[1, 2]): HIP protocol version
Returns:
DataType_Param_RVS_HMAC: Parsed parameter data.
"""
_hmac = self._read_fileng(clen)
rvs_hmac = dict(
type=desc,
critical=cbit,
length=clen,
hmac=_hmac,
)
_plen = length - clen
if _plen:
self._read_fileng(_plen)
return rvs_hmac
def _read_para_via_rvs(self, code, cbit, clen, *, desc, length, version): # pylint: disable=unused-argument
"""Read HIP ``VIA_RVS`` parameter.
Structure of HIP ``VIA_RVS`` parameter [:rfc:`6028`]:
.. code-block:: text
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 | |
# Repository: binary-bisam/LeleNet -- file: LeleNet_trn.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 17 17:56:34 2021
@author: Manuel
Full implementation
run from terminal: python ~/LeleNet/py3/LeleNet_trn.py "U-Net" 10 40
"""
__author__ = "<NAME>"
#### parse arguments-----------------------------------------------------------
# Import arguments
import argparse, pickle
def parseArguments(argv = None):
    """Build the command-line parser and parse the training arguments.

    Args:
        argv (list[str] | None): argument strings to parse; ``None`` (the
            default) parses ``sys.argv`` exactly as before, so existing
            callers are unaffected. Passing a list enables testing.

    Returns:
        argparse.Namespace: parsed training settings.
    """
    def str2bool(value):
        # argparse's ``type = bool`` is a classic pitfall: bool("False") is
        # True because any non-empty string is truthy. Parse explicitly.
        if isinstance(value, bool):
            return value
        return value.strip().lower() not in ("false", "f", "0", "no", "n", "")

    parser = argparse.ArgumentParser()
    # Positional mandatory arguments
    parser.add_argument("model", help = "Model; one of U-Net, FCDenseNet",
                        type = str)
    parser.add_argument("bs", help = "Batchsize (int)",
                        type = int)
    parser.add_argument("ep", help = "Training epochs (int)",
                        type = int)
    # Optional arguments
    parser.add_argument("-lr", "--lr",
                        help = "Initial learning rate (float).",
                        type = float, default = 1e-4)
    parser.add_argument("-lrd", "--lrd",
                        help = "Learning rate decay factor (float).",
                        type = float, default = 0.95)
    parser.add_argument("-lrs", "--lrs",
                        help = "Learning rate decay step size (int).",
                        type = int, default = 2)
    parser.add_argument("-esp", "--esp",
                        help = "Early stopping patience (int).",
                        type = int, default = None)
    parser.add_argument("-op", "--op",
                        help = "Optimizer. 'Adam', 'rms', or 'sgd'.",
                        type = str, default = "rms")
    parser.add_argument("-ki", "--ki",
                        help = "Kernel initialiser.",
                        type = str, default = None)
    parser.add_argument("-do", "--do",
                        help = "Dropout rate.",
                        type = float, default = 0.1)
    parser.add_argument("-xf", "--xf",
                        help = "Image format; either png, jpg, or tif.",
                        type = str, default = "png")
    parser.add_argument("-yf", "--yf",
                        help = "Image format; either png, jpg, or tif.",
                        type = str, default = "png")
    parser.add_argument("-imgr", "--imgr",
                        help = "Image x resolution (rows).", type = int,
                        default = None)
    parser.add_argument("-imgc", "--imgc",
                        help = "Image y resolution (columns).", type = int,
                        default = None)
    parser.add_argument("-imgd", "--imgd",
                        help = "Image dimensions (rows = columns).",
                        type = int, default = None)
    parser.add_argument("-imgdim", "--imgdim",
                        help = "X image dimensions (colours).", type = int,
                        default = 3)
    parser.add_argument("-nc", "--nc",
                        help = "Number of classes.", type = int,
                        default = None)
    parser.add_argument("-ww", "--ww",
                        help = ("Weights scaling factor. Inverse weights =" +
                                "1/(weights**ww) or 1/math.log(weights, ww)"),
                        type = float, default = 0.0)
    parser.add_argument("-ws", "--ws",
                        help = ("Weight scaling (either 'exp' or 'log'."),
                        type = str, default = "exp")
    parser.add_argument("-wd", "--wd",
                        help = "Alternative working directory.", type = str,
                        default = "")
    parser.add_argument("-yr", "--yr",
                        help = ("Sampling date of the data" +
                                "as MM_YYYY. Default: '03_2021'"),
                        type = str, default = "03_2021")
    parser.add_argument("-r", "--r",
                        help = ("Resume from checkpoint. Either 'f' (False;" +
                                " default), 't' (True), or date of a specif" +
                                "ic training event (folder name)."),
                        type = str, default = "f")
    # ``type = str2bool`` fixes the original ``type = bool``, under which
    # "--sv False" silently evaluated to True.
    parser.add_argument("-save_settings", "--sv",
                        help = "Save training settings.", type = str2bool,
                        default = True)

    # Parse arguments (argv is None -> sys.argv, as before)
    args = parser.parse_args(argv)
    return args
if __name__ == "__main__":
# Parse the arguments
args = parseArguments()
# debug mode
if False:
import pickle
saved_args = "C:\\Users\\Manuel\\Nextcloud\\Masterarbeit\\py3\\vrs\\train_settings.pkl"
with open(saved_args, "rb") as f:
args = pickle.load(f)
args.wd = "home"
mdl = args.model
bs = args.bs
epochz = args.ep
init_lr = args.lr
decay_lr = args.lrd
step_lr = args.lrs
es_patience = args.esp if args.esp is not None else epochz
optmer = args.op
kernel_init = args.ki
drop = args.do
xf = args.xf
yf = args.yf
imgdim = args.imgdim
ww = args.ww
ws = args.ws
wd = args.wd
year = args.yr
resume_training = args.r
# case insensitive arguments
mdl, optmer, xf, yf, wd, resume_training = mdl.casefold(), optmer.casefold(),\
xf.casefold(), yf.casefold(), wd.casefold(), resume_training.casefold()
#### basic settings------------------------------------------------------------
import platform, sys, datetime, pathlib, os
OS = platform.system()
OS_version = platform.release()
py_version = sys.version
t_start = datetime.datetime.utcnow()
import tensorflow as tf
print("Running on " + OS + " " + OS_version + ".\nPython version: " +
py_version + "\nTensorflow version: " + tf.__version__ +
"\nUTC time (start): " + str(t_start) +
"\nLocal time (start): " + str(datetime.datetime.now()))
# Model (one of "mod_UNet", "mod_FCD")
if mdl in ["u-net", "unet", "mod_unet", "mod_u-net", "u_net"]:
mod = "mod_UNet"
elif mdl in ["fcd", "fcdensenet", "fc-densenet", "fc-dense-net"]:
mod = "mod_FCD"
else:
raise ValueError("Unexpected input for argument 'model': " + str(mdl))
### general directory functions------------------------------------------------
import numpy as np
if wd == "home":
if OS == "Linux":
if platform.release() == "4.18.0-193.60.2.el8_2.x86_64":
wd = "/home/kit/ifgg/mp3890/LeleNet"
else:
wd = "/home/manuel/Nextcloud/Masterarbeit"
elif OS == "Windows":
wd = os.path.join("C:\\", "Users", "Manuel",\
"Nextcloud", "Masterarbeit")
else:
raise Exception("OS not detected.")
elif wd == "":
pydir = os.path.dirname(os.path.realpath(__file__))
wd = os.path.dirname(pydir)
else:
wd = args.wd
def dir_fig(fig_id = None):
if fig_id == None:
return os.path.join(wd, "fig")
else:
return os.path.join(wd, "fig", fig_id)
def dir_dat(dat_id = None):
if dat_id == None:
return os.path.join(wd, "dat")
else:
dat_id = dat_id.split(",")
return os.path.join(wd, "dat", *dat_id)
def dir_out(*out_id):
if len(out_id) < 1:
return os.path.join(wd, "out")
else:
out_lst = list(out_id)
out_ids = os.path.sep.join(out_lst)
return os.path.join(wd, "out", out_ids)
def dir_var(pkl_name = None):
if pkl_name == None:
return os.path.join(wd, "py3", "vrs")
else:
return os.path.join(wd, "py3", "vrs", pkl_name + ".pkl")
def save_var(variables, name):
os.makedirs(dir_var(), exist_ok = True)
with open(dir_var(pkl_name = name), "wb") as f:
pickle.dump(variables, f)
def get_var(name):
with open(dir_var(pkl_name = name), "rb") as f:
return pickle.load(f)
os.chdir(wd)
with open(dir_out("System_info.txt"), "w") as f:
f.write("Most recent run on " + OS + " " + OS_version +
".\nPython version: " +
py_version + "\nTensorflow version: " + tf.__version__ +
"\nUTC time (start): " + str(t_start) +
"\nLocal time (start): " + str(datetime.datetime.now()))
if args.sv:
save_var(args, "train_settings")
print("Saved training settings.")
#### data preparation directory functions--------------------------------------
def dir_omk(plot_id = None, myear = None, type_ext = ""):
# returns list!
if plot_id == None:
if myear == None:
return dir_dat("omk")
else:
return os.path.join(dir_dat("omk"), myear)
else:
if myear == None:
return list(pathlib.Path(dir_dat("omk")) \
.glob("**/*" + plot_id + type_ext + ".tif"))
else:
return list(pathlib.Path(os.path.join(dir_dat("omk"), myear)) \
.glob("**/*" + plot_id + type_ext + ".tif"))
    def dir_tls(myear = None, dset = None, plot_id = None):
        """Return a tile (tls) directory path.

        With ``myear`` and ``dset`` given, returns ``<wd>/dat/tls/<myear>/<dset>/0``
        (optionally extended by ``plot_id``); with fewer arguments, falls back
        to shallower directories.

        NOTE(review): the ``raise`` statements below are unreachable because
        each is preceded by a ``return`` in the same branch. The messages
        ("... Returning tile directory.") suggest a warning-then-return was
        intended -- confirm intent before changing behavior.
        """
        if plot_id == None:
            if myear == None:
                if dset == None:
                    return dir_dat("tls")
                else:
                    return dir_dat("tls")
                    # unreachable (dead code after return)
                    raise Exception("Missing year. Returning tile directory.")
            else:
                if dset == None:
                    return os.path.join(dir_dat("tls"), myear)
                else:
                    return os.path.join(dir_dat("tls"), myear, dset, "0")
        else:
            if myear == None:
                return dir_dat("tls")
                # unreachable (dead code after return)
                raise Exception("Missing year. Returning tile directory.")
            else:
                if dset == None:
                    return os.path.join(dir_dat("tls"), myear)
                    # unreachable (dead code after return)
                    raise Exception("Missing dset (X or y)." +\
                                    "Returning tile directory.")
                else:
                    return os.path.join(dir_dat("tls"), myear, dset, "0", plot_id)
def save_dataset_info(variables, year = year, name = "dset_info"):
tile_dir = dir_tls(myear = year)
os.makedirs(tile_dir, exist_ok = True)
with open(tile_dir + os.path.sep + name + ".pkl", "wb") as f:
pickle.dump(variables, f)
def get_dataset_info(year = year, name = "dset_info"):
tile_dir = dir_tls(myear = year)
with open(tile_dir + os.path.sep + name + ".pkl", "rb") as f:
return pickle.load(f)
def toINT(filename):
imgINT = filename.astype("uint8")
return imgINT
# get tile dimensions if not specified-----------------------------------------
from PIL import Image
if (args.imgr is None or args.imgc is None) and args.imgd is None:
imgs = list(pathlib.Path(os.path.dirname(dir_tls(myear = year,\
dset = "y")))\
.glob("**/*." + yf))
im = Image.open(imgs[0])
w, h = im.size
im.close()
# image dimensions
if args.imgr != args.imgc:
print("Warning: Arguments imgr and imgc do not match.")
if args.imgr is not None:
imgr = args.imgr
else:
imgr = h
if args.imgc is not None:
imgc = args.imgc
else:
imgc = w
if args.imgd is not None:
print("Argument imgd set. imgd overwrites imgr and imgc.")
imgr = args.imgd
imgc = args.imgd
# Data preparation-------------------------------------------------------------
### Run file DataPreparation.py
### read dictionary to group species to classes, if need be
import pandas as pd
specdict = pd.read_excel(dir_dat("xls,SpeciesList.xlsx"),
sheet_name = "Dictionary", header = 0)
# exec(open("A1_DataPreparation.py").read())
## load information generated during data preparation--------------------------
classes, classes_decoded, NoDataValue, no_data_class, abc = get_dataset_info()
N_CLASSES = len(classes) if no_data_class or abc else len(classes) + 1
if args.nc is not None:
N_CLASSES = args.nc
# Setup for training-----------------------------------------------------------
os.chdir(os.path.join(wd, "py3"))
os.chdir(wd)
# import modules---------------------------------------------------------------
#already done in A0_LeleNet.py: import tensorflow as tf
#import tensorflow_io as tfio
from tensorflow import keras as ks
AUTOTUNE = tf.data.experimental.AUTOTUNE
tf.__version__
ks.__version__
## make GPU available----------------------------------------------------------
phys_devs = tf.config.experimental.list_physical_devices("GPU")
print("N GPUs available: ", len(phys_devs))
#if len(phys_devs) >= 1 and False:
# tf.config.experimental.set_memory_growth(phys_devs[0], True)
#else:
# #os.environ['CUDA_VISIBLE_DEVICES'] = "-1"
# my_devices = tf.config.experimental.list_physical_devices(device_type = "CPU")
# tf.config.experimental.set_visible_devices(devices = my_devices, device_type = "CPU")
# print("No GPUs used.")
## general options/info--------------------------------------------------------
N_img = len(list(pathlib.Path(dir_tls(myear = year, dset = "X")) \
.glob("**/*." + xf)))
N_val = len(list(pathlib.Path(dir_tls(myear = year, dset = "X_val")) \
.glob("**/*." + xf)))
zeed = 42
## build data loader-----------------------------------------------------------
def parse_image(img_path: str) -> dict:
# read image
image = tf.io.read_file(img_path)
if xf == "png":
image = tf.image.decode_png(image, channels = 3)
elif xf == "jpg":
image = tf.image.decode_jpeg(image, channels = 3)
elif xf == "tif":
import tensorflow_io as tfio
image = tfio.experimental.image.decode_tiff(image)
else:
print("Invalid X data format. Allowed formats: png, jpg, tif")
# read mask
mask_path = tf.strings.regex_replace(img_path, "X", "y")
mask_path = tf.strings.regex_replace(mask_path, "X." + xf, "y." + yf)
mask_path = tf.strings.regex_replace(mask_path, "image", "mask")
mask = tf.io.read_file(mask_path)
if yf == "png":
mask = tf.image.decode_png(mask, channels = | |
this is a multi-dimensional array)
if is_array_like(data_point) and len(data_point) > 1:
column_name = meta_data.full_name(skip_parents=True)
_table = functools.partial(array_like_to_table, data_point, column_name, meta_data['data_type'],
format=meta_data.get('format'),
full_label=self.structure.full_label,
array_label=self.structure.label,
_copy_data=False)
# Set widget to display the open table button
data_box['open_table'].configure(command=lambda *args: open_table(self._viewer, _table()))
data_box['open_table'].pack(ipadx=1)
data_box['entry'].pack_forget()
# Handle case where data point is a single scalar value (this is most of the time)
else:
# Convert array-like with single value to a scalar
if is_array_like(data_point) and len(data_point) == 1:
data_point = data_point[0]
# Output bit strings as hex values.
# Note: we ensure dtype has correct length; otherwise trailing null bytes are skipped by NumPy
if meta_data.data_type().issubtype('BitString'):
data_point_bytes = np.asarray(data_point, dtype=field.dtype).tobytes()
data_point = '0x' + binascii.hexlify(data_point_bytes).decode('ascii').upper()
# Decode byte strings to unicode strings. Done because byte strings with UTF-8 characters cannot be
# properly formatted, and so that on Python 2 the warning below does not error out if a non-ASCII
# data value was not able to be formatted.
elif isinstance(data_point, six.binary_type):
data_point = data_point.decode('utf-8')
# Set display values for data points
# (except masked values, which are skipped altogether as these values should only be empty
# values inside delimited tables.)
if data_point is not np.ma.masked:
# Format scalar value
if self.menu_option('display_data_as_formatted'):
# Skip formatting scaled/offset values, because the PDS4 Standard is ambiguous on whether
# field_format is pre- or post- scaling/offset. This can lead into incorrect formatting.
is_scaled = meta_data.get('scaling_factor', 1) != 1 or meta_data.get('value_offset', 0) != 0
try:
if ('format' in meta_data) and (not is_scaled):
# Convert from NumPy types into Python native types, otherwise the format statement below
# can return the format itself, when format is invalid, rather than raising an exception.
if hasattr(data_point, 'item'):
data_point = data_point.item()
data_point = meta_data['format'] % data_point
except (ValueError, TypeError):
self._issue_warning("Unable to format value '{0}' into field_format '{1}'; "
"displaying unformatted value"
.format(data_point, meta_data['format']), show=False)
# Set widget to display the data point value
data_box['entry'].insert(0, data_point)
data_box['entry'].pack(fill='both', expand=2, padx=4)
data_box['open_table'].pack_forget()
    # Called when the vertical scrollbar is used, this method calls _scroll() to adjust the scrollbar's
    # position and also calls _update_vertical_table_display()
    def _vertical_scroll(self, action, first_cmd, second_cmd=''):
        """Move the vertical scrollbar, then redraw the rows at the new offset.

        *action*, *first_cmd* and *second_cmd* follow the Tk scrollbar command
        protocol; *action* is unused because _scroll() infers drag-vs-step
        from *second_cmd*.
        """
        self._scroll(self._vert_scrollbar, first_cmd, second_cmd)
        self._update_vertical_table_display(self._vert_scrollbar.get())
    # Called when the horizontal scrollbar is used, this method calls _scroll() to adjust the scrollbar's
    # position and also calls _update_horizontal_table_display()
    def _horizontal_scroll(self, action, first_cmd, second_cmd=''):
        """Move the horizontal scrollbar, then redraw the columns at the new offset.

        *action*, *first_cmd* and *second_cmd* follow the Tk scrollbar command
        protocol; *action* is unused because _scroll() infers drag-vs-step
        from *second_cmd*.
        """
        self._scroll(self._horz_scrollbar, first_cmd, second_cmd)
        self._update_horizontal_table_display(self._horz_scrollbar.get())
# Called by _vertical_scroll() and _horizontal_scroll() when any scrollbar is moved, this method
# adjusts the scrollbar's position to the new position
@classmethod
def _scroll(cls, scrollbar, first_cmd, second_cmd):
# Retrieve scrollbar position
position = scrollbar.get()
scrollbar_length = position[1] - position[0]
# This means scrollbar was dragged; first_cmd = offset
if second_cmd == '':
start_pos = float(first_cmd)
stop_pos = float(first_cmd) + scrollbar_length
# Prevent scroll beyond upper or left edge
if start_pos < 0:
start_pos = 0
stop_pos = 0 + scrollbar_length
# Prevent scroll beyond lower or right edge
if stop_pos > 1:
start_pos = 1 - scrollbar_length
stop_pos = 1
scrollbar.set(start_pos, stop_pos)
# This means either scrollbar scroll button was clicked, the empty area in the scrollbar was clicked,
# or the mouse scrollwheel was used; in all cases first_cmd = +n or -n, where n is an integer
# representing scroll speed; second_cmd = 'pages|units', where pages means scroll entire pages at a
# time and units is the normal scroll
else:
# Empty area in scrollbar was clicked
if second_cmd == 'pages':
num_total_steps = 1 / scrollbar_length
# Clicking in scrollbar empty area generally implies you want a pretty fast scroll,
# therefore we ensure that the entire area can be scrolled in no more than 100 steps
if num_total_steps > 100:
step = 0.01 * abs(int(first_cmd))
else:
step = scrollbar_length * abs(int(first_cmd))
# Scrollbar scroll button was clicked or mouse scrollwheel was used
else:
step = scrollbar_length / 5 * abs(int(first_cmd))
if int(first_cmd) > 0:
# Scroll down or right
if (position[1] + step) > 1:
scrollbar.set(1 - scrollbar_length, 1)
else:
scrollbar.set(position[0] + step, position[1] + step)
else:
# Scroll up or left
if (position[0] - step) < 0:
scrollbar.set(0, scrollbar_length)
else:
scrollbar.set(position[0] - step, position[1] - step)
# Adjusts the length of the scrollbars based on the total number of rows and columns in the table,
# versus how many are being displayed. Called any time window is resized.
def _set_scrollbar_length(self, num_display_rows, num_display_cols):
scrollbars = [{'vertical': self._vert_scrollbar}, {'horizontal': self._horz_scrollbar}]
for scrollbar in scrollbars:
scrollbar_type = list(scrollbar.keys())[0]
position = scrollbar[scrollbar_type].get()
start_offset = position[0]
if scrollbar_type == 'vertical':
end_offset = start_offset + float(num_display_rows) / self._settings['num_rows']
else:
end_offset = start_offset + float(num_display_cols) / self._settings['num_columns']
if end_offset > 1:
start_offset -= end_offset - 1
end_offset = 1
scrollbar[scrollbar_type].set(start_offset, end_offset)
self._widget.update_idletasks()
# Adjusts data values of self._data_boxes (values in boxes being displayed) to match the vertical position
# the user is currently viewing
def _update_vertical_table_display(self, position):
# Get the new display_start_row of the table (once it is updated)
start_row = int(round(self._settings['num_rows'] * position[0]))
if (start_row + self._settings['num_display_rows']) > self._settings['num_rows']:
start_row = self._settings['num_rows'] - self._settings['num_display_rows']
# Loop over each data box
for data_box in self._data_boxes:
# Skip the 'column name' row since it is statically displayed at the top regardless of scrolling,
# otherwise adjust the entry contents
if data_box['row'] != -1:
data_row_idx = data_box['row'] + start_row
data_col_idx = data_box['col'] + self._settings['display_start_col']
data_box['entry'].configure(state='normal')
data_box['entry'].delete(0, 'end')
# First column is the 'row count' row
if data_box['col'] == -1:
data_box['entry'].insert(0, data_row_idx)
# All other columns get data from the data table
else:
self._set_data_point_widget(data_row_idx, data_col_idx, data_box)
data_box['entry'].configure(state='readonly')
self._settings['display_start_row'] = start_row
# Adjusts data values of self._data_boxes (values in boxes being displayed) to match the horizontal
# position the user is currently viewing
def _update_horizontal_table_display(self, position):
# Get the new display_start_col of the table (once it is updated)
start_col = int(round(self._settings['num_columns'] * position[0]))
if (start_col + self._settings['num_display_cols']) > self._settings['num_columns']:
start_col = self._settings['num_columns'] - self._settings['num_display_cols']
# Loop over each entry
for data_box in self._data_boxes:
# Skip the 'row count' column since it is statically displayed at the left regardless of
# scrolling, otherwise adjust the entry contents
if data_box['col'] != -1:
data_row_idx = data_box['row'] + self._settings['display_start_row']
data_col_idx = data_box['col'] + start_col
data_box['entry'].configure(state='normal')
data_box['entry'].delete(0, 'end')
# First row is the 'column names' header row
if data_box['row'] == -1:
column_name = self.data[data_col_idx].meta_data.full_name(skip_parents=True)
data_box['entry'].insert(0, column_name)
# All other rows get data from the data table
else:
self._set_data_point_widget(data_row_idx, data_col_idx, data_box)
data_box['entry'].configure(state='readonly')
self._settings['display_start_col'] = start_col
    # Called on mouse wheel scroll action, scrolls window up or down
    def _mousewheel_scroll(self, event):
        """Translate a mouse-wheel event into a vertical 'units' scroll.

        The sign is inverted so wheel-up scrolls up. On non-macOS platforms
        ``event.delta`` arrives in multiples of 120 and is normalized to unit
        steps; macOS ('Darwin') already reports small deltas.
        """
        event_delta = int(-1 * event.delta)
        if platform.system() != 'Darwin':
            event_delta //= 120
        self._vertical_scroll('scroll', event_delta, 'units')
# Determine the number of rows and columns that will fit in the viewer (with current window size)
def _num_fit_rows_columns(self):
self._widget.update_idletasks()
# The Display_Frame contains only the table UI elements, meaning exactly what must fit
# Rows follows the formula: (Display_Frame_Width - Column_Header_Height) / Row_Height
# Columns follow the formula: (Display_Frame_Width - Row_Count_Width) / Column_Width
num_display_rows = int(floor((self._scrollable_canvas.winfo_height() - 34) / 24.))
num_display_cols = int(floor((self._scrollable_canvas.winfo_width() - 62) / 138.))
if num_display_rows > self._settings['num_rows']:
num_display_rows = self._settings['num_rows']
elif num_display_rows < 0:
num_display_rows = 0
if num_display_cols > self._settings['num_columns']:
num_display_cols = self._settings['num_columns']
elif num_display_cols < 0:
num_display_cols = 0
return num_display_rows, num_display_cols
# Called when the window has been resized, this method calculates which rows and columns are able to fit
# with-in the new window size and then calls either _draw_data() or _erase_data() to add or remove the
# appropriate rows/columns
def _window_resize(self, event):
self._update_window_dimensions()
# Do not continue (since we do not need to re-display anything) if no data is displayed
if not self._data_open:
return
# Determine the number of rows and columns that will fit in the viewer (with current window size)
num_display_rows, num_display_cols = self._num_fit_rows_columns()
# Determine number of rows/columns missing or extra rows being shown in comparison to what can fit
num_missing_rows = num_display_rows - self._settings['num_display_rows']
num_missing_cols = num_display_cols - self._settings['num_display_cols']
# Calculate start and stop | |
[t["class_names"] for t in tasks]
self.num_anchor_per_locs = [2 * n for n in num_classes]
self.box_coder = box_coder
box_code_sizes = [box_coder.code_size] * len(num_classes)
self.with_cls = with_cls
self.with_reg = with_reg
self.in_channels = in_channels
self.num_classes = num_classes
self.reg_class_agnostic = reg_class_agnostic
self.encode_rad_error_by_sin = encode_rad_error_by_sin
self.encode_background_as_zeros = encode_background_as_zeros
self.use_sigmoid_score = use_sigmoid_score
self.box_n_dim = self.box_coder.code_size
self.anchor_dim = self.box_coder.n_dim
self.loss_cls = build_loss(loss_cls)
self.loss_reg = build_loss(loss_bbox)
if loss_aux is not None:
self.loss_aux = build_loss(loss_aux)
self.loss_norm = loss_norm
if not logger:
logger = logging.getLogger("MultiGroupHead")
self.logger = logger
self.dcn = None
self.zero_init_residual = False
self.use_direction_classifier = loss_aux is not None
if loss_aux:
self.direction_offset = direction_offset
self.bev_only = True if mode == "bev" else False
num_clss = []
num_preds = []
num_dirs = []
for num_c, num_a, box_cs in zip(
num_classes, self.num_anchor_per_locs, box_code_sizes
):
if self.encode_background_as_zeros:
num_cls = num_a * num_c
else:
num_cls = num_a * (num_c + 1)
num_clss.append(num_cls)
if self.use_direction_classifier:
num_dir = num_a * 2
num_dirs.append(num_dir)
else:
num_dir = None
# here like CenterHead, we regress to diffrent targets in separate heads
num_pred = copy.deepcopy(common_heads)
num_preds.append(num_pred)
logger.info(
f"num_classes: {num_classes}, num_dirs: {num_dirs}"
)
self.shared_conv = nn.Sequential(
nn.Conv2d(in_channels, share_conv_channel,
kernel_size=3, padding=1, bias=True),
nn.BatchNorm2d(share_conv_channel),
nn.ReLU(inplace=True)
)
self.tasks = nn.ModuleList()
for task_id, (num_pred, num_cls) in enumerate(zip(num_preds, num_clss)):
self.tasks.append(
Head(
share_conv_channel,
num_pred,
num_cls,
use_dir=self.use_direction_classifier,
num_dir=num_dirs[task_id]
if self.use_direction_classifier
else None,
header=False,
init_bias=init_bias,
num_classes=num_classes[task_id],
)
)
logger.info("Finish MultiGroupHead Initialization")
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
if self.dcn is not None:
for m in self.modules():
if isinstance(m, Bottleneck) and hasattr(m, "conv2_offset"):
constant_init(m.conv2_offset, 0)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
constant_init(m.norm3, 0)
elif isinstance(m, BasicBlock):
constant_init(m.norm2, 0)
else:
raise TypeError("pretrained must be a str or None")
def forward(self, x):
x = self.shared_conv(x)
ret_dicts = []
for task in self.tasks:
ret_dicts.append(task(x))
return ret_dicts
def prepare_loss_weights(
self,
labels,
loss_norm=dict(
type="NormByNumPositives", pos_cls_weight=1.0, neg_cls_weight=1.0,
),
dtype=torch.float32,
):
loss_norm_type = getattr(LossNormType, loss_norm["type"])
pos_cls_weight = loss_norm["pos_cls_weight"]
neg_cls_weight = loss_norm["neg_cls_weight"]
cared = labels >= 0
# cared: [N, num_anchors]
positives = labels > 0
negatives = labels == 0
negative_cls_weights = negatives.type(dtype) * neg_cls_weight
cls_weights = negative_cls_weights + pos_cls_weight * positives.type(dtype)
reg_weights = positives.type(dtype)
if loss_norm_type == LossNormType.NormByNumExamples:
num_examples = cared.type(dtype).sum(1, keepdim=True)
num_examples = torch.clamp(num_examples, min=1.0)
cls_weights /= num_examples
bbox_normalizer = positives.sum(1, keepdim=True).type(dtype)
reg_weights /= torch.clamp(bbox_normalizer, min=1.0)
elif loss_norm_type == LossNormType.NormByNumPositives:
pos_normalizer = positives.sum(1, keepdim=True).type(dtype)
reg_weights /= torch.clamp(pos_normalizer, min=1.0)
cls_weights /= torch.clamp(pos_normalizer, min=1.0)
elif loss_norm_type == LossNormType.NormByNumPosNeg:
pos_neg = torch.stack([positives, negatives], dim=-1).type(dtype)
normalizer = pos_neg.sum(1, keepdim=True) # [N, 1, 2]
cls_normalizer = (pos_neg * normalizer).sum(-1) # [N, M]
cls_normalizer = torch.clamp(cls_normalizer, min=1.0)
# cls_normalizer will be pos_or_neg_weight/num_pos_or_neg
normalizer = torch.clamp(normalizer, min=1.0)
reg_weights /= normalizer[:, 0:1, 0]
cls_weights /= cls_normalizer
elif loss_norm_type == LossNormType.DontNorm: # support ghm loss
pos_normalizer = positives.sum(1, keepdim=True).type(dtype)
reg_weights /= torch.clamp(pos_normalizer, min=1.0)
else:
raise ValueError(f"unknown loss norm type. available: {list(LossNormType)}")
return cls_weights, reg_weights, cared
    def loss(self, example, preds_dicts, **kwargs):
        """Compute per-task training losses for the multi-group anchor head.

        Args:
            example (dict): ground-truth batch; uses ``anchors``, ``labels``
                and ``reg_targets`` (indexed per task).
            preds_dicts (list[dict]): per-task predictions from forward(),
                each with ``box_preds``, ``cls_preds`` and (when the
                direction classifier is enabled) ``dir_cls_preds``.
            **kwargs: ``mode`` truthy selects a reduced set of regression
                target channels ([0, 1, 3, 4, 6]).

        Returns:
            defaultdict(list): loss terms keyed by name, one list entry per
            task (batch-key layout converted to key-batch at the end).
        """
        # NOTE(review): voxels/num_points/coors are unpacked but never used
        # in this method — presumably kept for interface symmetry; confirm.
        voxels = example["voxels"]
        num_points = example["num_points"]
        coors = example["coordinates"]
        batch_anchors = example["anchors"]
        batch_size_device = batch_anchors[0].shape[0]
        rets = []
        for task_id, preds_dict in enumerate(preds_dicts):
            # NOTE(review): `losses` is never used; the commented-out
            # acc/pr metrics below used to write into it.
            losses = dict()
            num_class = self.num_classes[task_id]
            box_preds = preds_dict["box_preds"]
            cls_preds = preds_dict["cls_preds"]
            labels = example["labels"][task_id]
            if kwargs.get("mode", False):
                # NOTE(review): reg_targets_left ([2, 5] channels) is
                # computed but unused here — confirm intent.
                reg_targets = example["reg_targets"][task_id][:, :, [0, 1, 3, 4, 6]]
                reg_targets_left = example["reg_targets"][task_id][:, :, [2, 5]]
            else:
                reg_targets = example["reg_targets"][task_id]
            # Per-anchor weights and the mask of anchors that contribute
            cls_weights, reg_weights, cared = self.prepare_loss_weights(
                labels, loss_norm=self.loss_norm, dtype=torch.float32,
            )
            # Zero out "don't care" labels, then add trailing class dim
            cls_targets = labels * cared.type_as(labels)
            cls_targets = cls_targets.unsqueeze(-1)
            loc_loss, cls_loss = create_loss(
                self.loss_reg,
                self.loss_cls,
                box_preds,
                cls_preds,
                cls_targets,
                cls_weights,
                reg_targets,
                reg_weights,
                num_class,
                self.encode_background_as_zeros,
                self.encode_rad_error_by_sin,
                bev_only=self.bev_only,
                box_code_size=self.box_n_dim,
            )
            # Reduce over anchors, average over batch, apply loss weights
            loc_loss_reduced = loc_loss.sum() / batch_size_device
            loc_loss_reduced *= self.loss_reg._loss_weight
            cls_pos_loss, cls_neg_loss = _get_pos_neg_loss(cls_loss, labels)
            cls_pos_loss /= self.loss_norm["pos_cls_weight"]
            cls_neg_loss /= self.loss_norm["neg_cls_weight"]
            cls_loss_reduced = cls_loss.sum() / batch_size_device
            cls_loss_reduced *= self.loss_cls._loss_weight
            loss = loc_loss_reduced + cls_loss_reduced
            if self.use_direction_classifier:
                # Auxiliary 2-way heading-direction classification loss
                dir_targets = get_direction_target(
                    example["anchors"][task_id],
                    reg_targets,
                    dir_offset=self.direction_offset,
                )
                dir_logits = preds_dict["dir_cls_preds"].view(batch_size_device, -1, 2)
                # Only positive anchors contribute, normalized per sample
                weights = (labels > 0).type_as(dir_logits)
                weights /= torch.clamp(weights.sum(-1, keepdim=True), min=1.0)
                dir_loss = self.loss_aux(dir_logits, dir_targets, weights=weights)
                dir_loss = dir_loss.sum() / batch_size_device
                loss += dir_loss * self.loss_aux._loss_weight
                # losses['loss_aux'] = dir_loss
            # Per-channel localization loss, for logging
            loc_loss_elem = [
                loc_loss[:, :, i].sum() / batch_size_device
                for i in range(loc_loss.shape[-1])
            ]
            ret = {
                "loss": loss,
                "cls_pos_loss": cls_pos_loss.detach().cpu(),
                "cls_neg_loss": cls_neg_loss.detach().cpu(),
                "dir_loss_reduced": dir_loss.detach().cpu()
                if self.use_direction_classifier
                else torch.tensor(0),
                "cls_loss_reduced": cls_loss_reduced.detach().cpu().mean(),
                "loc_loss_reduced": loc_loss_reduced.detach().cpu().mean(),
                "loc_loss_elem": [elem.detach().cpu() for elem in loc_loss_elem],
                "num_pos": (labels > 0)[0].sum(),
                "num_neg": (labels == 0)[0].sum(),
            }
            # self.rpn_acc.clear()
            # losses['acc'] = self.rpn_acc(
            #     example['labels'][task_id],
            #     cls_preds,
            #     cared,
            # )
            # losses['pr'] = {}
            # self.rpn_pr.clear()
            # prec, rec = self.rpn_pr(
            #     example['labels'][task_id],
            #     cls_preds,
            #     cared,
            # )
            # for i, thresh in enumerate(self.rpn_pr.thresholds):
            #     losses["pr"][f"prec@{int(thresh*100)}"] = float(prec[i])
            #     losses["pr"][f"rec@{int(thresh*100)}"] = float(rec[i])
            rets.append(ret)
        """convert batch-key to key-batch
        """
        rets_merged = defaultdict(list)
        for ret in rets:
            for k, v in ret.items():
                rets_merged[k].append(v)
        return rets_merged
def predict(self, example, preds_dicts, test_cfg, **kwargs):
"""start with v1.6.0, this function don't contain any kitti-specific code.
Returns:
predict: list of pred_dict.
pred_dict: {
box3d_lidar: [N, 7] 3d box.
scores: [N]
label_preds: [N]
metadata: meta-data which contains dataset-specific information.
for kitti, it contains image idx (label idx),
for nuscenes, sample_token is saved in it.
}
"""
voxels = example["voxels"]
num_points = example["num_points"]
coors = example["coordinates"]
batch_anchors = example["anchors"]
batch_size_device = batch_anchors[0].shape[0]
rets = []
for task_id, preds_dict in enumerate(preds_dicts):
batch_size = batch_anchors[task_id].shape[0]
if "metadata" not in example or len(example["metadata"]) == 0:
meta_list = [None] * batch_size
else:
meta_list = example["metadata"]
batch_task_anchors = example["anchors"][task_id].view(
batch_size, -1, self.anchor_dim
)
if "anchors_mask" not in example:
batch_anchors_mask = [None] * batch_size
else:
batch_anchors_mask = example["anchors_mask"][task_id].view(
batch_size, -1
)
batch_box_preds = preds_dict["box_preds"]
batch_cls_preds = preds_dict["cls_preds"]
if self.bev_only:
box_ndim = self.box_n_dim - 2
else:
box_ndim = self.box_n_dim
if kwargs.get("mode", False):
batch_box_preds_base = batch_box_preds.view(batch_size, -1, box_ndim)
batch_box_preds = batch_task_anchors.clone()
batch_box_preds[:, :, [0, 1, 3, 4, 6]] = batch_box_preds_base
else:
batch_box_preds = batch_box_preds.view(batch_size, -1, box_ndim)
num_class_with_bg = self.num_classes[task_id]
if not self.encode_background_as_zeros:
num_class_with_bg = self.num_classes[task_id] + 1
batch_cls_preds = batch_cls_preds.view(batch_size, -1, num_class_with_bg)
batch_reg_preds = self.box_coder.decode_torch(
batch_box_preds[:, :, : self.box_coder.code_size], batch_task_anchors
)
if self.use_direction_classifier:
batch_dir_preds = preds_dict["dir_cls_preds"]
batch_dir_preds = batch_dir_preds.view(batch_size, -1, 2)
else:
batch_dir_preds = [None] * batch_size
rets.append(
self.get_task_detections(
task_id,
num_class_with_bg,
test_cfg,
batch_cls_preds,
batch_reg_preds,
batch_dir_preds,
batch_anchors_mask,
meta_list,
)
)
# Merge branches results
num_tasks = len(rets)
ret_list = []
# len(rets) == task num
# len(rets[0]) == batch_size
num_preds = len(rets)
num_samples = len(rets[0])
ret_list = []
for i in range(num_samples):
ret = {}
for k in rets[0][i].keys():
if k in ["box3d_lidar", "scores"]:
ret[k] = torch.cat([ret[i][k] for ret in rets])
elif k in ["label_preds"]:
flag = 0
for j, num_class in enumerate(self.num_classes):
rets[j][i][k] += flag
flag += num_class
ret[k] = torch.cat([ret[i][k] for ret in rets])
elif k == "metadata":
# metadata
ret[k] = rets[0][i][k]
ret_list.append(ret)
return ret_list
def get_task_detections(
self,
task_id,
num_class_with_bg,
test_cfg,
batch_cls_preds,
batch_reg_preds,
batch_dir_preds=None,
batch_anchors_mask=None,
meta_list=None,
):
predictions_dicts = []
post_center_range = test_cfg.post_center_limit_range
if len(post_center_range) > 0:
post_center_range = torch.tensor(
post_center_range,
dtype=batch_reg_preds.dtype,
device=batch_reg_preds.device,
)
for box_preds, cls_preds, dir_preds, a_mask, meta in zip(
batch_reg_preds,
batch_cls_preds,
batch_dir_preds,
batch_anchors_mask,
meta_list,
):
if a_mask is not None:
box_preds = box_preds[a_mask]
cls_preds = cls_preds[a_mask]
box_preds = box_preds.float()
cls_preds = cls_preds.float()
if self.use_direction_classifier:
if a_mask is not None:
dir_preds = dir_preds[a_mask]
dir_labels = torch.max(dir_preds, dim=-1)[1]
if self.encode_background_as_zeros:
# this don't support softmax
assert self.use_sigmoid_score is True
total_scores = torch.sigmoid(cls_preds)
else:
# encode background as first element in one-hot vector
if self.use_sigmoid_score:
total_scores = torch.sigmoid(cls_preds)[..., 1:]
else:
total_scores = F.softmax(cls_preds, dim=-1)[..., 1:]
# Apply NMS in birdeye view
if test_cfg.nms.use_rotate_nms:
nms_func = box_torch_ops.rotate_nms
else:
nms_func = box_torch_ops.nms
feature_map_size_prod = (
batch_reg_preds.shape[1] // self.num_anchor_per_locs[task_id]
)
if test_cfg.nms.use_multi_class_nms:
assert self.encode_background_as_zeros is True
boxes_for_nms = box_preds[:, [0, 1, 3, 4, -1]]
if not test_cfg.nms.use_rotate_nms:
box_preds_corners = box_torch_ops.center_to_corner_box2d(
boxes_for_nms[:, :2], boxes_for_nms[:, 2:4], boxes_for_nms[:, 4]
)
boxes_for_nms = box_torch_ops.corner_to_standup_nd(
box_preds_corners
)
selected_boxes, selected_labels, selected_scores = [], [], []
selected_dir_labels = []
scores = total_scores
boxes = boxes_for_nms
selected_per_class = []
score_threshs = [test_cfg.score_threshold] * self.num_classes[task_id]
pre_max_sizes = [test_cfg.nms.nms_pre_max_size] * self.num_classes[
task_id
]
post_max_sizes = [test_cfg.nms.nms_post_max_size] * self.num_classes[
task_id
]
iou_thresholds = [test_cfg.nms.nms_iou_threshold] * self.num_classes[
task_id
]
for class_idx, score_thresh, | |
# Repository: Mazharul-Hossain/sequence-based-recommendations
from __future__ import print_function
import argparse
import os
import re
import sys
from shutil import copyfile
import numpy as np
import pandas as pd
from sklearn.preprocessing import MultiLabelBinarizer
def command_parser():
    """Parse the command-line options for the preprocessing script.

    Returns the parsed namespace, augmented with `dirname`, the directory
    containing the input file (with a trailing slash).
    """
    arg_parser = argparse.ArgumentParser()
    add = arg_parser.add_argument
    add('-f', dest='filename', help='Input file', required=True, type=str)
    add('--columns',
        help='Order of the columns in the file (eg: "uirt"), u for user, i for item, t for timestamp, '
             'r for rating. If r is not present a default rating of 1 is given to all interaction. If '
             't is not present interactions are assumed to be in chronological order. Extra columns '
             'are ignored. Default: uit',
        default="uit", type=str)
    add('--sep',
        help='Separator between the column. If unspecified pandas will try to guess the separator',
        default="\t", type=str)
    add('--min_user_activity',
        help='Users with less interactions than this will be removed from the dataset. Default: 2',
        default=2, type=int)
    add('--min_item_pop',
        help='Items with less interactions than this will be removed from the dataset. Default: 5',
        default=5, type=int)
    add('--val_size',
        help='Number of users to put in the validation set. If in (0,1) it will be interpreted as the '
             'fraction of total number of users. Default: 0.1',
        default=0.1, type=float)
    add('--test_size',
        help='Number of users to put in the test set. If in (0,1) it will be interpreted as the '
             'fraction of total number of users. Default: 0.1',
        default=0.1, type=float)
    add('--seed', help='Seed for the random train/val/test split', default=1, type=int)
    add('--mf', help='Use movie features input file.', default="", type=str)
    add('--c_i',
        help='Use custom testing set (contains users rated after 2017-01-01 place in test set).',
        action='store_true')
    args = arg_parser.parse_args()
    # All output goes next to the input file.
    args.dirname = os.path.dirname(os.path.abspath(args.filename)) + "/"
    return args
def warn_user(dirname):
    """
    Ask the user to confirm before creating files in `dirname`.

    Exits the program unless the user answers "y" or "Y".
    """
    print('This program will create a lot of files and directories in ' + dirname)
    answer = input('Are you sure that you want to do that ? [y/n]')
    # Bug fix: the original condition was `answer != "y" or answer != "Y"`,
    # which is always true (every string differs from at least one of the
    # two), so the script exited even when the user confirmed.
    if answer not in ("y", "Y"):
        sys.exit(0)
def create_dirs(dirname):
    """Create the data/, models/ and results/ sub-directories under `dirname` if they are missing."""
    for sub in ("data", "models", "results"):
        target = dirname + sub
        if not os.path.exists(target):
            os.makedirs(target)
def load_data(filename, columns, separator):
    """
    Load the data from filename and sort it according to timestamp.
    Returns a dataframe with 3 columns: user_id, item_id, rating
    """
    print('Load data...')
    # NOTE(review): `separator` is currently unused -- the commented-out call
    # below used it, but the active read_csv relies on pandas' defaults
    # (comma separator and the file's own header row). Confirm this is
    # intentional for the input files in use, or restore the sep= call.
    # data = pd.read_csv(filename, sep=separator, names=list(columns), index_col=False, usecols=range(len(columns)))
    data = pd.read_csv(filename)
    print(data.dtypes)
    # Rename the file's header names to the single-letter codes in `columns`,
    # pairing them up positionally (zip stops at the shorter sequence, so
    # extra file columns keep their original names and are ignored later).
    replace_columns = {}
    for k, v in zip(data.columns, list(columns)):
        replace_columns[k] = v
    data.rename(columns=replace_columns, inplace=True)
    print(data.dtypes)
    if 'r' not in columns:
        # Add a column of default ratings
        data['r'] = 1
    if 't' in columns:
        # sort according to the timestamp column
        if data['t'].dtype == np.int64: # integer values are assumed to be unix timestamps in seconds
            data['t'] = pd.to_datetime(data['t'], unit='s')
        else:
            data['t'] = pd.to_datetime(data['t'])
        print('Sort data in chronological order...')
        data.sort_values('t', inplace=True)
    return data
def remove_rare_elements(data, min_user_activity, min_item_popularity):
    """
    Drop items with too few interactions, then users left below the
    activity threshold after that item filter.

    min_user_activity: minimum number of interactions a user must have.
    min_item_popularity: minimum number of interactions an item must have.
    NB: the item constraint may end up only loosely satisfied, because
    removing inactive users afterwards can create new rare items.
    """
    print('Remove inactive users and rare items...')
    # Keep only items that are popular enough.
    item_counts = data.groupby('i').size()
    popular_items = item_counts[item_counts >= min_item_popularity].index
    data = data[data['i'].isin(popular_items)]
    # Keep only users that remain active enough after the item filter.
    user_counts = data.groupby('u').size()
    active_users = user_counts[user_counts >= min_user_activity].index
    data = data[data['u'].isin(active_users)]
    return data
def save_index_mapping(data, dirname, separator="\t"):
    """
    Map original user/item ids to consecutive numerical ids and persist both
    mappings under dirname + "data/".

    NB: users and items removed in previous steps will not appear in the mapping.
    """
    # Pandas categorical codes give exactly the consecutive numerical ids we want.
    print('Map original users and items ids to consecutive numerical ids...')
    data['u_original'] = data['u'].astype('category')
    data['i_original'] = data['i'].astype('category')
    data['u'] = data['u_original'].cat.codes
    data['i'] = data['i_original'].cat.codes
    print('Save ids mapping to file...')

    def _dump_mapping(original, new, path):
        # One row per unique original id, sorted for reproducible output.
        mapping = pd.DataFrame({'original_id': original, 'new_id': new})
        mapping.sort_values('original_id', inplace=True)
        mapping.drop_duplicates(subset='original_id', inplace=True)
        mapping.to_csv(path, sep=separator, index=False)

    _dump_mapping(data['u_original'], data['u'], dirname + "data/user_id_mapping.txt")
    _dump_mapping(data['i_original'], data['i'], dirname + "data/item_id_mapping.txt")
    return data
def split_data(data, nb_val_users, nb_test_users, dirname, custom=False):
    """
    Split the data set into training, validation and test sets.

    Each user belongs to exactly one set.
    nb_val_users / nb_test_users: number of users to put in the validation /
    test set; values in (0, 1) are interpreted as fractions of the total
    number of users.
    custom: if True, test users are drawn from users active after 2018-01-01
    (requires a 't' column).

    Saves the three sets in the triplets format under dirname + "data/" and
    returns (train_set, val_set, test_set).

    Raises ValueError if there are not enough users for the requested splits.
    """
    nb_users = data['u'].nunique()
    # Fractions are converted into absolute user counts.
    if nb_val_users < 1:
        nb_val_users = round(nb_val_users * nb_users)
    if nb_test_users < 1:
        nb_test_users = round(nb_test_users * nb_users)
    nb_test_users = int(nb_test_users)
    nb_val_users = int(nb_val_users)
    if nb_users <= nb_val_users + nb_test_users:
        raise ValueError('Not enough users in the dataset: choose less users for validation and test splits')

    def extract_n_users(df, n, test=False, i_custom=False):
        # Pick n distinct users and split df into (their rows, the rest).
        if test and i_custom:
            temp_df = df.loc[df.t > '2018-01-01']
            print("custom test dataset: ", len(temp_df['u'].unique()), n)
            if len(temp_df['u'].unique()) > n:
                # Bug fix: sample without replacement so exactly n distinct
                # users are selected. The original relied on the default
                # np.random.choice behaviour (with replacement), which can
                # pick duplicates and yield fewer users than requested.
                users_ids = np.random.choice(temp_df['u'].unique(), n, replace=False)
            else:
                users_ids = np.array(temp_df['u'].unique())
        else:
            users_ids = np.random.choice(df['u'].unique(), n, replace=False)
        n_set = df[df['u'].isin(users_ids)]
        print("test dataset: ", n_set.shape)
        remain_set = df.drop(n_set.index)
        return n_set, remain_set

    print('Split data into training, validation and test sets...')
    test_set, tmp_set = extract_n_users(data, nb_test_users, test=True, i_custom=custom)
    val_set, train_set = extract_n_users(tmp_set, nb_val_users, i_custom=custom)
    print('Save training, validation and test sets in the triplets format...')
    train_set.to_csv(dirname + "data/train_set_triplets", sep="\t", columns=['u', 'i', 'r'], index=False, header=False)
    val_set.to_csv(dirname + "data/val_set_triplets", sep="\t", columns=['u', 'i', 'r'], index=False, header=False)
    test_set.to_csv(dirname + "data/test_set_triplets", sep="\t", columns=['u', 'i', 'r'], index=False, header=False)
    return train_set, val_set, test_set
def gen_sequences(data, half=False):
    """
    Yield per-user interaction sequences from data.

    Each sequence looks like [user_id, item_1, rating_1, item_2, rating_2, ...].
    When half is True, sequences are trimmed to half of their (item, rating)
    pairs (used to produce the extended training set).
    """

    def _trim(sequence):
        # Keep the user id plus the first half of the (item, rating) pairs.
        return sequence[:1 + 2 * int((len(sequence) - 1) / 4)] if half else sequence

    # Mergesort is stable, so chronological order is preserved within each user.
    data = data.sort_values('u', kind="mergesort")
    current = []
    current_user = -1
    for user, item, rating in zip(data['u'], data['i'], data['r']):
        if user != current_user:
            # Emit the previous user's sequence only if it holds at least
            # two (item, rating) pairs.
            if len(current) > 3:
                yield _trim(current)
            current_user = user
            current = [user]
        current.extend([item, rating])
    # The trailing sequence is emitted unconditionally (no minimum-length check).
    yield _trim(current)
def make_sequence_format(train_set, val_set, test_set, dirname):
    """
    Save the train/validation/test sets in the sequence format.

    Also builds the extended training set ("train_set_sequences+"), which
    appends the first half of each validation and test sequence to the
    training sequences.
    """

    def _write_sequences(df, handle, half=False):
        # One space-separated sequence per line.
        for sequence in gen_sequences(df, half=half):
            handle.write(' '.join(map(str, sequence)) + "\n")

    print('Save the training set in the sequences format...')
    with open(dirname + "data/train_set_sequences", "w") as f:
        _write_sequences(train_set, f)
    print('Save the validation set in the sequences format...')
    with open(dirname + "data/val_set_sequences", "w") as f:
        _write_sequences(val_set, f)
    print('Save the test set in the sequences format...')
    with open(dirname + "data/test_set_sequences", "w") as f:
        _write_sequences(test_set, f)
    # sequences+ holds every training sequence plus half of the val/test ones.
    print('Save the extended training set in the sequences format...')
    copyfile(dirname + "data/train_set_sequences", dirname + "data/train_set_sequences+")
    with open(dirname + "data/train_set_sequences+", "a") as f:
        _write_sequences(val_set, f, half=True)
        _write_sequences(test_set, f, half=True)
def save_data_stats(data, train_set, val_set, test_set, dirname):
    """Write basic statistics about the full data set and each split to dirname + "data/stats"."""
    print('Save stats...')

    def _get_stats(df):
        # users, items, interactions, and the longest per-user sequence.
        stats = [df['u'].nunique(), df['i'].nunique(), len(df.index), df.groupby('u').size().max()]
        return " \t ".join(map(str, stats))

    with open(dirname + "data/stats", "w") as f:
        f.write("set \t n_users \t n_items \t n_interactions \t longest_sequence\n")
        for label, subset in (("Full", data), ("Train", train_set), ("Val", val_set), ("Test", test_set)):
            f.write(label + " \t " + _get_stats(subset) + "\n")
def make_readme(dirname, val_set, test_set):
data_readme = '''The following files were automatically generated by preprocess.py
user_id_mapping
mapping between the users ids in the original dataset and the new users ids.
the first column contains the new id and the second the original id.
Inactive users might have been | |
'3584570':{'en': 'AMT'},
'3584571':{'en': 'Tismi'},
'3584572':{'en': 'Telavox AB'},
'3584573':{'en': 'AMT'},
'3584574':{'en': 'DNA'},
'3584575':{'en': 'AMT'},
'3584576':{'en': 'DNA'},
'3584577':{'en': 'DNA'},
'3584578':{'en': 'DNA'},
'3584579':{'en': 'DNA'},
'358458':{'en': 'Elisa'},
'35846':{'en': 'Elisa'},
'35850':{'en': 'Elisa'},
'35987':{'en': 'Vivacom'},
'35988':{'en': 'A1'},
'35989':{'en': 'Telenor'},
'359988':{'en': 'Bob'},
'359989':{'en': 'A1'},
'359996':{'en': 'Bulsatcom'},
'359999':{'en': 'MAX'},
'3620':{'en': 'Telenor'},
'3630':{'en': 'Magyar Telekom'},
'36312000':{'en': 'Netfone Telecom'},
'36312001':{'en': 'Netfone Telecom'},
'3631310':{'en': 'Vodafone'},
'3631311':{'en': 'Vodafone'},
'3631312':{'en': 'Vodafone'},
'3631313':{'en': 'Vodafone'},
'3631314':{'en': 'Vodafone'},
'3631315':{'en': 'Vodafone'},
'3631316':{'en': 'Vodafone'},
'3631317':{'en': 'Vodafone'},
'3631318':{'en': 'Vodafone'},
'36313190':{'en': 'Vodafone'},
'36313191':{'en': 'Vodafone'},
'36313192':{'en': 'Vodafone'},
'36313193':{'en': 'Vodafone'},
'36313194':{'en': 'Vodafone'},
'36313195':{'en': 'Vodafone'},
'36313196':{'en': 'Vodafone'},
'36313197':{'en': 'Vodafone'},
'36313199':{'en': 'Vodafone'},
'3631320':{'en': 'Vodafone'},
'3631321':{'en': 'Vodafone'},
'3631322':{'en': 'Vodafone'},
'3631323':{'en': 'Vodafone'},
'3631324':{'en': 'Vodafone'},
'3631325':{'en': 'Vodafone'},
'3631326':{'en': 'Vodafone'},
'3631327':{'en': 'Vodafone'},
'3631328':{'en': 'Vodafone'},
'36313290':{'en': 'Vodafone'},
'36313291':{'en': 'Vodafone'},
'36313292':{'en': 'Vodafone'},
'3631330':{'en': 'Vodafone'},
'3631331':{'en': 'Vodafone'},
'3631332':{'en': 'Vodafone'},
'36313330':{'en': 'Vidanet'},
'36313331':{'en': 'Vidanet'},
'36313666':{'en': 'Vodafone'},
'36317000':{'en': 'TARR'},
'36317001':{'en': 'TARR'},
'36317002':{'en': 'TARR'},
'36317003':{'en': 'TARR'},
'36317004':{'en': 'TARR'},
'3631770':{'en': 'UPC'},
'3631771':{'en': 'UPC'},
'363178':{'en': 'UPC'},
'3631790':{'en': 'UPC'},
'36501':{'en': 'DIGI'},
'36502':{'en': 'DIGI'},
'3670':{'en': 'Vodafone'},
'37060':{'en': 'Tele 2'},
'37061':{'en': 'Omnitel'},
'37062':{'en': 'Omnitel'},
'37064':{'en': u('BIT\u0116')},
'370645':{'en': 'Tele 2'},
'370646':{'en': 'Tele 2'},
'370647':{'en': 'Tele 2'},
'370648':{'en': 'Tele 2'},
'37065':{'en': u('BIT\u0116')},
'370660':{'en': u('BIT\u0116')},
'370662':{'en': 'Omnitel'},
'37067':{'en': 'Tele 2'},
'370680':{'en': 'Omnitel'},
'370681':{'en': u('BIT\u0116')},
'370682':{'en': 'Omnitel'},
'370683':{'en': 'Tele 2'},
'370684':{'en': 'Tele 2'},
'370685':{'en': u('BIT\u0116')},
'370686':{'en': 'Omnitel'},
'370687':{'en': 'Omnitel'},
'370688':{'en': 'Omnitel'},
'370689':{'en': u('BIT\u0116')},
'370690':{'en': u('BIT\u0116')},
'370692':{'en': 'Omnitel'},
'370693':{'en': 'Omnitel'},
'370694':{'en': 'Omnitel'},
'370695':{'en': 'Omnitel'},
'370696':{'en': 'Omnitel'},
'370697':{'en': 'Omnitel'},
'370698':{'en': 'Omnitel'},
'370699':{'en': u('BIT\u0116')},
'37250':{'en': 'EMT'},
'372519':{'en': 'EMT'},
'37252':{'en': 'EMT'},
'372530':{'en': 'EMT'},
'372533':{'en': 'EMT'},
'372534':{'en': 'EMT'},
'372536':{'en': 'EMT'},
'372537':{'en': 'EMT'},
'372538':{'en': 'EMT'},
'372539':{'en': 'EMT'},
'372545':{'en': 'Elisa'},
'37255':{'en': 'Tele 2'},
'37256':{'en': 'Elisa'},
'372577':{'en': 'Elisa'},
'37258':{'en': 'Tele 2'},
'372590':{'en': 'EMT'},
'37356':{'en': 'IDC'},
'37360':{'en': 'Orange'},
'373610':{'en': 'Orange'},
'373611':{'en': 'Orange'},
'373620':{'en': 'Orange'},
'373621':{'en': 'Orange'},
'37367':{'en': 'Moldtelecom'},
'37368':{'en': 'Orange'},
'37369':{'en': 'Orange'},
'37376':{'en': 'Moldcell'},
'373774':{'en': 'IDC'},
'373775':{'en': 'IDC'},
'373777':{'en': 'IDC'},
'373778':{'en': 'IDC'},
'373779':{'en': 'IDC'},
'37378':{'en': 'Moldcell'},
'37379':{'en': 'Moldcell'},
'37433':{'en': 'Beeline', 'ru': u('\u0411\u0438\u043b\u0430\u0439\u043d')},
'37441':{'en': 'Ucom', 'ru': u('\u042e\u043a\u043e\u043c')},
'37443':{'en': 'Beeline', 'ru': u('\u0411\u0438\u043b\u0430\u0439\u043d')},
'37444':{'en': 'Ucom', 'ru': u('\u042e\u043a\u043e\u043c')},
'37449':{'en': 'VivaCell-MTS', 'ru': u('\u0412\u0438\u0432\u0430\u0421\u0435\u043b\u043b-\u041c\u0422\u0421')},
'3745':{'en': 'Ucom', 'ru': u('\u042e\u043a\u043e\u043c')},
'3747':{'en': 'VivaCell-MTS', 'ru': u('\u0412\u0438\u0432\u0430\u0421\u0435\u043b\u043b-\u041c\u0422\u0421')},
'37488':{'en': 'VivaCell-MTS', 'ru': u('\u0412\u0438\u0432\u0430\u0421\u0435\u043b\u043b-\u041c\u0422\u0421')},
'37491':{'en': 'Beeline', 'ru': u('\u0411\u0438\u043b\u0430\u0439\u043d')},
'37493':{'en': 'VivaCell-MTS', 'ru': u('\u0412\u0438\u0432\u0430\u0421\u0435\u043b\u043b-\u041c\u0422\u0421')},
'37494':{'en': 'VivaCell-MTS', 'ru': u('\u0412\u0438\u0432\u0430\u0421\u0435\u043b\u043b-\u041c\u0422\u0421')},
'37495':{'en': 'Ucom', 'ru': u('\u042e\u043a\u043e\u043c')},
'37496':{'en': 'Beeline', 'ru': u('\u0411\u0438\u043b\u0430\u0439\u043d')},
'37498':{'en': 'VivaCell-MTS', 'ru': u('\u0412\u0438\u0432\u0430\u0421\u0435\u043b\u043b-\u041c\u0422\u0421')},
'37499':{'en': 'Beeline', 'ru': u('\u0411\u0438\u043b\u0430\u0439\u043d')},
'37525':{'be': u('\u0411\u0435\u0421\u0422'), 'en': 'life:)', 'ru': 'life:)'},
'375291':{'be': 'Velcom', 'en': 'Velcom', 'ru': 'Velcom'},
'375292':{'be': u('\u041c\u0422\u0421'), 'en': 'MTS', 'ru': u('\u041c\u0422\u0421')},
'375293':{'be': 'Velcom', 'en': 'Velcom', 'ru': 'Velcom'},
'375294':{'be': u('\u0411\u0435\u043b\u0421\u0435\u043b'), 'en': 'Belcel', 'ru': u('\u0411\u0435\u043b\u0421\u0435\u043b')},
'375295':{'be': u('\u041c\u0422\u0421'), 'en': 'MTS', 'ru': u('\u041c\u0422\u0421')},
'375296':{'be': 'Velcom', 'en': 'Velcom', 'ru': 'Velcom'},
'375297':{'be': u('\u041c\u0422\u0421'), 'en': 'MTS', 'ru': u('\u041c\u0422\u0421')},
'375298':{'be': u('\u041c\u0422\u0421'), 'en': 'MTS', 'ru': u('\u041c\u0422\u0421')},
'375299':{'be': 'Velcom', 'en': 'Velcom', 'ru': 'Velcom'},
'37533':{'be': u('\u041c\u0422\u0421'), 'en': 'MTS', 'ru': u('\u041c\u0422\u0421')},
'37544':{'be': 'Velcom', 'en': 'Velcom', 'ru': 'Velcom'},
'3763':{'en': 'Mobiland'},
'38050':{'en': 'Vodafone', 'uk': u('Vodafone \u0423\u043a\u0440\u0430\u0457\u043d\u0430')},
'38063':{'en': 'lifecell', 'uk': 'lifecell'},
'38066':{'en': 'Vodafone', 'uk': u('Vodafone \u0423\u043a\u0440\u0430\u0457\u043d\u0430')},
'38067':{'en': 'Kyivstar', 'uk': u('\u041a\u0438\u0457\u0432\u0441\u0442\u0430\u0440')},
'38068':{'en': 'Kyivstar', 'uk': u('\u041a\u0438\u0457\u0432\u0441\u0442\u0430\u0440')},
'38073':{'en': 'lifecell', 'uk': 'lifecell'},
'38091':{'en': 'TriMob', 'uk': u('\u0422\u0440\u0438\u041c\u043e\u0431')},
'38092':{'en': 'PEOPLEnet', 'uk': 'PEOPLEnet'},
'38093':{'en': 'lifecell', 'uk': 'lifecell'},
'38094':{'en': 'Intertelecom', 'uk': u('\u0406\u043d\u0442\u0435\u0440\u0442\u0435\u043b\u0435\u043a\u043e\u043c')},
'38095':{'en': 'Vodafone', 'uk': u('Vodafone \u0423\u043a\u0440\u0430\u0457\u043d\u0430')},
'38096':{'en': 'Kyivstar', 'uk': u('\u041a\u0438\u0457\u0432\u0441\u0442\u0430\u0440')},
'38097':{'en': 'Kyivstar', 'uk': u('\u041a\u0438\u0457\u0432\u0441\u0442\u0430\u0440')},
'38098':{'en': 'Kyivstar', 'uk': u('\u041a\u0438\u0457\u0432\u0441\u0442\u0430\u0440')},
'38099':{'en': 'Vodafone', 'uk': u('Vodafone \u0423\u043a\u0440\u0430\u0457\u043d\u0430')},
'38160':{'en': 'VIP'},
'38161':{'en': 'VIP'},
'38162':{'en': 'Telenor'},
'38163':{'en': 'Telenor'},
'38164':{'en': 'mts'},
'38165':{'en': 'mts'},
'38166':{'en': 'mts'},
'381677':{'en': 'GLOBALTEL'},
'381678':{'en': 'Vectone Mobile'},
'38168':{'en': 'VIP'},
'38169':{'en': 'Telenor'},
'38260':{'en': 'm:tel'},
'38263':{'en': 'Telenor'},
'38266':{'en': 'Telekom'},
'38267':{'en': 'Telekom'},
'38268':{'en': 'm:tel'},
'38269':{'en': 'Telenor'},
'38343':{'en': 'IPKO'},
'383432':{'en': 'D3 Mobile'},
'383433':{'en': 'D3 Mobile'},
'383434':{'en': 'D3 Mobile'},
'38344':{'en': 'vala'},
'383451':{'en': 'vala'},
'383452':{'en': 'vala'},
'383453':{'en': 'vala'},
'383454':{'en': 'vala'},
'383455':{'en': 'Z Mobile'},
'383456':{'en': 'Z Mobile'},
'383457':{'en': 'vala'},
'383458':{'en': 'vala'},
'383459':{'en': 'vala'},
'383461':{'en': 'Z Mobile'},
'3834710':{'en': 'mts d.o.o.'},
'3834711':{'en': 'mts d.o.o.'},
'3834712':{'en': 'mts d.o.o.'},
'3834713':{'en': 'mts d.o.o.'},
'3834714':{'en': 'mts d.o.o.'},
'3834715':{'en': 'mts d.o.o.'},
'38348':{'en': 'IPKO'},
'38349':{'en': 'IPKO'},
'38590':{'en': 'Tele2'},
'38591':{'en': 'A1 Telekom'},
'38592':{'en': 'A1 Telekom'},
'38595':{'en': 'Tele2'},
'385970':{'en': 'Hrvatski Telekom'},
'385975':{'en': 'Telefocus'},
'385976':{'en': 'Hrvatski Telekom'},
'385977':{'en': 'Hrvatski Telekom'},
'385979':{'en': 'Hrvatski Telekom'},
'38598':{'en': 'Hrvatski Telekom'},
'38599':{'en': 'Hrvatski Telekom'},
'38630':{'en': 'A1'},
'38631':{'en': 'Telekom Slovenije'},
'38640':{'en': 'A1'},
'38641':{'en': 'Telekom Slovenije'},
'38643':{'en': 'Telekom Slovenije'},
'38649':{'en': 'Telekom Slovenije'},
'38651':{'en': 'Telekom Slovenije'},
'38664':{'en': 'T-2'},
'386651':{'en': u('S\u017d - Infrastruktura')},
'386655':{'en': 'Telekom Slovenije'},
'386656':{'en': 'Telekom Slovenije'},
'386657':{'en': 'Novatel'},
'38668':{'en': 'A1'},
'38669':{'en': 'A1'},
'3866910':{'en': 'Compatel'},
'38670':{'en': 'Telemach'},
'38671':{'en': 'Telemach'},
'38760':{'en': 'BH Telecom'},
'38761':{'en': 'BH Telecom'},
'38762':{'en': 'BH Telecom'},
'38763':{'en': 'HT ERONET'},
'38764':{'en': 'HT ERONET'},
'38765':{'en': 'm:tel'},
'38766':{'en': 'm:tel'},
'38767':{'en': 'm:tel'},
'389701':{'en': 'T-Mobile'},
'389702':{'en': 'T-Mobile'},
'389703':{'en': 'T-Mobile'},
'389704':{'en': 'T-Mobile'},
'389705':{'en': 'T-Mobile'},
'389706':{'en': 'T-Mobile'},
'389707':{'en': 'T-Mobile'},
'389708':{'en': 'T-Mobile'},
'389709':{'en': 'T-Mobile'},
'389711':{'en': 'T-Mobile'},
'389712':{'en': 'T-Mobile'},
'389713':{'en': 'T-Mobile'},
'389714':{'en': 'T-Mobile'},
'389715':{'en': 'T-Mobile'},
'389716':{'en': 'T-Mobile'},
'389717':{'en': 'T-Mobile'},
'389718':{'en': 'T-Mobile'},
'389719':{'en': 'T-Mobile'},
'389721':{'en': 'T-Mobile'},
'389722':{'en': 'T-Mobile'},
'389723':{'en': 'T-Mobile'},
'389724':{'en': 'T-Mobile'},
'389725':{'en': 'T-Mobile'},
'389726':{'en': 'T-Mobile'},
'389727':{'en': 'T-Mobile'},
'389729':{'en': 'T-Mobile'},
'389732':{'en': 'Vip'},
'389733':{'en': 'Telekom'},
'389734':{'en': 'Vip'},
'38974':{'en': 'Mobik'},
'389752':{'en': 'Vip'},
'389753':{'en': 'Vip'},
'389754':{'en': 'Vip'},
'389755':{'en': 'Vip'},
'389756':{'en': 'Vip'},
'389757':{'en': 'Vip'},
'389758':{'en': 'Vip'},
'389759':{'en': 'Vip'},
'389762':{'en': 'Vip'},
'389763':{'en': 'Vip'},
'389764':{'en': 'Vip'},
'389765':{'en': 'Vip'},
'389766':{'en': 'Vip'},
'389767':{'en': 'Vip'},
'389768':{'en': 'Vip'},
'389769':{'en': 'Vip'},
'389771':{'en': 'Vip'},
'389772':{'en': 'Vip'},
'389773':{'en': 'Vip'},
'389774':{'en': 'Vip'},
'389775':{'en': 'Vip'},
'389776':{'en': 'Vip'},
'389777':{'en': 'Vip'},
'389778':{'en': 'Vip'},
'389779':{'en': 'Vip'},
'389781':{'en': 'Vip'},
'389782':{'en': 'Vip'},
'389783':{'en': 'Vip'},
'389784':{'en': 'Vip'},
'389785':{'en': 'Vip'},
'389786':{'en': 'Vip'},
'389787':{'en': 'Vip'},
'389788':{'en': 'Vip'},
'389789':{'en': 'Vip'},
'38979':{'en': 'Lycamobile'},
'39319':{'en': 'Intermatica'},
'3932':{'en': 'WIND'},
'3933':{'en': 'TIM'},
'3934':{'en': 'Vodafone'},
'3936':{'en': 'TIM'},
'39370':{'en': 'TIM'},
'39373':{'en': '3 Italia'},
'39377':{'en': 'Vodafone'},
'3938':{'en': 'WIND'},
'39383':{'en': 'Vodafone'},
'3939':{'en': '3 Italia'},
'407000':{'en': 'Enigma-System'},
'407013':{'en': 'Lycamobile'},
'407014':{'en': 'Lycamobile'},
'407015':{'en': 'Lycamobile'},
'407016':{'en': 'Lycamobile'},
'407017':{'en': 'Lycamobile'},
'407018':{'en': 'Lycamobile'},
'407019':{'en': 'Lycamobile'},
'407020':{'en': 'Lycamobile'},
'407050':{'en': 'Iristel'},
'40711':{'en': 'Telekom'},
'40712':{'en': '2K Telecom'},
'4072':{'en': 'Vodafone'},
'4073':{'en': 'Vodafone'},
'4074':{'en': 'Orange'},
'4075':{'en': 'Orange'},
'4076':{'en': 'Telekom'},
'40770':{'en': 'Digi Mobil'},
'40771':{'en': 'Digi Mobil'},
'40772':{'en': 'Digi Mobil'},
'40773':{'en': 'Digi Mobil'},
'40774':{'en': 'Digi Mobil'},
'40775':{'en': 'Digi Mobil'},
'40776':{'en': 'Digi Mobil'},
'4078':{'en': 'Telekom'},
'4079':{'en': 'Vodafone'},
'417500':{'en': 'Swisscom'},
'41754':{'en': 'Swisscom'},
'417550':{'en': 'Swisscom'},
'417551':{'en': 'Swisscom'},
'417552':{'en': 'Swisscom'},
'417553':{'en': 'Swisscom'},
'417600':{'en': 'Sunrise'},
'41762':{'en': 'Sunrise'},
'41763':{'en': 'Sunrise'},
'41764':{'en': 'Sunrise'},
'41765':{'en': 'Sunrise'},
'41766':{'en': 'Sunrise'},
'41767':{'en': 'Sunrise'},
'41768':{'en': 'Sunrise'},
'41769':{'en': 'Sunrise'},
'41770':{'en': 'Swisscom'},
'417710':{'en': 'Swisscom'},
'417712':{'en': 'Swisscom'},
'417713':{'en': 'Swisscom'},
'417715':{'en': 'Swisscom'},
'41772':{'en': 'Sunrise'},
'417730':{'en': 'Sunrise'},
'4177310':{'en': 'Sunrise'},
'4177311':{'en': 'Sunrise'},
'4177312':{'en': 'Sunrise'},
'4177313':{'en': 'Sunrise'},
'4177314':{'en': 'Sunrise'},
'4177315':{'en': 'Sunrise'},
'4177316':{'en': 'Sunrise'},
'4177357':{'en': 'In&Phone'},
'41774':{'en': 'Swisscom'},
'417750':{'en': 'Swisscom'},
'417751':{'en': 'Swisscom'},
'417752':{'en': 'Swisscom'},
'417753':{'en': 'Swisscom'},
'417780':{'en': 'BeeOne Communications'},
'417781':{'en': 'BeeOne Communications'},
'417788':{'en': 'Vectone Mobile Limited (Mundio)'},
'417789':{'en': 'Vectone Mobile Limited (Mundio)'},
'41779':{'en': 'Lycamobile'},
'41780':{'en': 'Salt'},
'41781':{'en': 'Salt'},
'41782':{'en': 'Salt'},
'41783':{'en': 'Salt'},
'417840':{'en': 'UPC Switzerland'},
'417841':{'en': 'UPC Switzerland'},
'417842':{'en': 'UPC Switzerland'},
'4178490':{'en': 'Telecom26 AG'},
'41785':{'en': 'Salt'},
'41786':{'en': 'Salt'},
'41787':{'en': 'Salt'},
'41788':{'en': 'Salt'},
'41789':{'en': 'Salt'},
'41790':{'en': 'Swisscom'},
'41791':{'en': 'Swisscom'},
'41792':{'en': 'Swisscom'},
'41793':{'en': 'Swisscom'},
'41794':{'en': 'Swisscom'},
'41795':{'en': 'Swisscom'},
'41796':{'en': 'Swisscom'},
'41797':{'en': 'Swisscom'},
'41798':{'en': 'Swisscom'},
'417990':{'en': 'Swisscom'},
'417991':{'en': 'Swisscom'},
'417992':{'en': 'Swisscom'},
'417993':{'en': 'Swisscom'},
'417994':{'en': 'Swisscom'},
'417995':{'en': 'Swisscom'},
'417996':{'en': 'Swisscom'},
'4179977':{'en': 'Relario AG (Bebbicell)'},
'4179978':{'en': 'Relario AG (Bebbicell)'},
'4179979':{'en': 'Relario AG (Bebbicell)'},
'417999':{'en': 'Comfone AG'},
'420601':{'en': 'O2'},
'420602':{'en': 'O2'},
'420603':{'en': 'T-Mobile'},
'420604':{'en': 'T-Mobile'},
'420605':{'en': 'T-Mobile'},
'420606':{'en': 'O2'},
'420607':{'en': 'O2'},
'420608':{'en': 'Vodafone'},
'420702':{'en': 'O2'},
'42070300':{'en': 'T-Mobile'},
'4207031':{'en': 'T-Mobile'},
'4207032':{'en': 'T-Mobile'},
'4207033':{'en': 'T-Mobile'},
'4207034':{'en': 'T-Mobile'},
'4207035':{'en': 'T-Mobile'},
'4207036':{'en': 'T-Mobile'},
'42070370':{'en': 'FAYN Telecommunications'},
'42070373':{'en': 'COMA'},
'4207038':{'en': 'T-Mobile'},
'4207039':{'en': 'T-Mobile'},
'4207040':{'en': 'SAZKA sazkova kancelar, a.s'},
'4207041':{'en': 'SAZKA sazkova kancelar, a.s'},
'4207042':{'en': 'SAZKA sazkova kancelar, a.s'},
'4207043':{'en': 'SAZKA sazkova kancelar, a.s'},
'4207044':{'en': 'SAZKA sazkova kancelar, a.s'},
'4207045':{'en': 'SAZKA sazkova kancelar, a.s'},
'4207047':{'en': 'SAZKA sazkova kancelar, a.s'},
'4207050':{'en': 'O2'},
'4207051':{'en': 'O2'},
'4207052':{'en': 'O2'},
'4207053':{'en': 'O2'},
'4207054':{'en': 'O2'},
'42070570':{'en': 'T-Mobile'},
'42072':{'en': 'O2'},
'4207300':{'en': 'T-Mobile'},
'4207301':{'en': 'T-Mobile'},
'4207302':{'en': 'T-Mobile'},
'42073030':{'en': 'T-Mobile'},
'42073033':{'en': 'Axfone'},
'42073035':{'en': 'MATERNA Communications'},
'42073040':{'en': 'Compatel'},
'42073041':{'en': 'SMART Comp'},
'42073042':{'en': 'SMART Comp'},
'42073043':{'en': 'PODA a.s. (SkyNet)'},
'42073044':{'en': 'Vodafone'},
'42073045':{'en': 'Vodafone'},
'42073046':{'en': 'Vodafone'},
'42073047':{'en': 'Vodafone'},
'42073048':{'en': 'Vodafone'},
'4207305':{'en': 'T-Mobile'},
'4207306':{'en': 'T-Mobile'},
'42073070':{'en': 'T-Mobile'},
'42073072':{'en': 'Amcatel'},
'42073073':{'en': 'T-Mobile'},
'42073077':{'en': 'T-Mobile'},
'4207308':{'en': 'T-Mobile'},
'4207309':{'en': 'T-Mobile'},
'420731':{'en': 'T-Mobile'},
'420732':{'en': 'T-Mobile'},
'420733':{'en': 'T-Mobile'},
'420734':{'en': 'T-Mobile'},
'420735':{'en': 'T-Mobile'},
'420736':{'en': 'T-Mobile'},
'420737':{'en': 'T-Mobile'},
'420738':{'en': 'T-Mobile'},
'420739':{'en': 'T-Mobile'},
'4207700':{'en': 'Vodafone'},
'4207701':{'en': 'Vodafone'},
'4207702':{'en': 'Vodafone'},
'4207703':{'en': 'Vodafone'},
'4207704':{'en': 'Vodafone'},
'42077050':{'en': 'Compatel'},
'42077051':{'en': '3ton s.r.o.'},
'42077052':{'en': '3ton s.r.o.'},
'42077055':{'en': 'ASTELNET'},
'4207706':{'en': 'Vodafone'},
'42077071':{'en': 'Cesky bezdrat'},
'42077072':{'en': 'Cesky bezdrat'},
'42077073':{'en': 'T-Mobile'},
'42077077':{'en': 'T-Mobile'},
'42077080':{'en': 'Vodafone'},
'42077081':{'en': 'Vodafone'},
'42077082':{'en': 'Vodafone'},
'42077083':{'en': 'Vodafone'},
'42077084':{'en': 'Vodafone'},
'42077100':{'en': 'TT Quality s.r.o.'},
'42077111':{'en': 'miniTEL'},
'42077177':{'en': 'MONTYHO TECHNOLOGY s.r.o. (CANISTEC)'},
'42077200':{'en': 'TT Quality s.r.o.'},
'42077272':{'en': 'IPEX'},
'42077273':{'en': 'IPEX'},
'42077277':{'en': 'Dragon Internet'},
'420773':{'en': 'Vodafone'},
'420774':{'en': 'Vodafone'},
'420775':{'en': 'Vodafone'},
'420776':{'en': | |
"""
WA module for scraping and processing text from https://leg.wa.gov
# Status, as of January 1, 2022
Current Coverage (In Active Development):
[X] Committee Hearings (Audio Links) (2015 - 2020)
Planned Coverage:
[ ] Committee Hearings (Video Links) (2000 - 2014)
[ ] Floor Speeches (Video Links)
# WA Work Flow
CLASS Scrape
- wa_scrape_links by desired committee and legislative session.
Function filters TVW archives by function parameters
for links to each individual committee meeting for that calendar year
- wa_scrape_audio by wa_scrape_links output
Function downloads audio files to local drive
Renames the file names by committee name and date (YYYYMMDD) (e.g. wa_education_20200305.mp3)
CLASS Process
- wa_speech_to_text
Function gives the user option to convert audio file to a text transcript through DeepSpeech
Uses mp3 links directly to process the transcripts
Downloads the transcript in json form, single json for each committee/legislative session
- wa_text_clean
Function conducts tests and run light cleaning to ensure transcript is ready for text analysis
"""
from collections import OrderedDict
from datetime import datetime
import os
import sys
import re
import time
import urllib
from bs4 import BeautifulSoup
import selenium
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from statelegiscraper.assets.package import wa_committees
"""
#TESTING
dir_chrome_webdriver = "/Users/katherinechang/Google Drive/My Drive/State Legislatures/StateLegiscraper/statelegiscraper/assets/chromedriver/chromedriver_v96_m1"
param_committee = "Senate Early Learning & K-12 Education"
wa_committees.senate_standing
param_year = "2015"
dir_save = "/Users/katherinechang/Google Drive/My Drive/State Legislatures/"
"""
class Scrape:
"""
Scrape functions for Washington State Legislature website
Current coverage includes committee hearing audio links
"""
def wa_scrape_links(param_committee, param_year, dir_chrome_webdriver, dir_save):
"""
Webscrape function for Washington State Legislature Website for 2015-2020 sessions
Parameters
----------
param_committee : String
Standing committee hearing name.
See available list through package assets
"from statelegiscraper.assets.package import wa_names"
param_year : String
Calendar year (Current coverage limited to 2015-2021).
dir_chrome_webdriver : String
Local directory that contains the appropriate Chrome Webdriver.
dir_save : String
Local directory to save JSON with audio links.
Returns
-------
A JSON file saved locally with selected committee and year audio links
"""
if not isinstance(param_committee, str):
raise ValueError("Committee name must be a string")
else:
pass
if not isinstance(param_year, str):
raise ValueError("Year selection must be a string")
else:
pass
if not os.path.exists(dir_chrome_webdriver):
raise ValueError("Chrome Webdriver not found")
else:
pass
if not os.path.exists(dir_save):
raise ValueError("Save directory not found")
else:
pass
############
#--> DRIVER SETUP
service = Service(dir_chrome_webdriver)
options = webdriver.ChromeOptions()
# Chrome runs headless,
# comment out "options.add_argument('headless')"
# to see the action
# options.add_argument('headless')
driver = webdriver.Chrome(service=service, options=options)
############
#--> OPEN TO TVW ARCHIVES
driver.get("https://tvw.org/video-search/")
time.sleep(5)
############
#--> CLICK CATEGORIES TO OPEN
driver.find_element(By.CLASS_NAME, "MuiGrid-grid-xs-12").click()
# INPUT COMMITTEE NAME
input_search = driver.find_element(By.XPATH, "//input[contains(@class, 'MuiInputBase-input MuiInput-input')]")
input_search.send_keys(param_committee)
#TEST NOTE: Need to check that this specific div class is clickable for each committee selection
committee_script_list =["//div[@class='MuiListItemText-root jss3 jss4 MuiListItemText-multiline'",
" and @title='",
param_committee,
"']"]
separator = ""
committee_script = separator.join(committee_script_list)
# SELECT COMMITTEE NAME FROM DROP DOWN
driver.find_element(By.XPATH, committee_script).click()
# CHECK THAT COMMITTEE NAME FROM DROP DOWN IS SELECTED
committee_name_assert = driver.find_element(By.XPATH, "//span[@class='MuiChip-label']").get_attribute("innerHTML")
#Ensure amp is the same
if re.search("&", committee_name_assert):
committee_name_assert = committee_name_assert.replace("&", "&")
else:
pass
assert committee_name_assert == param_committee, "Committee Name Not Selected"
############
#--> SELECT START DATE BY LEGISLATIVE SESSION (JANUARY 1-3, WHICHEVER FALLS ON WEEKDAY)
# Date is reliant on current date, start month set to January 1st of cal year
calendar_dropdown = driver.find_elements(By.XPATH, "//div[@class='react-datepicker__input-container']")
calendar_dropdown[0].click()
date_elements = driver.find_elements(By.XPATH, "//input[@class='css-13hc3dd']")
start_date = date_elements[0].get_attribute("value")
start_datetime = datetime.strptime(start_date, '%m/%d/%Y').date()
previous_month_click = "//button[@class='react-datepicker__navigation react-datepicker__navigation--previous']"
def _loop_january(driver, upper_range: int) -> ():
if upper_range == 0:
return
for i in range(0, upper_range):
driver.find_element(By.XPATH, previous_month_click).click()
def _loop_first():
try:
driver.find_element(By.XPATH, "//div[@class='react-datepicker__day react-datepicker__day--001']").click()
except:
try:
driver.find_element(By.XPATH, "//div[@class='react-datepicker__day react-datepicker__day--002']").click()
except:
try:
driver.find_element(By.XPATH, "//div[@class='react-datepicker__day react-datepicker__day--003']").click()
except:
pass
_loop_january(driver, start_datetime.month-1)
_loop_first()
try:
driver.find_element(By.XPATH, "//div[@class='react-datepicker__header']").is_enabled()
calendar_dropdown[0].click()
except NoSuchElementException:
pass
param_dates = driver.find_elements(By.XPATH, "//input[@class='css-13hc3dd']")
param_start_date = param_dates[0].get_attribute("value")
param_start_datetime = datetime.strptime(param_start_date, '%m/%d/%Y').date()
assert (param_start_datetime.month == 1), "Start Date not set to January"
assert (param_start_datetime.day <=3), "Start Date not set between January 1-3"
#--> SELECT START YEAR (ESTABLISHED BY PARAM_YEAR)
#check if dropdown is down
try:
calendar_dropdown[0].click()
except:
calendar_dropdown = driver.find_elements(By.XPATH, "//div[@class='react-datepicker__input-container']")
calendar_dropdown[0].click()
assert driver.find_element(By.XPATH, "//div[@class='react-datepicker__header']").is_enabled(), "Calendar dropdown not available"
date_elements = driver.find_elements(By.XPATH, "//input[@class='css-13hc3dd']")
year_date = date_elements[0].get_attribute("value")
year_datetime = datetime.strptime(year_date, '%m/%d/%Y').date()
driver.find_element(By.XPATH, "//div[@class='react-datepicker__header']").click()
#Year dropdown is dynamic according to date, code clicks according to present values
check_yr = driver.find_elements(By.XPATH, "//div[@class='react-datepicker__year-option']")
check_yr_values=[]
for y in range(len(check_yr)):
check_yr_values.append(check_yr[y].get_attribute("innerHTML"))
#Click previous until year appears on year_list
#if not param_year in year_list:
while not param_year in check_yr_values:
driver.find_element(By.XPATH, "//a[@class='react-datepicker__navigation react-datepicker__navigation--years react-datepicker__navigation--years-previous']").click()
while_year = driver.find_elements(By.XPATH, "//div[@class='react-datepicker__year-option']")
while_year_values=[]
for y in range(len(while_year)):
while_year_values.append(while_year[y].get_attribute("innerHTML"))
if param_year in while_year_values:
break
year_list = driver.find_elements(By.XPATH, "//div[@class='react-datepicker__year-option']")
year_list_values=[]
for y in range(len(year_list)):
year_list_values.append(year_list[y].get_attribute("innerHTML"))
assert param_year in year_list_values, "param_year not in year dropdown list"
def _year_select(param_year):
#Click according to the param_year
if param_year == "2021":
param_y = year_list_values.index("2021")
year_list[param_y].click()
elif param_year == "2020":
param_y = year_list_values.index("2020")
year_list[param_y].click()
elif param_year == "2019":
param_y = year_list_values.index("2019")
year_list[param_y].click()
elif param_year == "2018":
param_y = year_list_values.index("2018")
year_list[param_y].click()
elif param_year == "2017":
param_y = year_list_values.index("2017")
year_list[param_y].click()
elif param_year == "2016":
param_y = year_list_values.index("2016")
year_list[param_y].click()
elif param_year == "2015":
param_y = year_list_values.index("2015")
year_list[param_y].click()
else:
"Invalid Year. Current coverage limited to 2015 to 2021"
if (year_datetime.year != int(param_year)):
_year_select(param_year)
_loop_first()
else:
pass
param_dates = driver.find_elements(By.XPATH, "//input[@class='css-13hc3dd']")
param_start_date = param_dates[0].get_attribute("value")
param_start_datetime = datetime.strptime(param_start_date, '%m/%d/%Y').date()
assert (param_start_datetime.year == int(param_year)), "Start Date not set to param_year"
assert (param_start_datetime.day <=3), "Start Date not set between January 1-3"
time.sleep(2)
############
#--> SELECT END DATE BY LEGISLATIVE SESSION (DECEMBER)
calendar_dropdown[1].click()
date_elements = driver.find_elements(By.XPATH, "//input[@class='css-13hc3dd']")
end_date = date_elements[1].get_attribute("value")
end_datetime = datetime.strptime(end_date, '%m/%d/%Y').date()
next_month_click = "//button[@class='react-datepicker__navigation react-datepicker__navigation--next']"
def _loop_december(driver, upper_range: int) -> ():
if upper_range == 12:
return
for i in range(0, (12-upper_range)):
driver.find_element(By.XPATH, next_month_click).click()
def _loop_end():
try:
driver.find_element(By.XPATH, "//div[@class='react-datepicker__day react-datepicker__day--031']").click()
except:
try:
driver.find_element(By.XPATH, "//div[@class='react-datepicker__day react-datepicker__day--030']").click()
except:
try:
driver.find_element(By.XPATH, "//div[@class='react-datepicker__day react-datepicker__day--029']").click()
except:
pass
_loop_december(driver, end_datetime.month)
_loop_end()
try:
driver.find_element(By.XPATH, "//div[@class='react-datepicker__header']").is_enabled()
calendar_dropdown[0].click()
except NoSuchElementException:
pass
param_dates = driver.find_elements(By.XPATH, "//input[@class='css-13hc3dd']")
param_end_date = param_dates[1].get_attribute("value")
param_end_datetime = datetime.strptime(param_end_date, '%m/%d/%Y').date()
assert (param_end_datetime.month == int(12)), "End Date not set to December"
assert (param_end_datetime.day >=29), "End Date not set between December 29-31"
#--> SELECT END YEAR (ESTABLISHED BY PARAM_YEAR)
try:
calendar_dropdown[1].click()
except:
calendar_dropdown = driver.find_elements(By.XPATH, "//div[@class='react-datepicker__input-container']")
calendar_dropdown[1].click()
assert driver.find_element(By.XPATH, "//div[@class='react-datepicker__header']").is_enabled(), "Calendar dropdown not available"
date_elements = driver.find_elements(By.XPATH, "//input[@class='css-13hc3dd']")
end_year_date = date_elements[1].get_attribute("value")
end_year_datetime = datetime.strptime(year_date, '%m/%d/%Y').date()
driver.find_element(By.XPATH, "//div[@class='react-datepicker__header']").click()
#Year dropdown is dynamic according to date, code clicks according to present values
check_yr = driver.find_elements(By.XPATH, "//div[@class='react-datepicker__year-option']")
check_yr_values=[]
for y in range(len(check_yr)):
check_yr_values.append(check_yr[y].get_attribute("innerHTML"))
#Click previous until year appears on year_list
while not param_year in check_yr_values:
driver.find_element(By.XPATH, "//a[@class='react-datepicker__navigation react-datepicker__navigation--years react-datepicker__navigation--years-previous']").click()
while_year = driver.find_elements(By.XPATH, "//div[@class='react-datepicker__year-option']")
while_year_values=[]
for y in range(len(while_year)):
while_year_values.append(while_year[y].get_attribute("innerHTML"))
if param_year in while_year_values:
break
year_list = driver.find_elements(By.XPATH, "//div[@class='react-datepicker__year-option']")
year_list_values=[]
for y in range(len(year_list)):
year_list_values.append(year_list[y].get_attribute("innerHTML"))
assert param_year in year_list_values, "param_year not in year dropdown list"
if (end_year_datetime.year != int(param_year)):
_year_select(param_year)
_loop_end()
else:
calendar_dropdown[1].click()
param_dates = driver.find_elements(By.XPATH, "//input[@class='css-13hc3dd']")
param_end_date = param_dates[1].get_attribute("value")
param_end_datetime = datetime.strptime(param_end_date, '%m/%d/%Y').date()
assert (param_end_datetime.year == int(param_year)), "End Date not set to param_year"
assert (param_end_datetime.day >=29), "End Date not set between December 29-31"
############
#--> PRESS SUBMIT
driver.find_element(By.XPATH, "//button[@class='filter__form-submit css-1l4j2co']").click()
############
# SAVE HTML FOR MULTIPLE PAGES
url_html = []
url_html.append(driver.page_source) #CURRENT PAGE, PAGE 1
soup_html_home = BeautifulSoup(url_html[0])
no_results_assert=soup_html_home.find('div', {'class': re.compile(r'fallback-states__NoResults.*')})
if no_results_assert is None: #There are results
pass
else: #There are no results
if no_results_assert.text == str("No Events Available"): #Check if there's the string
url_html=["No results found"]
print("Search results yielded no hearing meetings")
#driver.close() #If no results breaks script, but otherwise run
#break #Probably need to print why break so user knows what's up
else:
pass
page_num = soup_html_home.find_all('button', | |
# ckanext/activity/logic/action.py
# -*- coding: utf-8 -*-
from __future__ import annotations
import logging
import datetime
import json
from typing import Any, Optional
import ckan.plugins.toolkit as tk
from ckan.logic import validate
from ckan.types import Context, DataDict, ActionResult
import ckanext.activity.email_notifications as email_notifications
from . import schema
from ..model import activity as model_activity, activity_dict_save
log = logging.getLogger(__name__)
def send_email_notifications(
    context: Context, data_dict: DataDict
) -> ActionResult.SendEmailNotifications:
    """Dispatch every pending activity-stream notification email.

    Callers must be authorized as a sysadmin (API key/token in the
    Authorization header), or invoke this action from the command line
    via a ``ckan notify send_emails ...`` command.

    :raises tk.ValidationError: if email notifications are disabled in
        the site configuration.
    """
    tk.check_access("send_email_notifications", context, data_dict)

    # Refuse to run when the feature flag is off, so a misconfigured
    # cron job fails loudly instead of silently doing nothing.
    enabled = tk.config.get_value(
        "ckan.activity_streams_email_notifications"
    )
    if not enabled:
        msg = (
            "ckan.activity_streams_email_notifications"
            " is not enabled in config"
        )
        raise tk.ValidationError({"message": msg})

    email_notifications.get_and_send_notifications_for_all_users()
def dashboard_mark_activities_old(
    context: Context, data_dict: DataDict
) -> ActionResult.DashboardMarkActivitiesOld:
    """Flag all of the authorized user's new dashboard activities as seen.

    After this call,
    :py:func:`~ckan.logic.action.get.dashboard_new_activities_count`
    reports 0.
    """
    tk.check_access("dashboard_mark_activities_old", context, data_dict)

    model = context["model"]
    user_obj = model.User.get(context["user"])
    assert user_obj

    # Moving the "last viewed" marker to now is what makes every
    # earlier activity count as old.
    dashboard = model.Dashboard.get(user_obj.id)
    dashboard.activity_stream_last_viewed = datetime.datetime.utcnow()

    if not context.get("defer_commit"):
        model.repo.commit()
def activity_create(
    context: Context, data_dict: DataDict
) -> Optional[dict[str, Any]]:
    """Create a new activity stream activity.

    You must be a sysadmin to create new activities.

    :param user_id: the name or id of the user who carried out the activity,
        e.g. ``'seanh'``
    :type user_id: string
    :param object_id: the name or id of the object of the activity, e.g.
        ``'my_dataset'``
    :type object_id: string
    :param activity_type: the type of the activity, this must be an activity
        type that CKAN knows how to render, e.g. ``'new package'``,
        ``'changed user'``, ``'deleted group'`` etc.
    :type activity_type: string
    :param data: any additional data about the activity
    :type data: dictionary

    :returns: the newly created activity, or ``None`` when activity
        streams are disabled in the site configuration
    :rtype: dictionary

    :raises tk.ValidationError: if the supplied fields fail schema
        validation.
    """
    tk.check_access("activity_create", context, data_dict)

    if not tk.config.get_value("ckan.activity_streams_enabled"):
        return None

    model = context["model"]

    # Any revision_id that the caller attempts to pass in the activity
    # dict is ignored and removed here.
    data_dict.pop("revision_id", None)

    sch = context.get("schema") or schema.default_create_activity_schema()

    data, errors = tk.navl_validate(data_dict, sch, context)
    if errors:
        raise tk.ValidationError(errors)

    activity = activity_dict_save(data, context)

    if not context.get("defer_commit"):
        model.repo.commit()

    # Lazy %-style args: the message is only formatted when DEBUG is on.
    log.debug("Created '%s' activity", activity.activity_type)
    return model_activity.activity_dictize(activity, context)
@validate(schema.default_activity_list_schema)
def user_activity_list(
    context: Context, data_dict: DataDict
) -> list[dict[str, Any]]:
    """Return a user's public activity stream.

    You must be authorized to view the user's profile.

    :param id: the id or name of the user
    :type id: string
    :param offset: where to start getting activity items from
        (optional, default: ``0``)
    :type offset: int
    :param limit: the maximum number of activities to return
        (optional, default: ``31`` unless set in site's configuration
        ``ckan.activity_list_limit``, upper limit: ``100`` unless set in
        site's configuration ``ckan.activity_list_limit_max``)
    :type limit: int

    :rtype: list of dictionaries
    """
    # FIXME: Filter out activities whose subject or object the user is not
    # authorized to read.
    tk.check_access("user_activity_list", context, data_dict)

    model = context["model"]

    # "id" may hold either the user's name or their id; User.get
    # resolves both.
    user = model.User.get(data_dict.get("id"))
    if user is None:
        raise tk.ObjectNotFound()

    # limit is defaulted, capped and coerced to int by the schema.
    activity_objects = model_activity.user_activity_list(
        user.id,
        limit=data_dict["limit"],
        offset=data_dict.get("offset", 0),
    )
    return model_activity.activity_list_dictize(activity_objects, context)
@validate(schema.default_activity_list_schema)
def package_activity_list(
    context: Context, data_dict: DataDict
) -> list[dict[str, Any]]:
    """Return a package's activity stream (not including detail)

    You must be authorized to view the package.

    :param id: the id or name of the package
    :type id: string
    :param offset: where to start getting activity items from
        (optional, default: ``0``)
    :type offset: int
    :param limit: the maximum number of activities to return
        (optional, default: ``31`` unless set in site's configuration
        ``ckan.activity_list_limit``, upper limit: ``100`` unless set in
        site's configuration ``ckan.activity_list_limit_max``)
    :type limit: int
    :param after: After timestamp
        (optional, default: ``None``)
    :type after: int, str
    :param before: Before timestamp
        (optional, default: ``None``)
    :type before: int, str
    :param include_hidden_activity: whether to include 'hidden' activity, which
        is not shown in the Activity Stream page. Hidden activity includes
        activity done by the site_user, such as harvests, which are not shown
        in the activity stream because they can be too numerous, or activity by
        other users specified in config option `ckan.hide_activity_from_users`.
        NB Only sysadmins may set include_hidden_activity to true.
        (default: false)
    :type include_hidden_activity: bool
    :param activity_types: A list of activity types to include in the response
    :type activity_types: list
    :param exclude_activity_types: A list of activity types to exclude from the
        response
    :type exclude_activity_types: list

    :rtype: list of dictionaries
    """
    # FIXME: Filter out activities whose subject or object the user is not
    # authorized to read.
    include_hidden_activity = data_dict.get("include_hidden_activity", False)
    activity_types = data_dict.pop("activity_types", None)
    exclude_activity_types = data_dict.pop("exclude_activity_types", None)

    # The two filters are mutually exclusive; using both at once is a
    # caller error.
    if activity_types is not None and exclude_activity_types is not None:
        raise tk.ValidationError(
            {
                "activity_types": [
                    # Fixed: the closing backtick was missing.
                    "Cannot be used together with `exclude_activity_types`"
                ]
            }
        )

    tk.check_access("package_activity_list", context, data_dict)

    model = context["model"]

    package_ref = data_dict.get("id")  # May be name or ID.
    package = model.Package.get(package_ref)
    if package is None:
        raise tk.ObjectNotFound()

    offset = int(data_dict.get("offset", 0))
    limit = data_dict["limit"]  # defaulted, limited & made an int by schema
    after = data_dict.get("after")
    before = data_dict.get("before")

    activity_objects = model_activity.package_activity_list(
        package.id,
        limit=limit,
        offset=offset,
        after=after,
        before=before,
        include_hidden_activity=include_hidden_activity,
        activity_types=activity_types,
        exclude_activity_types=exclude_activity_types,
    )

    return model_activity.activity_list_dictize(activity_objects, context)
@validate(schema.default_activity_list_schema)
def group_activity_list(
    context: Context, data_dict: DataDict
) -> list[dict[str, Any]]:
    """Return a group's activity stream.

    You must be authorized to view the group.

    :param id: the id or name of the group
    :type id: string
    :param offset: where to start getting activity items from
        (optional, default: ``0``)
    :type offset: int
    :param limit: the maximum number of activities to return
        (optional, default: ``31`` unless set in site's configuration
        ``ckan.activity_list_limit``, upper limit: ``100`` unless set in
        site's configuration ``ckan.activity_list_limit_max``)
    :type limit: int
    :param include_hidden_activity: whether to include 'hidden' activity, which
        is not shown in the Activity Stream page. Hidden activity includes
        activity done by the site_user, such as harvests, which are not shown
        in the activity stream because they can be too numerous, or activity by
        other users specified in config option `ckan.hide_activity_from_users`.
        NB Only sysadmins may set include_hidden_activity to true.
        (default: false)
    :type include_hidden_activity: bool

    :rtype: list of dictionaries
    """
    # FIXME: Filter out activities whose subject or object the user is not
    # authorized to read.
    data_dict = dict(data_dict, include_data=False)
    show_hidden = data_dict.get("include_hidden_activity", False)

    tk.check_access("group_activity_list", context, data_dict)

    # "id" may be a name or an id; group_show resolves it to the
    # canonical id.
    resolved_id = tk.get_action("group_show")(
        context, {"id": data_dict.get("id")}
    )["id"]

    # limit is defaulted, capped and coerced to int by the schema.
    activity_objects = model_activity.group_activity_list(
        resolved_id,
        limit=data_dict["limit"],
        offset=data_dict.get("offset", 0),
        include_hidden_activity=show_hidden,
    )
    return model_activity.activity_list_dictize(activity_objects, context)
@validate(schema.default_activity_list_schema)
def organization_activity_list(
    context: Context, data_dict: DataDict
) -> list[dict[str, Any]]:
    """Return a organization's activity stream.

    :param id: the id or name of the organization
    :type id: string
    :param offset: where to start getting activity items from
        (optional, default: ``0``)
    :type offset: int
    :param limit: the maximum number of activities to return
        (optional, default: ``31`` unless set in site's configuration
        ``ckan.activity_list_limit``, upper limit: ``100`` unless set in
        site's configuration ``ckan.activity_list_limit_max``)
    :type limit: int
    :param include_hidden_activity: whether to include 'hidden' activity, which
        is not shown in the Activity Stream page. Hidden activity includes
        activity done by the site_user, such as harvests, which are not shown
        in the activity stream because they can be too numerous, or activity by
        other users specified in config option `ckan.hide_activity_from_users`.
        NB Only sysadmins may set include_hidden_activity to true.
        (default: false)
    :type include_hidden_activity: bool

    :rtype: list of dictionaries
    """
    # FIXME: Filter out activities whose subject or object the user is not
    # authorized to read.
    show_hidden = data_dict.get("include_hidden_activity", False)

    tk.check_access("organization_activity_list", context, data_dict)

    # "id" may be a name or an id; organization_show resolves it to the
    # canonical id.
    resolved_id = tk.get_action("organization_show")(
        context, {"id": data_dict.get("id")}
    )["id"]

    # limit is defaulted, capped and coerced to int by the schema.
    activity_objects = model_activity.organization_activity_list(
        resolved_id,
        limit=data_dict["limit"],
        offset=data_dict.get("offset", 0),
        include_hidden_activity=show_hidden,
    )
    return model_activity.activity_list_dictize(activity_objects, context)
@validate(schema.default_dashboard_activity_list_schema)
def recently_changed_packages_activity_list(
context: Context, data_dict: DataDict
) -> list[dict[str, Any]]:
"""Return the activity stream of all recently added or changed packages.
:param offset: where to start getting activity items from
(optional, default: ``0``)
:type offset: int
:param limit: the maximum number of activities to return
(optional, default: ``31`` unless set in site's configuration
``ckan.activity_list_limit``, upper | |
expressions we tell
# Elasticsearch to use when normalizing fields that will be used
# for searching.
filters = []
for filter_name in CurrentMapping.AUTHOR_CHAR_FILTER_NAMES:
configuration = CurrentMapping.CHAR_FILTERS[filter_name]
find = re.compile(configuration['pattern'])
replace = configuration['replacement']
# Hack to (imperfectly) convert Java regex format to Python format.
# $1 -> \1
replace = replace.replace("$", "\\")
filters.append((find, replace))
def filters_to(start, finish):
"""When all the filters are applied to `start`,
the result is `finish`.
"""
for find, replace in filters:
start = find.sub(replace, start)
assert start == finish
# Only the primary author is considered for sorting purposes.
filters_to("<NAME> ; <NAME>", "<NAME>")
# The special system author '[Unknown]' is replaced with
# REPLACEMENT CHARACTER so it will be last in sorted lists.
filters_to("[Unknown]", u"\N{REPLACEMENT CHARACTER}")
# Periods are removed.
filters_to("<NAME>.", "<NAME>")
filters_to("<NAME>", "<NAME>")
# The initials of authors who go by initials are normalized
# so that their books all sort together.
filters_to("Wells, HG", "Wells, HG")
filters_to("Wells, H G", "Wells, HG")
filters_to("Wells, H.G.", "Wells, HG")
filters_to("Wells, H. G.", "Wells, HG")
# It works with up to three initials.
filters_to("<NAME>.", "Tolkien, JRR")
# Parentheticals are removed.
filters_to("<NAME>. (<NAME>)", "Wells, HG")
class TestExternalSearchWithWorks(EndToEndSearchTest):
"""These tests run against a real search index with works in it.
The setup is very slow, so all the tests are in the same method.
Don't add new methods to this class - add more tests into test_query_works,
or add a new test class.
"""
    def populate_works(self):
        """Create the fixture works that the search tests in this class query.

        NOTE(review): these objects feed a live search index fixture and
        are referenced by attribute name throughout test_query_works;
        the creation order and exact field values matter to the
        assertions there.
        """
        _work = self.default_work
        # A fiction/nonfiction near-duplicate pair, exercised by title,
        # author, subtitle, series, summary and publisher searches.
        self.moby_dick = _work(
            title="Mob<NAME>", authors="<NAME>", fiction=True,
        )
        self.moby_dick.presentation_edition.subtitle = "Or, the Whale"
        self.moby_dick.presentation_edition.series = "Classics"
        self.moby_dick.summary_text = "Ishmael"
        self.moby_dick.presentation_edition.publisher = "Project Gutenberg"
        self.moby_dick.last_update_time = datetime.datetime(2019, 1, 1)
        self.moby_duck = _work(title="Moby Duck", authors="<NAME>", fiction=False)
        self.moby_duck.presentation_edition.subtitle = "The True Story of 28,800 Bath Toys Lost at Sea"
        self.moby_duck.summary_text = "A compulsively readable narrative"
        self.moby_duck.presentation_edition.publisher = "Penguin"
        self.moby_duck.last_update_time = datetime.datetime(2019, 1, 2)
        # This book is not currently loanable. It will still show up
        # in search results unless the library's settings disable it.
        self.moby_duck.license_pools[0].licenses_available = 0
        # One work per searchable field, all matching the keyword
        # "match", used to verify field-relevance ordering.
        self.title_match = _work(title="Match")
        self.subtitle_match = _work(title="SubtitleM")
        self.subtitle_match.presentation_edition.subtitle = "Match"
        self.summary_match = _work(title="SummaryM")
        self.summary_match.summary_text = "It's a Match! The story of a work whose summary contained an important keyword."
        self.publisher_match = _work(title="PublisherM")
        self.publisher_match.presentation_edition.publisher = "Match"
        self.tess = _work(title="Tess of the d'Urbervilles")
        self.tiffany = _work(title="Breakfast at Tiffany's")
        self.les_mis = _work()
        # Non-ASCII title for accent-insensitive search tests.
        self.les_mis.presentation_edition.title = u"Les Mis\u00E9rables"
        self.modern_romance = _work(title="Modern Romance")
        self.lincoln = _work(genre="Biography & Memoir", title="<NAME>")
        self.washington = _work(genre="Biography", title="<NAME>")
        self.lincoln_vampire = _work(title="<NAME>: <NAME>", genre="Fantasy")
        # One work per audience classification, for audience filtering.
        self.children_work = _work(title="<NAME> Wonderland", audience=Classifier.AUDIENCE_CHILDREN)
        self.all_ages_work = _work(title="The Annotated Alice", audience=Classifier.AUDIENCE_ALL_AGES)
        self.ya_work = _work(title="Go Ask Alice", audience=Classifier.AUDIENCE_YOUNG_ADULT)
        self.adult_work = _work(title="Still Alice", audience=Classifier.AUDIENCE_ADULT)
        self.research_work = _work(
            title="Curiouser and Curiouser: Surrealism and Repression in 'Alice in Wonderland'",
            audience=Classifier.AUDIENCE_RESEARCH
        )
        self.ya_romance = _work(
            title="Gumby In Love",
            audience=Classifier.AUDIENCE_YOUNG_ADULT, genre="Romance"
        )
        self.ya_romance.presentation_edition.subtitle = (
            "Modern Fairytale Series, Volume 7"
        )
        self.ya_romance.presentation_edition.series = "Modern Fairytales"
        self.no_age = _work()
        self.no_age.summary_text = "President Barack Obama's election in 2008 energized the United States"
        # Set the series to the empty string rather than None -- this isn't counted
        # as the book belonging to a series.
        self.no_age.presentation_edition.series = ""
        # A spread of target-age ranges for age filtering tests; several
        # share the same summary so that age is the only differentiator.
        self.age_4_5 = _work()
        self.age_4_5.target_age = NumericRange(4, 5, '[]')
        self.age_4_5.summary_text = "President Barack Obama's election in 2008 energized the United States"
        self.age_5_6 = _work(fiction=False)
        self.age_5_6.target_age = NumericRange(5, 6, '[]')
        self.obama = _work(
            title="<NAME>", genre="Biography & Memoir"
        )
        self.obama.target_age = NumericRange(8, 8, '[]')
        self.obama.summary_text = "President <NAME>'s election in 2008 energized the United States"
        self.dodger = _work()
        self.dodger.target_age = NumericRange(8, 8, '[]')
        self.dodger.summary_text = "Willie finds himself running for student council president"
        self.age_9_10 = _work()
        self.age_9_10.target_age = NumericRange(9, 10, '[]')
        self.age_9_10.summary_text = "President <NAME>'s election in 2008 energized the United States"
        self.age_2_10 = _work()
        self.age_2_10.target_age = NumericRange(2, 10, '[]')
        # Same title in two media, for medium filtering.
        self.pride = _work(title="Pride and Prejudice (E)")
        self.pride.presentation_edition.medium = Edition.BOOK_MEDIUM
        self.pride_audio = _work(title="Pride and Prejudice (A)")
        self.pride_audio.presentation_edition.medium = Edition.AUDIO_MEDIUM
        # Same title in two languages, for language filtering.
        self.sherlock = _work(
            title="The Adventures of Sherlock Holmes",
            with_open_access_download=True
        )
        self.sherlock.presentation_edition.language = "eng"
        self.sherlock_spanish = _work(title="Las Aventuras de Sherlock Holmes")
        self.sherlock_spanish.presentation_edition.language = "spa"
        # Create a custom list that contains a few books.
        self.presidential, ignore = self._customlist(
            name="Nonfiction about US Presidents", num_entries=0
        )
        for work in [self.washington, self.lincoln, self.obama]:
            self.presidential.add_entry(work)
        # Create a second collection that only contains a few books.
        self.tiny_collection = self._collection("A Tiny Collection")
        self.tiny_book = self._work(
            title="A Tiny Book", with_license_pool=True,
            collection=self.tiny_collection
        )
        self.tiny_book.license_pools[0].self_hosted = True
        # Both collections contain 'The Adventures of Sherlock
        # Holmes", but each collection licenses the book through a
        # different mechanism.
        self.sherlock_pool_2 = self._licensepool(
            edition=self.sherlock.presentation_edition,
            collection=self.tiny_collection
        )
        sherlock_2, is_new = self.sherlock_pool_2.calculate_work()
        # Sanity-check that the second pool attached to the same Work
        # instead of creating a new one.
        assert self.sherlock == sherlock_2
        assert 2 == len(self.sherlock.license_pools)
        # These books look good for some search results, but they
        # will be filtered out by the universal filters, and will
        # never show up in results.
        # We own no copies of this book.
        self.no_copies = _work(title="Moby Dick 2")
        self.no_copies.license_pools[0].licenses_owned = 0
        # This book's only license pool has been suppressed.
        self.suppressed = _work(title="Moby Dick 2")
        self.suppressed.license_pools[0].suppressed = True
        # This book is not presentation_ready.
        self.not_presentation_ready = _work(title="Moby Dick 2")
        self.not_presentation_ready.presentation_ready = False
def test_query_works(self):
# An end-to-end test of the search functionality.
#
# Works created during setup are added to a real search index.
# We then run actual Elasticsearch queries against the
# search index and verify that the work IDs returned
# are the ones we expect.
# First, run some basic checks to make sure the search
# document query doesn't contain over-zealous joins. This test
# class is the main place where we make a large number of
# works and generate search documents for them.
assert 1 == len(self.moby_dick.to_search_document()['licensepools'])
assert ("Audio" ==
self.pride_audio.to_search_document()['licensepools'][0]['medium'])
# Set up convenient aliases for methods we'll be calling a
# lot.
query = self.search.query_works
expect = self._expect_results
# First, test pagination.
first_item = Pagination(size=1, offset=0)
expect(self.moby_dick, "moby dick", None, first_item)
second_item = first_item.next_page
expect(self.moby_duck, "moby dick", None, second_item)
two_per_page = Pagination(size=2, offset=0)
expect(
[self.moby_dick, self.moby_duck],
"moby dick", None, two_per_page
)
# Now try some different search queries.
# Search in title.
assert 2 == len(query("moby"))
# Search in author name
expect(self.moby_dick, "melville")
# Search in subtitle
expect(self.moby_dick, "whale")
# Search in series.
expect(self.moby_dick, "classics")
# Search in summary.
expect(self.moby_dick, "ishmael")
# Search in publisher name.
expect(self.moby_dick, "gutenberg")
# Title > subtitle > word found in summary > publisher
order = [
self.title_match,
self.subtitle_match,
self.summary_match,
self.publisher_match,
]
expect(order, "match")
# A search for a partial title match + a partial author match
# considers only books that match both fields.
expect(
[self.moby_dick],
"moby melville"
)
# Match a quoted phrase
# 'Moby-Dick' is the first result because it's an exact title
# match. '<NAME>' is the second result because it's a fuzzy
# match,
expect([self.moby_dick, self.moby_duck], '"moby dick"')
# Match a stemmed word: 'running' is stemmed to 'run', and
# so is 'runs'.
expect(self.dodger, "runs")
# Match a misspelled phrase: 'movy' -> 'moby'.
expect([self.moby_dick, self.moby_duck], "movy", ordered=False)
# Match a misspelled author: 'mleville' -> 'melville'
expect(self.moby_dick, "mleville")
# TODO: This is clearly trying to match "<NAME>", but it
# matches nothing. This is because at least two of the strings
# in a query must match. Neither "di" nor "ck" matches a fuzzy
# search on its own, which means "moby" is the only thing that
# matches, and that's not enough.
expect([], "moby di ck")
# Here, "dic" is close enough to "dick" that the fuzzy match
# kicks in. With both "moby" and "dic" matching, it's okay
# that "k" was a dud.
expect([self.moby_dick], "moby dic k")
# A query without an apostrophe matches a word that contains
# one. (this is a feature of the stemmer.)
expect(self.tess, "durbervilles")
expect(self.tiffany, "tiffanys")
# A query with an 'e' matches a word that contains an
# e-with-acute. (this is managed by the 'asciifolding' filter in
# the analyzers)
expect(self.les_mis, "les miserables")
# Find results based on fiction status.
#
# Here, Moby-Dick (fiction) is privileged over Moby Duck
# (nonfiction)
expect([self.moby_dick], "fiction moby")
# | |
"""
Returns the value of the `encrypted` property.
"""
return self._encrypted
@encrypted.setter
def encrypted(self, value):
"""
Sets the value of the `encrypted` property.
"""
Struct._check_type('encrypted', value, InheritableBoolean)
self._encrypted = value
@property
def bandwidth(self):
"""
Returns the value of the `bandwidth` property.
"""
return self._bandwidth
@bandwidth.setter
def bandwidth(self, value):
"""
Sets the value of the `bandwidth` property.
"""
Struct._check_type('bandwidth', value, MigrationBandwidth)
self._bandwidth = value
@property
def auto_converge(self):
"""
Returns the value of the `auto_converge` property.
"""
return self._auto_converge
@auto_converge.setter
def auto_converge(self, value):
"""
Sets the value of the `auto_converge` property.
"""
Struct._check_type('auto_converge', value, InheritableBoolean)
self._auto_converge = value
@property
def compressed(self):
"""
Returns the value of the `compressed` property.
"""
return self._compressed
@compressed.setter
def compressed(self, value):
"""
Sets the value of the `compressed` property.
"""
Struct._check_type('compressed', value, InheritableBoolean)
self._compressed = value
@property
def policy(self):
"""
Returns the value of the `policy` property.
"""
return self._policy
@policy.setter
def policy(self, value):
"""
Sets the value of the `policy` property.
"""
Struct._check_type('policy', value, MigrationPolicy)
self._policy = value
class MigrationPolicy(Identified):
    """Migration policy referenced by id/name.

    Adds no attributes of its own beyond the `Identified` fields.
    """

    def __init__(self, comment=None, description=None, id=None, name=None):
        # Everything is handled by the Identified base class.
        super(MigrationPolicy, self).__init__(
            comment=comment,
            description=description,
            id=id,
            name=name,
        )
class Network(Identified):
    """A logical network.

    Attributes with a declared type are validated in their setters via
    ``Struct._check_type``; the remaining attributes are stored unchanged.
    Properties below are kept in alphabetical order.
    """

    def __init__(
        self,
        cluster=None,
        comment=None,
        data_center=None,
        description=None,
        display=None,
        dns_resolver_configuration=None,
        external_provider=None,
        external_provider_physical_network=None,
        id=None,
        ip=None,
        mtu=None,
        name=None,
        network_labels=None,
        permissions=None,
        port_isolation=None,
        profile_required=None,
        qos=None,
        required=None,
        status=None,
        stp=None,
        usages=None,
        vdsm_name=None,
        vlan=None,
        vnic_profiles=None,
    ):
        # Identification fields are handled by the base class.
        super(Network, self).__init__(
            comment=comment,
            description=description,
            id=id,
            name=name,
        )
        # Route every remaining argument through its property setter so the
        # typed ones get validated.
        self.cluster = cluster
        self.data_center = data_center
        self.display = display
        self.dns_resolver_configuration = dns_resolver_configuration
        self.external_provider = external_provider
        self.external_provider_physical_network = external_provider_physical_network
        self.ip = ip
        self.mtu = mtu
        self.network_labels = network_labels
        self.permissions = permissions
        self.port_isolation = port_isolation
        self.profile_required = profile_required
        self.qos = qos
        self.required = required
        self.status = status
        self.stp = stp
        self.usages = usages
        self.vdsm_name = vdsm_name
        self.vlan = vlan
        self.vnic_profiles = vnic_profiles

    @property
    def cluster(self):
        """The `cluster` value (a `Cluster`)."""
        return self._cluster

    @cluster.setter
    def cluster(self, value):
        Struct._check_type('cluster', value, Cluster)
        self._cluster = value

    @property
    def data_center(self):
        """The `data_center` value (a `DataCenter`)."""
        return self._data_center

    @data_center.setter
    def data_center(self, value):
        Struct._check_type('data_center', value, DataCenter)
        self._data_center = value

    @property
    def display(self):
        """The `display` value (stored without validation)."""
        return self._display

    @display.setter
    def display(self, value):
        self._display = value

    @property
    def dns_resolver_configuration(self):
        """The `dns_resolver_configuration` value (a `DnsResolverConfiguration`)."""
        return self._dns_resolver_configuration

    @dns_resolver_configuration.setter
    def dns_resolver_configuration(self, value):
        Struct._check_type('dns_resolver_configuration', value, DnsResolverConfiguration)
        self._dns_resolver_configuration = value

    @property
    def external_provider(self):
        """The `external_provider` value (an `OpenStackNetworkProvider`)."""
        return self._external_provider

    @external_provider.setter
    def external_provider(self, value):
        Struct._check_type('external_provider', value, OpenStackNetworkProvider)
        self._external_provider = value

    @property
    def external_provider_physical_network(self):
        """The `external_provider_physical_network` value (a `Network`)."""
        return self._external_provider_physical_network

    @external_provider_physical_network.setter
    def external_provider_physical_network(self, value):
        Struct._check_type('external_provider_physical_network', value, Network)
        self._external_provider_physical_network = value

    @property
    def ip(self):
        """The `ip` value (an `Ip`)."""
        return self._ip

    @ip.setter
    def ip(self, value):
        Struct._check_type('ip', value, Ip)
        self._ip = value

    @property
    def mtu(self):
        """The `mtu` value (stored without validation)."""
        return self._mtu

    @mtu.setter
    def mtu(self, value):
        self._mtu = value

    @property
    def network_labels(self):
        """The `network_labels` value (stored without validation)."""
        return self._network_labels

    @network_labels.setter
    def network_labels(self, value):
        self._network_labels = value

    @property
    def permissions(self):
        """The `permissions` value (stored without validation)."""
        return self._permissions

    @permissions.setter
    def permissions(self, value):
        self._permissions = value

    @property
    def port_isolation(self):
        """The `port_isolation` value (stored without validation)."""
        return self._port_isolation

    @port_isolation.setter
    def port_isolation(self, value):
        self._port_isolation = value

    @property
    def profile_required(self):
        """The `profile_required` value (stored without validation)."""
        return self._profile_required

    @profile_required.setter
    def profile_required(self, value):
        self._profile_required = value

    @property
    def qos(self):
        """The `qos` value (a `Qos`)."""
        return self._qos

    @qos.setter
    def qos(self, value):
        Struct._check_type('qos', value, Qos)
        self._qos = value

    @property
    def required(self):
        """The `required` value (stored without validation)."""
        return self._required

    @required.setter
    def required(self, value):
        self._required = value

    @property
    def status(self):
        """The `status` value (a `NetworkStatus`)."""
        return self._status

    @status.setter
    def status(self, value):
        Struct._check_type('status', value, NetworkStatus)
        self._status = value

    @property
    def stp(self):
        """The `stp` value (stored without validation)."""
        return self._stp

    @stp.setter
    def stp(self, value):
        self._stp = value

    @property
    def usages(self):
        """The `usages` value (stored without validation)."""
        return self._usages

    @usages.setter
    def usages(self, value):
        self._usages = value

    @property
    def vdsm_name(self):
        """The `vdsm_name` value (stored without validation)."""
        return self._vdsm_name

    @vdsm_name.setter
    def vdsm_name(self, value):
        self._vdsm_name = value

    @property
    def vlan(self):
        """The `vlan` value (a `Vlan`)."""
        return self._vlan

    @vlan.setter
    def vlan(self, value):
        Struct._check_type('vlan', value, Vlan)
        self._vlan = value

    @property
    def vnic_profiles(self):
        """The `vnic_profiles` value (stored without validation)."""
        return self._vnic_profiles

    @vnic_profiles.setter
    def vnic_profiles(self, value):
        self._vnic_profiles = value
class NetworkAttachment(Identified):
    """Attachment linking a `Network` to a `Host`/`HostNic`.

    Attributes with a declared type are validated in their setters via
    ``Struct._check_type``; the remaining attributes are stored unchanged.
    Properties below are kept in alphabetical order.
    """

    def __init__(
        self,
        comment=None,
        description=None,
        dns_resolver_configuration=None,
        host=None,
        host_nic=None,
        id=None,
        in_sync=None,
        ip_address_assignments=None,
        name=None,
        network=None,
        properties=None,
        qos=None,
        reported_configurations=None,
    ):
        # Identification fields are handled by the base class.
        super(NetworkAttachment, self).__init__(
            comment=comment,
            description=description,
            id=id,
            name=name,
        )
        # Route every remaining argument through its property setter so the
        # typed ones get validated.
        self.dns_resolver_configuration = dns_resolver_configuration
        self.host = host
        self.host_nic = host_nic
        self.in_sync = in_sync
        self.ip_address_assignments = ip_address_assignments
        self.network = network
        self.properties = properties
        self.qos = qos
        self.reported_configurations = reported_configurations

    @property
    def dns_resolver_configuration(self):
        """The `dns_resolver_configuration` value (a `DnsResolverConfiguration`)."""
        return self._dns_resolver_configuration

    @dns_resolver_configuration.setter
    def dns_resolver_configuration(self, value):
        Struct._check_type('dns_resolver_configuration', value, DnsResolverConfiguration)
        self._dns_resolver_configuration = value

    @property
    def host(self):
        """The `host` value (a `Host`)."""
        return self._host

    @host.setter
    def host(self, value):
        Struct._check_type('host', value, Host)
        self._host = value

    @property
    def host_nic(self):
        """The `host_nic` value (a `HostNic`)."""
        return self._host_nic

    @host_nic.setter
    def host_nic(self, value):
        Struct._check_type('host_nic', value, HostNic)
        self._host_nic = value

    @property
    def in_sync(self):
        """The `in_sync` value (stored without validation)."""
        return self._in_sync

    @in_sync.setter
    def in_sync(self, value):
        self._in_sync = value

    @property
    def ip_address_assignments(self):
        """The `ip_address_assignments` value (stored without validation)."""
        return self._ip_address_assignments

    @ip_address_assignments.setter
    def ip_address_assignments(self, value):
        self._ip_address_assignments = value

    @property
    def network(self):
        """The `network` value (a `Network`)."""
        return self._network

    @network.setter
    def network(self, value):
        Struct._check_type('network', value, Network)
        self._network = value

    @property
    def properties(self):
        """The `properties` value (stored without validation)."""
        return self._properties

    @properties.setter
    def properties(self, value):
        self._properties = value

    @property
    def qos(self):
        """The `qos` value (a `Qos`)."""
        return self._qos

    @qos.setter
    def qos(self, value):
        Struct._check_type('qos', value, Qos)
        self._qos = value

    @property
    def reported_configurations(self):
        """The `reported_configurations` value (stored without validation)."""
        return self._reported_configurations

    @reported_configurations.setter
    def reported_configurations(self, value):
        self._reported_configurations = value
class NetworkConfiguration(Struct):
def __init__(
self,
dns=None,
nics=None,
):
super(NetworkConfiguration, self).__init__(
)
self.dns = dns
self.nics = nics
@property
def nics(self):
"""
Returns the value of the `nics` property.
"""
return self._nics
@nics.setter
def nics(self, value):
"""
Sets the value of the `nics` | |
'within a code block. The local variables of a code block can be\n'
'determined by scanning the entire text of the block for name '
'binding\n'
'operations.\n'
'\n'
'If the global statement occurs within a block, all uses of the '
'name\n'
'specified in the statement refer to the binding of that name in '
'the\n'
'top-level namespace. Names are resolved in the top-level namespace '
'by\n'
'searching the global namespace, i.e. the namespace of the module\n'
'containing the code block, and the builtins namespace, the '
'namespace\n'
'of the module "__builtin__". The global namespace is searched '
'first.\n'
'If the name is not found there, the builtins namespace is '
'searched.\n'
'The global statement must precede all uses of the name.\n'
'\n'
'The builtins namespace associated with the execution of a code '
'block\n'
'is actually found by looking up the name "__builtins__" in its '
'global\n'
'namespace; this should be a dictionary or a module (in the latter '
'case\n'
"the module's dictionary is used). By default, when in the "
'"__main__"\n'
'module, "__builtins__" is the built-in module "__builtin__" (note: '
'no\n'
'\'s\'); when in any other module, "__builtins__" is an alias for '
'the\n'
'dictionary of the "__builtin__" module itself. "__builtins__" can '
'be\n'
'set to a user-created dictionary to create a weak form of '
'restricted\n'
'execution.\n'
'\n'
'**CPython implementation detail:** Users should not touch\n'
'"__builtins__"; it is strictly an implementation detail. Users\n'
'wanting to override values in the builtins namespace should '
'"import"\n'
'the "__builtin__" (no \'s\') module and modify its attributes\n'
'appropriately.\n'
'\n'
'The namespace for a module is automatically created the first time '
'a\n'
'module is imported. The main module for a script is always '
'called\n'
'"__main__".\n'
'\n'
'The "global" statement has the same scope as a name binding '
'operation\n'
'in the same block. If the nearest enclosing scope for a free '
'variable\n'
'contains a global statement, the free variable is treated as a '
'global.\n'
'\n'
'A class definition is an executable statement that may use and '
'define\n'
'names. These references follow the normal rules for name '
'resolution.\n'
'The namespace of the class definition becomes the attribute '
'dictionary\n'
'of the class. Names defined at the class scope are not visible '
'in\n'
'methods.\n'
'\n'
'\n'
'Interaction with dynamic features\n'
'=================================\n'
'\n'
'There are several cases where Python statements are illegal when '
'used\n'
'in conjunction with nested scopes that contain free variables.\n'
'\n'
'If a variable is referenced in an enclosing scope, it is illegal '
'to\n'
'delete the name. An error will be reported at compile time.\n'
'\n'
'If the wild card form of import --- "import *" --- is used in a\n'
'function and the function contains or is a nested block with free\n'
'variables, the compiler will raise a "SyntaxError".\n'
'\n'
'If "exec" is used in a function and the function contains or is a\n'
'nested block with free variables, the compiler will raise a\n'
'"SyntaxError" unless the exec explicitly specifies the local '
'namespace\n'
'for the "exec". (In other words, "exec obj" would be illegal, '
'but\n'
'"exec obj in ns" would be legal.)\n'
'\n'
'The "eval()", "execfile()", and "input()" functions and the '
'"exec"\n'
'statement do not have access to the full environment for '
'resolving\n'
'names. Names may be resolved in the local and global namespaces '
'of\n'
'the caller. Free variables are not resolved in the nearest '
'enclosing\n'
'namespace, but in the global namespace. [1] The "exec" statement '
'and\n'
'the "eval()" and "execfile()" functions have optional arguments '
'to\n'
'override the global and local namespace. If only one namespace '
'is\n'
'specified, it is used for both.\n',
'numbers': '\n'
'Numeric literals\n'
'****************\n'
'\n'
'There are four types of numeric literals: plain integers, long\n'
'integers, floating point numbers, and imaginary numbers. There '
'are no\n'
'complex literals (complex numbers can be formed by adding a real\n'
'number and an imaginary number).\n'
'\n'
'Note that numeric literals do not include a sign; a phrase like '
'"-1"\n'
'is actually an expression composed of the unary operator \'"-"\' '
'and the\n'
'literal "1".\n',
'numeric-types': '\n'
'Emulating numeric types\n'
'***********************\n'
'\n'
'The following methods can be defined to emulate numeric '
'objects.\n'
'Methods corresponding to operations that are not supported '
'by the\n'
'particular kind of number implemented (e.g., bitwise '
'operations for\n'
'non-integral numbers) should be left undefined.\n'
'\n'
'object.__add__(self, other)\n'
'object.__sub__(self, other)\n'
'object.__mul__(self, other)\n'
'object.__floordiv__(self, other)\n'
'object.__mod__(self, other)\n'
'object.__divmod__(self, other)\n'
'object.__pow__(self, other[, modulo])\n'
'object.__lshift__(self, other)\n'
'object.__rshift__(self, other)\n'
'object.__and__(self, other)\n'
'object.__xor__(self, other)\n'
'object.__or__(self, other)\n'
'\n'
' These methods are called to implement the binary '
'arithmetic\n'
' operations ("+", "-", "*", "//", "%", "divmod()", '
'"pow()", "**",\n'
' "<<", ">>", "&", "^", "|"). For instance, to evaluate '
'the\n'
' expression "x + y", where *x* is an instance of a class '
'that has an\n'
' "__add__()" method, "x.__add__(y)" is called. The '
'"__divmod__()"\n'
' method should be the equivalent to using '
'"__floordiv__()" and\n'
' "__mod__()"; it should not be related to "__truediv__()" '
'(described\n'
' below). Note that "__pow__()" should be defined to '
'accept an\n'
' optional third argument if the ternary version of the '
'built-in\n'
' "pow()" function is to be supported.\n'
'\n'
' If one of those methods does not support the operation '
'with the\n'
' supplied arguments, it should return "NotImplemented".\n'
'\n'
'object.__div__(self, other)\n'
'object.__truediv__(self, other)\n'
'\n'
' The division operator ("/") is implemented by these '
'methods. The\n'
' "__truediv__()" method is used when '
'"__future__.division" is in\n'
' effect, otherwise "__div__()" is used. If only one of '
'these two\n'
' methods is defined, the object will not support division '
'in the\n'
' alternate context; "TypeError" will be raised instead.\n'
'\n'
'object.__radd__(self, other)\n'
'object.__rsub__(self, other)\n'
'object.__rmul__(self, other)\n'
'object.__rdiv__(self, other)\n'
'object.__rtruediv__(self, other)\n'
'object.__rfloordiv__(self, other)\n'
'object.__rmod__(self, other)\n'
'object.__rdivmod__(self, other)\n'
'object.__rpow__(self, other)\n'
'object.__rlshift__(self, other)\n'
'object.__rrshift__(self, other)\n'
'object.__rand__(self, other)\n'
'object.__rxor__(self, other)\n'
'object.__ror__(self, other)\n'
'\n'
' These methods are called to implement the binary '
'arithmetic\n'
' operations ("+", "-", "*", "/", "%", "divmod()", '
'"pow()", "**",\n'
' "<<", ">>", "&", "^", "|") with reflected (swapped) '
'operands.\n'
' These functions are only called if the left operand does '
'not\n'
' support the corresponding operation and the operands are '
'of\n'
' different types. [2] For instance, to evaluate the '
'expression "x -\n'
' y", where *y* is an instance of a class that has an '
'"__rsub__()"\n'
' method, "y.__rsub__(x)" is called if "x.__sub__(y)" '
'returns\n'
' *NotImplemented*.\n'
'\n'
' Note that ternary "pow()" will not try calling '
'"__rpow__()" (the\n'
' coercion rules would become too complicated).\n'
'\n'
" Note: If the right operand's type is a subclass of the "
'left\n'
" operand's type and that subclass provides the "
'reflected method\n'
' for the operation, this method will be called before '
'the left\n'
" operand's non-reflected method. This behavior allows "
'subclasses\n'
" to override their ancestors' operations.\n"
'\n'
'object.__iadd__(self, other)\n'
'object.__isub__(self, other)\n'
'object.__imul__(self, other)\n'
'object.__idiv__(self, other)\n'
'object.__itruediv__(self, other)\n'
'object.__ifloordiv__(self, other)\n'
'object.__imod__(self, other)\n'
'object.__ipow__(self, other[, modulo])\n'
'object.__ilshift__(self, other)\n'
'object.__irshift__(self, other)\n'
'object.__iand__(self, other)\n'
'object.__ixor__(self, other)\n'
'object.__ior__(self, other)\n'
'\n'
' These methods are called to implement the augmented '
'arithmetic\n'
' assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", '
'"<<=",\n'
' ">>=", "&=", "^=", "|="). These methods should attempt '
'to do the\n'
' operation in-place (modifying *self*) and return the '
'result (which\n'
' could be, but does not have to be, *self*). If a '
'specific method\n'
' is not defined, the augmented assignment falls back to '
'the normal\n'
' methods. For instance, to execute the statement "x += '
'y", where\n'
' *x* is an instance of a class that has an | |
<gh_stars>0
# chinascope_algotrade
#
# Copyright 2011-2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from abc import abstractmethod, ABCMeta
from algotrade import const
import six
from algotrade.broker import broker
# from algotrade.broker import fillstrategy
# from algotrade import warninghelpers
# from chinascope_algotrade import logger
# import chinascope_algotrade.bar
######################################################################
# Commission models
class Commission(six.with_metaclass(ABCMeta)):
    """Abstract base for commission schemes.

    .. note::
        This is a base class and should not be used directly; concrete
        subclasses must implement :meth:`calculate`.
    """

    @abstractmethod
    def calculate(self, order, price, quantity):
        """Return the commission charged for one order execution.

        :param order: The order being executed.
        :type order: :class:`chinascope_algotrade.broker.BaseOrder`.
        :param price: The price for each share.
        :type price: float.
        :param quantity: The order size.
        :type quantity: float.
        :rtype: float.
        """
        raise NotImplementedError()
class NoCommission(Commission):
    """Commission scheme that charges nothing for any order."""

    def calculate(self, order, price, quantity):
        # Every execution is free under this scheme.
        return 0
class FixedPerTrade(Commission):
    """Commission scheme that charges a flat amount for the whole trade.

    :param amount: The commission for an order.
    :type amount: float.
    """

    def __init__(self, amount):
        self.__amount = amount

    def calculate(self, order, price, quantity):
        # The flat fee applies to the first fill only: once execution_info
        # is set, subsequent (partial) fills are not charged again.
        if order.execution_info is None:
            return self.__amount
        return 0
class TradePercentage(Commission):
    """A :class:`Commission` class that charges a percentage of the whole trade.

    :param percentage: The percentage to charge. 0.01 means 1%, and so on.
        It must be smaller than 1.
    :type percentage: float.

    :raises ValueError: If ``percentage`` is not smaller than 1.
    """

    def __init__(self, percentage):
        # Validate with a real exception instead of `assert`: asserts are
        # stripped when Python runs with the -O flag, which would silently
        # accept an invalid percentage. (`not percentage < 1` keeps the
        # original behavior of also rejecting NaN.)
        if not percentage < 1:
            raise ValueError("percentage must be smaller than 1: %r" % (percentage,))
        self.__percentage = percentage

    def calculate(self, order, price, quantity):
        """Return ``price * quantity * percentage`` for this execution."""
        return price * quantity * self.__percentage
######################################################################
# Orders
class BacktestingOrder(object):
    """Mixin adding backtesting-specific state to the order classes below."""

    def __init__(self):
        # When the broker accepted the order; None until that happens.
        self.__accepted_when = None

    @property
    def accepted_date_time(self):
        """The date/time the order was accepted, or None if it wasn't yet."""
        return self.__accepted_when

    @accepted_date_time.setter
    def accepted_date_time(self, date_time):
        self.__accepted_when = date_time

    def process(self, broker_, bar_):
        """Subclass hook: run the fill strategy for the concrete order type.

        Returns a FillInfo, or None if the order should not be filled.
        """
        raise NotImplementedError()
class MarketOrder(broker.MarketOrder, BacktestingOrder):
    """Market order with the backtesting mixin attached."""

    def __init__(self, action, instrument, quantity, on_close, instrument_traits):
        # Initialize both bases explicitly (no cooperative super() chain here).
        broker.MarketOrder.__init__(self, action, instrument, quantity, on_close, instrument_traits)
        BacktestingOrder.__init__(self)

    def process(self, broker_, bar_):
        # Delegate the fill decision to the broker's pluggable fill strategy.
        return broker_.fill_strategy.fillMarketOrder(broker_, self, bar_)
class LimitOrder(broker.LimitOrder, BacktestingOrder):
    """Limit order with the backtesting mixin attached."""

    def __init__(self, action, instrument, limit_price, quantity, instrument_traits):
        # Initialize both bases explicitly (no cooperative super() chain here).
        broker.LimitOrder.__init__(self, action, instrument, limit_price, quantity, instrument_traits)
        BacktestingOrder.__init__(self)

    def process(self, broker_, bar_):
        # Delegate the fill decision to the broker's pluggable fill strategy.
        return broker_.fill_strategy.fillLimitOrder(broker_, self, bar_)
class StopOrder(broker.StopOrder, BacktestingOrder):
    """Stop order with the backtesting mixin attached."""

    def __init__(self, action, instrument, stop_price, quantity, instrument_traits):
        broker.StopOrder.__init__(self, action, instrument, stop_price, quantity, instrument_traits)
        BacktestingOrder.__init__(self)
        # Becomes True once the stop price has been touched.
        self.__stop_hit = False

    @property
    def stop_hit(self):
        return self.__stop_hit

    @stop_hit.setter
    def stop_hit(self, stop_hit):
        self.__stop_hit = stop_hit

    def process(self, broker_, bar_):
        # Delegate the fill decision to the broker's pluggable fill strategy.
        return broker_.fill_strategy.fillStopOrder(broker_, self, bar_)
# Background reading on stop-limit order semantics:
# http://www.sec.gov/answers/stoplim.htm
# http://www.interactivebrokers.com/en/trading/orders/stopLimit.php
class StopLimitOrder(broker.StopLimitOrder, BacktestingOrder):
    """Stop-limit order with the backtesting mixin attached."""

    def __init__(self, action, instrument, stop_price, limit_price, quantity, instrument_traits):
        broker.StopLimitOrder.__init__(self, action, instrument, stop_price, limit_price, quantity, instrument_traits)
        BacktestingOrder.__init__(self)
        # Set to True when the stop price is hit and the limit order activates.
        self.__stop_hit = False

    @property
    def stop_hit(self):
        return self.__stop_hit

    @stop_hit.setter
    def stop_hit(self, stop_hit):
        self.__stop_hit = stop_hit

    def is_limit_order_active(self):
        # Deprecated since v0.15; kept for callers that predate `stop_hit`.
        return self.__stop_hit

    def process(self, broker_, bar_):
        # Delegate the fill decision to the broker's pluggable fill strategy.
        return broker_.fill_strategy.fillStopLimitOrder(broker_, self, bar_)
######################################################################
# BackTestingBroker
class BackTestingBroker(broker.BaseBroker):
"""Backtesting broker.
:param cash: The initial amount of cash.
:type cash: int/float.
:param barfeed: The bar feed that will provide the bars.
:type barfeed: :class:`chinascope_algotrade.barfeed.CSVBarFeed`
:param commission: An object responsible for calculating order commissions.
:type commission: :class:`Commission`
"""
    # Logger name for this broker (the logger wiring below is commented out).
    LOGGER_NAME = "broker.backtesting"
    def __init__(self, cash, barfeed, commission=None):
        """Set up broker state and subscribe to the bar feed.

        :param cash: The initial amount of cash; must be >= 0.
        :param barfeed: The bar feed that will drive this broker.
        :param commission: Commission scheme; defaults to :class:`NoCommission`.
        """
        broker.BaseBroker.__init__(self)
        assert (cash >= 0)
        self.__cash = cash
        if commission is None:
            self.__commission = NoCommission()
        else:
            self.__commission = commission
        self.__shares = {}  # instrument -> number of shares held
        self.__active_orders = {}  # order id -> order
        self.__use_adjustedValues = False
        # NOTE(review): the fill-strategy and logger initializations are
        # commented out, yet the `fill_strategy` and `logger` accessors below
        # read these attributes; until they are set (e.g. via the fill_strategy
        # setter) those accessors raise AttributeError -- confirm intended.
        # self.__fill_strategy = fillstrategy.DefaultStrategy()
        # self.__logger = logger.logger(BackTestingBroker.LOGGER_NAME)
        # It is VERY important that the broker subscribes to barfeed events before the strategy.
        barfeed.get_new_values_event().subscribe(self.on_bars)
        self.__barfeed = barfeed
        self.__allow_negative_cash = False
        self.__nextOrderId = 1  # order ids start at 1 and grow monotonically
def _get_next_order_id(self):
ret = self.__nextOrderId
self.__nextOrderId += 1
return ret
def _get_bar(self, bars, instrument):
ret = bars.bar(instrument)
if ret is None:
ret = self.__barfeed.get_last_bar(instrument)
return ret
def _register_order(self, order):
assert (order.id not in self.__active_orders)
assert (order.id is not None)
self.__active_orders[order.id] = order
def _unregister_order(self, order):
assert (order.id in self.__active_orders)
assert (order.id is not None)
del self.__active_orders[order.id]
    @property
    def logger(self):
        # NOTE(review): `self.__logger` is never assigned in the visible code
        # (its initialization in __init__ is commented out), so accessing this
        # property raises AttributeError -- confirm before relying on it.
        return self.__logger
    def set_allow_negative_cash(self, allow_negative_cash):
        # When True, order executions may drive the cash balance below zero.
        self.__allow_negative_cash = allow_negative_cash
def get_cash(self, include_short=True):
ret = self.__cash
if not include_short and self.__barfeed.current_bars is not None:
bars = self.__barfeed.current_bars
for instrument, shares in self.__shares.iteritems():
if shares < 0:
instrument_price = self._get_bar(bars, instrument).close(self.use_adjusted_values)
ret += instrument_price * shares
return ret
def set_cash(self, num_cash):
self.__cash = num_cash
    @property
    def commission(self):
        """Returns the strategy used to calculate order commissions.
        :rtype: :class:`Commission`.
        """
        return self.__commission
    @commission.setter
    def commission(self, commission):
        """Sets the strategy to use to calculate order commissions.
        :param commission: An object responsible for calculating order commissions.
        :type commission: :class:`Commission`.
        """
        self.__commission = commission
    @property
    def fill_strategy(self):
        """Returns the :class:`chinascope_algotrade.broker.fillstrategy.FillStrategy` currently set."""
        # NOTE(review): __fill_strategy is only ever assigned via the setter
        # below (its default initialization in __init__ is commented out), so
        # this raises AttributeError until a strategy has been set -- confirm.
        return self.__fill_strategy
    @fill_strategy.setter
    def fill_strategy(self, strategy):
        """Sets the :class:`chinascope_algotrade.broker.fillstrategy.FillStrategy` to use."""
        self.__fill_strategy = strategy
@property
def use_adjusted_values(self):
    """Return True if bars are priced using adjusted close values."""
    return self.__use_adjustedValues
def set_use_adjusted_values(self, use_adjusted, deprecationCheck=None):
    """Enable/disable pricing with adjusted close values.

    :param use_adjusted: True to use adjusted close values.
    :param deprecationCheck: Legacy parameter kept for backward compatibility;
        the associated deprecation warning is currently disabled.
    :raises Exception: if the bar feed does not supply adjusted close values.
    """
    # Deprecated since v0.15
    if not self.__barfeed.bars_have_adj_close():
        raise Exception("The barfeed doesn't support adjusted close values")
    if deprecationCheck is None:
        pass
        # warninghelpers.deprecation_warning(
        #     "set_use_adjusted_values will be deprecated in the next version. Please use set_use_adjusted_values on the strategy instead.",
        #     stacklevel=2
        # )
    self.__use_adjustedValues = use_adjusted
def active_orders(self, instrument=None):
    """Return the currently active orders, optionally restricted to *instrument*."""
    if instrument is None:
        return self.__active_orders.values()
    return [order for order in self.__active_orders.values()
            if order.instrument == instrument]
def get_pending_orders(self):
    """Deprecated alias for :meth:`active_orders`, kept for backward compatibility."""
    # warninghelpers.deprecation_warning(
    #     "get_pending_orders will be deprecated in the next version. Please use active_orders instead.",
    #     stacklevel=2
    # )
    return self.active_orders()
@property
def _current_datetime(self):
    """Return the bar feed's current datetime."""
    return self.__barfeed.current_datetime
def instrument_traits(self, instrument):
    """Return the traits for *instrument*; integer share quantities for all instruments."""
    return broker.IntegerTraits()
def shares(self, instrument):
    """Return the number of shares held for *instrument* (0 if no position)."""
    return self.__shares.get(instrument, 0)
def positions(self):
    """Return the instrument -> shares mapping (the live internal dict, not a copy)."""
    return self.__shares
@property
def active_instruments(self):
    """Return the instruments with a non-zero (long or short) position."""
    # items() instead of the Python-2-only iteritems() so this also runs
    # on Python 3; behavior is identical.
    return [instrument for instrument, shares in self.__shares.items() if shares != 0]
def __get_equity_with_bars(self, bars):
    """Return cash plus the value of all share positions priced with *bars*.

    If *bars* is None only the cash balance is returned.
    """
    ret = self.cash
    if bars is not None:
        # items() instead of the Python-2-only iteritems() so this also
        # runs on Python 3; behavior is identical.
        for instrument, shares in self.__shares.items():
            instrument_price = self._get_bar(bars, instrument).close(self.use_adjusted_values)
            ret += instrument_price * shares
    return ret
@property
def equity(self):
    """Returns the portfolio value (cash + shares)."""
    return self.__get_equity_with_bars(self.__barfeed.current_bars)
# Tries to commit an order execution.
def commit_order_execution(self, order, date_time, fill_info):
    """Attempt to execute *order* at the price/quantity given in *fill_info*.

    Updates cash and share positions, notifies the fill strategy, and emits a
    FILLED or PARTIALLY_FILLED order event. If the resulting cash would be
    negative and negative cash is not allowed, the fill is skipped and only
    logged at debug level.
    """
    price = fill_info.price
    quantity = fill_info.quantity
    # Cash delta: negative for buys, positive for sells.
    if order.is_a_buy_order:
        cost = price * quantity * -1
        assert (cost < 0)
        shares_delta = quantity
    elif order.is_a_sell_order:
        cost = price * quantity
        assert (cost > 0)
        shares_delta = quantity * -1
    else:  # Unknown action
        assert False
    commission = self.commission.calculate(order, price, quantity)
    cost -= commission
    resulting_cash = self.cash + cost
    # Check that we're ok on cash after the commission.
    if resulting_cash >= 0 or self.__allow_negative_cash:
        # Update the order before updating internal state since add_execution_info may raise.
        # add_execution_info should switch the order state.
        order_execution_info = broker.OrderExecutionInfo(price, quantity, commission, date_time)
        order.add_execution_info(order_execution_info)
        # Commit the order execution.
        self.__cash = resulting_cash
        updated_shares = order.instrument_traits.round_quantity(
            self.shares(order.instrument) + shares_delta
        )
        # Flat positions are removed from the map rather than stored as 0.
        if updated_shares == 0:
            del self.__shares[order.instrument]
        else:
            self.__shares[order.instrument] = updated_shares
        # Let the strategy know that the order was filled.
        self.__fill_strategy.onOrderFilled(self, order)
        # Notify the order update
    if order.is_filled:
            self._unregister_order(order)
            self.notify_order_event(broker.OrderEvent(order, broker.OrderEvent.Type.FILLED, order_execution_info))
    elif order.is_partially_filled:
            self.notify_order_event(
                broker.OrderEvent(order, broker.OrderEvent.Type.PARTIALLY_FILLED, order_execution_info)
            )
    else:
            assert False
    # NOTE(review): the if/elif above must remain inside the success branch;
    # indentation restored to match the original nesting.
def submit_order(self, order):
    """Assign an id, register *order*, and move it from INITIAL to SUBMITTED.

    :raises Exception: if the order was already processed (not in INITIAL state).
    """
    if order.is_initial:
        order.submit_date_time(self._get_next_order_id(), self._current_datetime)
        self._register_order(order)
        # Switch from INITIAL -> SUBMITTED
        # IMPORTANT: Do not emit an event for this switch because when using the position interface
        # the order is not yet mapped to the position and Position.on_order_updated will get called.
        order.switch_state(broker.BaseOrder.State.SUBMITTED)
    else:
        raise Exception("The order was already processed")
# Return True if further processing is needed.
def __pre_process_order(self, order, bar_):
    """Cancel expired non-GTC orders; return True if the order still needs processing."""
    ret = True
    # For non-GTC orders we need to check if the order has expired.
    if not order.good_till_canceled:
        # An order accepted on a previous session date is considered expired.
        expired = bar_.date_time.date() > order.accepted_date_time.date()
        # Cancel the order if it is expired.
        if expired:
            ret = False
            self._unregister_order(order)
            order.switch_state(broker.BaseOrder.State.CANCELED)
            self.notify_order_event(broker.OrderEvent(order, broker.OrderEvent.Type.CANCELED, "Expired"))
    return ret
def __post_process_order(self, order, bar_):
# For non-GTC orders and daily (or greater) | |
expr[str]"""
with self.assertRaises(
TypeError, msg="failed to raise expected error using string multiplier"
):
expr2 = pp.Word(pp.alphas)["2"]
def testParseResultsNewEdgeCases(self):
    """test less common paths of ParseResults.__new__()"""
    # ParseResults constructed from None -> empty results
    from_none = pp.ParseResults(None)
    print(from_none.dump())
    self.assertParseResultsEquals(
        from_none, [], msg="ParseResults(None) should return empty ParseResults"
    )
    # an integer name is accepted and coerced to str
    int_named = pp.ParseResults(name=12)
    print(int_named.dump())
    self.assertEqual(
        "12",
        int_named.getName(),
        "ParseResults int name should be accepted and converted to str",
    )
    # ParseResults constructed from a generator
    from_gen = pp.ParseResults(a for a in range(1, 6))
    print(from_gen.dump())
    self.assertParseResultsEquals(
        from_gen, [1, 2, 3, 4, 5], msg="issue initializing ParseResults w/ gen type"
    )
def testParseResultsReversed(self):
    """test simple case of reversed(ParseResults)"""
    source = "1 2 3 4 5"
    parsed = pp.OneOrMore(pp.Word(pp.nums)).parseString(source)
    backwards = list(reversed(parsed))
    print(backwards)
    self.assertEqual(
        ["5", "4", "3", "2", "1"],
        backwards,
        msg="issue calling reversed(ParseResults)",
    )
def testParseResultsValues(self):
    """test simple case of ParseResults.values()"""
    grammar = pp.Word(pp.alphas)("first") + pp.Word(pp.alphas)("second")
    parsed = grammar.parseString("spam eggs")
    actual_values = set(parsed.values())
    print(actual_values)
    self.assertEqual(
        {"spam", "eggs"}, actual_values, msg="issue calling ParseResults.values()"
    )
def testParseResultsAppend(self):
    """test simple case of ParseResults.append()"""
    def append_sum(tokens):
        # tack the integer sum of all parsed numbers onto the results
        tokens.append(sum(int(t) for t in tokens))
    number_list = pp.OneOrMore(pp.Word(pp.nums)).addParseAction(append_sum)
    parsed = number_list.parseString("0 123 321")
    print(parsed.dump())
    self.assertParseResultsEquals(
        parsed, ["0", "123", "321", 444], msg="issue with ParseResults.append()"
    )
def testParseResultsClear(self):
    """test simple case of ParseResults.clear()"""
    grammar = pp.Word(pp.alphas)("first") + pp.Word(pp.alphas)("second")
    parsed = grammar.parseString("spam eggs")
    print(parsed.dump())
    self.assertParseResultsEquals(
        parsed, ["spam", "eggs"], msg="issue with ParseResults before clear()"
    )
    # clearing must empty both the list view and the named-results dict
    parsed.clear()
    print(parsed.dump())
    self.assertParseResultsEquals(
        parsed,
        expected_list=[],
        expected_dict={},
        msg="issue with ParseResults.clear()",
    )
def testParseResultsExtendWithString(self):
    """test ParseResults.extend() with input of type str"""
    def make_palindrome(tokens):
        # append each matched word reversed, in reverse order, forming a palindrome
        tokens.extend(reversed([t[::-1] for t in tokens]))
    words = pp.OneOrMore(pp.Word(pp.alphas))
    parsed = words.addParseAction(make_palindrome).parseString("abc def ghi")
    print(parsed.dump())
    self.assertParseResultsEquals(
        parsed,
        ["abc", "def", "ghi", "ihg", "fed", "cba"],
        msg="issue with ParseResults.extend(str)",
    )
def testParseResultsExtendWithParseResults(self):
    """test ParseResults.extend() with input of type ParseResults"""
    words = pp.OneOrMore(pp.Word(pp.alphas))
    first = words.parseString("spam eggs")
    second = words.parseString("foo bar")
    first.extend(second)
    print(first.dump())
    self.assertParseResultsEquals(
        first,
        ["spam", "eggs", "foo", "bar"],
        msg="issue with ParseResults.extend(ParseResults)",
    )
def testParseResultsFromDict(self):
    """test helper classmethod ParseResults.from_dict()"""
    # renamed from 'dict' to avoid shadowing the builtin
    src = {
        "first": "123",
        "second": 456,
        "third": {"threeStr": "789", "threeInt": 789},
    }
    name = "trios"
    result = pp.ParseResults.from_dict(src, name=name)
    print(result.dump())
    expected = {name: src}
    self.assertParseResultsEquals(
        result,
        expected_dict=expected,
        # fixed typo in failure message ("from _dict" -> "from_dict")
        msg="issue creating ParseResults.from_dict()",
    )
def testParseResultsDir(self):
    """test dir(ParseResults)"""
    source_mapping = {"first": "123", "second": "456", "third": "789"}
    name = "trios"
    listing = dir(pp.ParseResults.from_dict(source_mapping, name=name))
    print(listing)
    # both the named result and regular API methods must show up in dir()
    self.assertIn(
        name, listing, msg="name value wasn't returned by dir(ParseResults)"
    )
    self.assertIn(
        "asList", listing, msg="asList was not returned by dir(ParseResults)"
    )
def testParseResultsInsert(self):
    """test ParseResults.insert() with named tokens"""
    from random import randint
    parsed = pp.Word(pp.alphas)[...].parseString("A B C D E F G H I J")
    reference = parsed.asList()
    print(parsed)
    print(reference)
    # mirror random insertions into a plain list and compare at the end
    for ch in "abcdefghij":
        pos = randint(-5, 5)
        parsed.insert(pos, ch)
        reference.insert(pos, ch)
    print(parsed)
    print(reference)
    self.assertParseResultsEquals(
        parsed, reference, msg="issue with ParseResults.insert()"
    )
def testIgnoreString(self):
    """test ParserElement.ignore() passed a string arg"""
    source = "I like totally like love pickles"
    parsed = pp.Word(pp.alphas)[...].ignore("like").parseString(source)
    print(parsed)
    self.assertParseResultsEquals(
        parsed, ["I", "totally", "love", "pickles"], msg="issue with ignore(string)"
    )
def testParseHTMLTags(self):
    """Verify makeHTMLTags() against assorted <BODY> start/end tag forms."""
    test = """
        <BODY>
        <BODY BGCOLOR="#00FFCC">
        <BODY BGCOLOR="#00FFAA"/>
        <BODY BGCOLOR='#00FFBB' FGCOLOR=black>
        <BODY/>
        </BODY>
        """
    # (tag type, is-empty-tag, expected BGCOLOR, expected FGCOLOR) per match
    results = [
        ("startBody", False, "", ""),
        ("startBody", False, "#00FFCC", ""),
        ("startBody", True, "#00FFAA", ""),
        ("startBody", False, "#00FFBB", "black"),
        ("startBody", True, "", ""),
        ("endBody", False, "", ""),
    ]
    bodyStart, bodyEnd = pp.makeHTMLTags("BODY")
    resIter = iter(results)
    for t, s, e in (bodyStart | bodyEnd).scanString(test):
        print(test[s:e], "->", t)
        (expectedType, expectedEmpty, expectedBG, expectedFG) = next(resIter)
        print(t.dump())
        if "startBody" in t:
            self.assertEqual(
                expectedEmpty,
                bool(t.empty),
                "expected {} token, got {}".format(
                    expectedEmpty and "empty" or "not empty",
                    t.empty and "empty" or "not empty",
                ),
            )
            self.assertEqual(
                expectedBG,
                t.bgcolor,
                "failed to match BGCOLOR, expected {}, got {}".format(
                    expectedBG, t.bgcolor
                ),
            )
            self.assertEqual(
                expectedFG,
                t.fgcolor,
                # BUG FIX: message previously formatted t.bgcolor here,
                # reporting the wrong actual value on FGCOLOR mismatches.
                "failed to match FGCOLOR, expected {}, got {}".format(
                    expectedFG, t.fgcolor
                ),
            )
        elif "endBody" in t:
            print("end tag")
        else:
            print("BAD!!!")
def testSetParseActionUncallableErr(self):
    """raise a TypeError in setParseAction() by adding uncallable arg"""
    expr = pp.Literal("A")("Achar")
    not_callable = 12
    with self.assertRaises(TypeError):
        expr.setParseAction(not_callable)
    # the expression itself must still parse after the failed call
    res = expr.parseString("A")
    print(res.dump())
def testMulWithNegativeNumber(self):
    """raise a ValueError in __mul__ by multiplying a negative number"""
    with self.assertRaises(ValueError):
        _ = pp.Literal("A")("Achar") * (-1)
def testMulWithEllipsis(self):
    """multiply an expression with Ellipsis as ``expr * ...`` to match ZeroOrMore"""
    zero_or_more = pp.Literal("A")("Achar") * ...
    parsed = zero_or_more.parseString("A")
    self.assertEqual(
        ["A"], parsed.asList(), "expected expr * ... to match ZeroOrMore"
    )
    print(parsed.dump())
def testUpcaseDowncaseUnicode(self):
    """Exercise upcaseTokens/downcaseTokens parse actions on unicode text."""
    from pyparsing import pyparsing_unicode as ppu
    import sys
    # Spanish sample containing non-ASCII characters.
    a = "\u00bfC\u00f3mo esta usted?"
    if not JYTHON_ENV:
        ualphas = ppu.alphas
    else:
        # Jython: build the alphabetic character set manually, skipping the
        # surrogate range 0xD800-0xDFFF.
        ualphas = "".join(
            chr(i)
            for i in list(range(0xD800)) + list(range(0xE000, sys.maxunicode))
            if chr(i).isalpha()
        )
    uword = pp.Word(ualphas).setParseAction(ppc.upcaseTokens)
    # NOTE(review): 'print' is deliberately shadowed to silence the rest of
    # this test's output; all subsequent print() calls in this method are no-ops.
    print = lambda *args: None
    print(uword.searchString(a))
    uword = pp.Word(ualphas).setParseAction(ppc.downcaseTokens)
    print(uword.searchString(a))
    kw = pp.Keyword("mykey", caseless=True).setParseAction(ppc.upcaseTokens)(
        "rname"
    )
    ret = kw.parseString("mykey")
    print(ret.rname)
    self.assertEqual(
        "MYKEY", ret.rname, "failed to upcase with named result (pyparsing_common)"
    )
    kw = pp.Keyword("MYKEY", caseless=True).setParseAction(ppc.downcaseTokens)(
        "rname"
    )
    ret = kw.parseString("mykey")
    print(ret.rname)
    self.assertEqual("mykey", ret.rname, "failed to upcase with named result")
    if not IRON_PYTHON_ENV:
        # test html data
        html = "<TR class=maintxt bgColor=#ffffff> \
            <TD vAlign=top>Производитель, модель</TD> \
            <TD vAlign=top><STRONG>BenQ-Siemens CF61</STRONG></TD> \
            "  # .decode('utf-8')
        # 'Manufacturer, model' (Russian)
        text_manuf = "Производитель, модель"
        manufacturer = pp.Literal(text_manuf)
        td_start, td_end = pp.makeHTMLTags("td")
        # NOTE(review): manuf_body is built but never applied to 'html' and
        # nothing is asserted here — looks like an unfinished test section.
        manuf_body = (
            td_start.suppress()
            + manufacturer
            + pp.SkipTo(td_end)("cells*")
            + td_end.suppress()
        )
def testParseUsingRegex(self):
    """Exercise pp.Regex with raw patterns, named groups, pre-compiled patterns,
    invalid patterns, and the empty-pattern warning."""
    import re
    signedInt = pp.Regex(r"[-+][0-9]+")
    unsignedInt = pp.Regex(r"[0-9]+")
    simpleString = pp.Regex(r'("[^\"]*")|(\'[^\']*\')')
    namedGrouping = pp.Regex(r'("(?P<content>[^\"]*)")')
    compiledRE = pp.Regex(re.compile(r"[A-Z]+"))
    # Local helper: returns True when the expression behaves as expected
    # (matches when shouldPass, fails to match when not).
    def testMatch(expression, instring, shouldPass, expectedString=None):
        if shouldPass:
            try:
                result = expression.parseString(instring)
                print(
                    "{} correctly matched {}".format(
                        repr(expression), repr(instring)
                    )
                )
                if expectedString != result[0]:
                    print("\tbut failed to match the pattern as expected:")
                    print(
                        "\tproduced %s instead of %s"
                        % (repr(result[0]), repr(expectedString))
                    )
                return True
            except pp.ParseException:
                print(
                    "%s incorrectly failed to match %s"
                    % (repr(expression), repr(instring))
                )
        else:
            try:
                result = expression.parseString(instring)
                print(
                    "{} incorrectly matched {}".format(
                        repr(expression), repr(instring)
                    )
                )
                print("\tproduced %s as a result" % repr(result[0]))
            except pp.ParseException:
                print(
                    "%s correctly failed to match %s"
                    % (repr(expression), repr(instring))
                )
                return True
        return False
    # These should fail
    self.assertTrue(
        testMatch(signedInt, "1234 foo", False), "Re: (1) passed, expected fail"
    )
    self.assertTrue(
        testMatch(signedInt, " +foo", False), "Re: (2) passed, expected fail"
    )
    self.assertTrue(
        testMatch(unsignedInt, "abc", False), "Re: (3) passed, expected fail"
    )
    self.assertTrue(
        testMatch(unsignedInt, "+123 foo", False), "Re: (4) passed, expected fail"
    )
    self.assertTrue(
        testMatch(simpleString, "foo", False), "Re: (5) passed, expected fail"
    )
    self.assertTrue(
        testMatch(simpleString, "\"foo bar'", False),
        "Re: (6) passed, expected fail",
    )
    self.assertTrue(
        testMatch(simpleString, "'foo bar\"", False),
        "Re: (7) passed, expected fail",
    )
    # These should pass
    self.assertTrue(
        testMatch(signedInt, " +123", True, "+123"),
        "Re: (8) failed, expected pass",
    )
    self.assertTrue(
        testMatch(signedInt, "+123", True, "+123"), "Re: (9) failed, expected pass"
    )
    self.assertTrue(
        testMatch(signedInt, "+123 foo", True, "+123"),
        "Re: (10) failed, expected pass",
    )
    self.assertTrue(
        testMatch(signedInt, "-0 foo", True, "-0"), "Re: (11) failed, expected pass"
    )
    self.assertTrue(
        testMatch(unsignedInt, "123 foo", True, "123"),
        "Re: (12) failed, expected pass",
    )
    self.assertTrue(
        testMatch(unsignedInt, "0 foo", True, "0"), "Re: (13) failed, expected pass"
    )
    self.assertTrue(
        testMatch(simpleString, '"foo"', True, '"foo"'),
        "Re: (14) failed, expected pass",
    )
    self.assertTrue(
        testMatch(simpleString, "'foo bar' baz", True, "'foo bar'"),
        "Re: (15) failed, expected pass",
    )
    self.assertTrue(
        testMatch(compiledRE, "blah", False), "Re: (16) passed, expected fail"
    )
    self.assertTrue(
        testMatch(compiledRE, "BLAH", True, "BLAH"),
        "Re: (17) failed, expected pass",
    )
    self.assertTrue(
        testMatch(namedGrouping, '"foo bar" baz', True, '"foo bar"'),
        # NOTE(review): message label "(16)" is a duplicate of the earlier
        # case; this is case (18).
        "Re: (16) failed, expected pass",
    )
    # Named-group access on the last match.
    ret = namedGrouping.parseString('"zork" blah')
    print(ret)
    print(list(ret.items()))
    print(ret.content)
    self.assertEqual("zork", ret.content, "named group lookup failed")
    self.assertEqual(
        simpleString.parseString('"zork" blah')[0],
        ret[0],
        "Regex not properly returning ParseResults for named vs. unnamed groups",
    )
    # An unbalanced pattern must be rejected at construction time.
    try:
        print("lets try an invalid RE")
        invRe = pp.Regex("(\"[^\"]*\")|('[^']*'")
    except Exception as e:
        print("successfully rejected an invalid RE:", end=" ")
        print(e)
    else:
        self.fail("failed to reject invalid RE")
    # An empty pattern must emit a SyntaxWarning.
    with self.assertWarns(
        SyntaxWarning, msg="failed to warn empty string passed to Regex"
    ):
        invRe = pp.Regex("")
def testRegexAsType(self):
test_str = "sldkjfj 123 456 lsdfkj"
print("return | |
from __future__ import absolute_import
from __future__ import print_function
import math
import functools
import ast
import inspect
import textwrap
from collections import OrderedDict
import veriloggen.core.vtypes as vtypes
import veriloggen.types.fixed as fxd
from veriloggen.seq.seq import make_condition
from veriloggen.fsm.fsm import FSM
from veriloggen.seq.seq import Seq
from veriloggen.stream.stream import Stream as BaseStream
from veriloggen.stream.stypes import Substream as BaseSubstream
from . import compiler
from . import thread
# Width (in bits) of the per-variable source/sink mode registers.
mode_width = 3
# Mode encodings used by the set_source* / set_sink* intrinsics.
mode_idle = vtypes.Int(0, mode_width, base=2)
mode_normal = vtypes.Int(1, mode_width, base=2)
mode_pattern = vtypes.Int(2, mode_width, base=2)
mode_multipattern = vtypes.Int(4, mode_width, base=2)
def TmpStream(m, clk, rst,
              datawidth=32, addrwidth=32,
              max_pattern_length=4, ram_sel_width=8,
              fsm_as_module=False):
    """Create a Stream with an auto-generated unique name.

    Bug fixes: arguments are now forwarded by keyword so that
    'ram_sel_width' no longer lands in Stream's 'max_multipattern_length'
    positional slot, and the caller's 'fsm_as_module' value is honored
    instead of being hard-coded to False.
    """
    name = compiler._tmp_name('_tmp_stream')
    return Stream(m, name, clk, rst,
                  datawidth=datawidth, addrwidth=addrwidth,
                  max_pattern_length=max_pattern_length,
                  ram_sel_width=ram_sel_width,
                  fsm_as_module=fsm_as_module)
class Stream(BaseStream):
__intrinsics__ = ('set_source', 'set_source_pattern', 'set_source_multidim',
'set_source_multipattern', 'set_source_empty',
'set_sink', 'set_sink_pattern', 'set_sink_multidim',
'set_sink_multipattern', 'set_sink_immediate',
'set_sink_empty', 'set_constant',
'set_read_RAM', 'set_write_RAM',
'read_sink',
'run', 'join', 'done',
'source_join', 'source_done',
'sink_join', 'sink_done',
'source_join_and_run',
'enable_dump', 'disable_dump')
ram_delay = 4
def __init__(self, m, name, clk, rst,
             datawidth=32, addrwidth=32,
             max_pattern_length=4, max_multipattern_length=2,
             ram_sel_width=8, fsm_as_module=False,
             dump=False, dump_base=10, dump_mode='all'):
    """Build a named hardware stream with RAM-backed sources/sinks.

    :param m: parent veriloggen Module.
    :param name: unique stream name; used as a prefix for generated signals.
    :param datawidth: default data width of stream variables.
    :param addrwidth: width of RAM address registers.
    :param max_pattern_length: max nesting depth for set_*_pattern access.
    :param max_multipattern_length: max number of patterns for multipattern access.
    :param ram_sel_width: width of the RAM-select registers.
    :param fsm_as_module: emit the control FSM as a separate Verilog module.
    :param dump/dump_base/dump_mode: forwarded to BaseStream signal dumping.
    """
    BaseStream.__init__(self, module=m, clock=clk, reset=rst,
                        no_hook=True,
                        dump=dump, dump_base=dump_base, dump_mode=dump_mode)
    self.name = name
    self.datawidth = datawidth
    self.addrwidth = addrwidth
    self.max_pattern_length = max_pattern_length
    self.max_multipattern_length = max_multipattern_length
    self.ram_sel_width = ram_sel_width
    self.fsm_as_module = fsm_as_module
    # Synthesis flags: once set, the variable set can no longer be modified.
    self.stream_synthesized = False
    self.fsm_synthesized = False
    # Main control FSM driving start/busy/end handshaking.
    self.fsm = FSM(self.module, '_%s_fsm' %
                   self.name, self.clock, self.reset,
                   as_module=self.fsm_as_module)
    # Control/status signals.
    self.start_flag = self.module.Wire(
        '_'.join(['', self.name, 'start_flag']))
    self.start = self.module.Reg(
        '_'.join(['', self.name, 'start']), initval=0)
    self.end_flag = self.module.Reg(
        '_'.join(['', self.name, 'end_flag']), initval=0)
    self.term_sink = self.module.Reg(
        '_'.join(['', self.name, 'term_sink']), initval=0)
    self.source_busy = self.module.Reg(
        '_'.join(['', self.name, 'source_busy']), initval=0)
    self.sink_busy = self.module.Reg(
        '_'.join(['', self.name, 'sink_busy']), initval=0)
    self.sink_wait_count = None
    self.reduce_reset = None
    self.reduce_reset_var = None
    # Registries of stream variables, keyed by user-visible name.
    self.sources = OrderedDict()
    self.sinks = OrderedDict()
    self.constants = OrderedDict()
    self.substreams = []
    self.read_rams = OrderedDict()
    self.write_rams = OrderedDict()
    # Bidirectional name <-> id bookkeeping for variables.
    self.var_name_map = OrderedDict()
    self.var_id_map = OrderedDict()
    self.var_id_name_map = OrderedDict()
    self.var_name_id_map = OrderedDict()
    self.var_id_count = 0
    self.source_idle_map = OrderedDict()
    self.sink_when_map = OrderedDict()
    self.ram_id_count = 1  # '0' is reserved for idle
    self.ram_id_map = OrderedDict()  # key: ran._id(), value: count
    self.fsm_id_count = 0
def source(self, name=None, datawidth=None, point=0, signed=True):
    """Define a RAM-fed source variable for this stream.

    Allocates the per-source control registers (mode, offset, size, stride,
    counters, RAM read interface) and registers the variable in the id/name
    maps. Must be called before the stream is synthesized.

    :param name: unique variable name (auto-generated if None).
    :param datawidth: data width (defaults to the stream's datawidth).
    :param point: fixed-point position.
    :param signed: treat values as signed.
    :raises ValueError: if the stream is already synthesized or the name exists.
    """
    if self.stream_synthesized:
        raise ValueError(
            'cannot modify the stream because already synthesized')
    _id = self.var_id_count
    if name is None:
        name = 'source_%d' % _id
    if name in self.var_name_map:
        raise ValueError("'%s' is already defined in stream '%s'" %
                         (name, self.name))
    prefix = self._prefix(name)
    self.var_id_count += 1
    if datawidth is None:
        datawidth = self.datawidth
    var = self.Variable(self._dataname(name), datawidth, point, signed)
    self.sources[name] = var
    self.var_id_map[_id] = var
    self.var_name_map[name] = var
    self.var_id_name_map[_id] = name
    self.var_name_id_map[name] = _id
    # Per-mode FSMs are created lazily on first set_source* call.
    var.source_fsm = None
    var.source_pat_fsm = None
    var.source_multipat_fsm = None
    var.source_idle = self.module.Reg('_%s_idle' % prefix, initval=1)
    self.source_idle_map[name] = var.source_idle
    # 3'b000: set_source_empty, 3'b001: set_source,
    # 3'b010: set_source_pattern, 3'b100: set_source_multipattern
    var.source_mode = self.module.Reg('_%s_source_mode' % prefix, mode_width,
                                      initval=mode_idle)
    # Linear-access parameters (mode_normal).
    var.source_offset = self.module.Reg('_%s_source_offset' % prefix,
                                        self.addrwidth, initval=0)
    var.source_size = self.module.Reg('_%s_source_size' % prefix,
                                      self.addrwidth + 1, initval=0)
    var.source_stride = self.module.Reg('_%s_source_stride' % prefix,
                                        self.addrwidth, initval=0)
    var.source_count = self.module.Reg('_%s_source_count' % prefix,
                                       self.addrwidth + 1, initval=0)
    var.source_offset_buf = self.module.Reg('_%s_source_offset_buf' % prefix,
                                            self.addrwidth, initval=0)
    var.source_stride_buf = self.module.Reg('_%s_source_stride_buf' % prefix,
                                            self.addrwidth, initval=0)
    # Pattern/multipattern register banks are allocated on demand.
    var.source_pat_cur_offsets = None
    var.source_pat_sizes = None
    var.source_pat_strides = None
    var.source_pat_counts = None
    var.source_pat_size_bufs = None
    var.source_pat_stride_bufs = None
    var.source_multipat_num_patterns = None
    var.source_multipat_offsets = None
    var.source_multipat_cur_offsets = None
    var.source_multipat_sizes = None
    var.source_multipat_strides = None
    var.source_multipat_offset_bufs = None
    var.source_multipat_size_bufs = None
    var.source_multipat_stride_bufs = None
    # RAM read-port interface for this source.
    var.source_ram_id_map = OrderedDict()
    var.source_ram_sel = self.module.Reg('_%s_source_ram_sel' % prefix,
                                         self.ram_sel_width, initval=0)
    var.source_ram_raddr = self.module.Reg('_%s_source_ram_raddr' % prefix,
                                           self.addrwidth, initval=0)
    var.source_ram_renable = self.module.Reg('_%s_source_ram_renable' % prefix,
                                             initval=0)
    var.source_ram_rdata = self.module.Wire('_%s_source_ram_rdata' % prefix,
                                            datawidth)
    var.source_ram_rvalid = self.module.Reg('_%s_source_ram_rvalid' % prefix,
                                            initval=0)
    var.has_source_empty = False
    var.source_empty_data = self.module.Reg('_%s_source_empty_data' % prefix,
                                            datawidth, initval=0)
    # Sequential defaults: hold the idle flag and clear read-valid each cycle.
    self.seq(
        var.source_idle(var.source_idle),
        var.source_ram_rvalid(0)
    )
    return var
def sink(self, data, name=None, when=None, when_name=None):
    """Register *data* as a RAM-writing sink of this stream.

    Allocates the per-sink control registers (mode, offset, size, stride,
    counters, RAM write interface). If *when* is given, it is registered as
    an additional sink acting as a write-enable condition for this one.

    :param data: stream expression whose values are written out.
    :param name: unique sink name (auto-generated if None).
    :param when: optional condition expression gating the writes.
    :param when_name: name under which *when* is registered.
    :raises ValueError: if the stream is already synthesized or the name exists.
    """
    if self.stream_synthesized:
        raise ValueError(
            'cannot modify the stream because already synthesized')
    _id = self.var_id_count
    if name is None:
        name = 'sink_%d' % _id
    if name in self.var_name_map:
        raise ValueError("'%s' is already defined in stream '%s'" %
                         (name, self.name))
    else:
        # Expose the sink value as an output of the stream datapath.
        data.output(self._dataname(name))
    prefix = self._prefix(name)
    self.var_id_count += 1
    self.sinks[name] = data
    self.var_id_map[_id] = data
    self.var_name_map[name] = data
    self.var_id_name_map[_id] = name
    self.var_name_id_map[name] = _id
    # Per-mode FSMs are created lazily on first set_sink* call.
    data.sink_fsm = None
    data.sink_pat_fsm = None
    data.sink_multipat_fsm = None
    # 3'b001: set_sink, 3'b010: set_sink_pattern, 3'b100: set_sink_multipattern
    data.sink_mode = self.module.Reg('_%s_sink_mode' % prefix, mode_width,
                                    initval=mode_idle)
    # Linear-access parameters (mode_normal).
    data.sink_offset = self.module.Reg('_%s_sink_offset' % prefix,
                                       self.addrwidth, initval=0)
    data.sink_size = self.module.Reg('_%s_sink_size' % prefix,
                                     self.addrwidth + 1, initval=0)
    data.sink_stride = self.module.Reg('_%s_sink_stride' % prefix,
                                       self.addrwidth, initval=0)
    data.sink_count = self.module.Reg('_%s_sink_count' % prefix,
                                      self.addrwidth + 1, initval=0)
    data.sink_offset_buf = self.module.Reg('_%s_sink_offset_buf' % prefix,
                                           self.addrwidth, initval=0)
    data.sink_stride_buf = self.module.Reg('_%s_sink_stride_buf' % prefix,
                                           self.addrwidth, initval=0)
    # Pattern/multipattern register banks are allocated on demand.
    data.sink_pat_cur_offsets = None
    data.sink_pat_sizes = None
    data.sink_pat_strides = None
    data.sink_pat_counts = None
    data.sink_pat_size_bufs = None
    data.sink_pat_stride_bufs = None
    data.sink_multipat_num_patterns = None
    data.sink_multipat_offsets = None
    data.sink_multipat_cur_offsets = None
    data.sink_multipat_sizes = None
    data.sink_multipat_strides = None
    data.sink_multipat_offset_bufs = None
    data.sink_multipat_size_bufs = None
    data.sink_multipat_stride_bufs = None
    # RAM write-port interface for this sink.
    data.sink_ram_id_map = OrderedDict()
    data.sink_ram_sel = self.module.Reg('_%s_sink_ram_sel' % prefix,
                                        self.ram_sel_width, initval=0)
    data.sink_ram_waddr = self.module.Reg('_%s_sink_waddr' % prefix,
                                          self.addrwidth, initval=0)
    data.sink_ram_wenable = self.module.Reg('_%s_sink_wenable' % prefix,
                                            initval=0)
    data.sink_ram_wdata = self.module.Reg('_%s_sink_wdata' % prefix,
                                          data.width, initval=0)
    # default value
    self.seq(
        data.sink_ram_wenable(0)
    )
    if when is not None:
        # The condition is itself registered as a sink so it flows with the data.
        self.sink(when, when_name)
        self.sink_when_map[name] = when
def constant(self, name=None, datawidth=None, point=0, signed=True):
    """Define a run-time-configurable constant input of this stream.

    :param name: unique variable name (auto-generated if None).
    :param datawidth: data width (defaults to the stream's datawidth).
    :param point: fixed-point position.
    :param signed: treat the value as signed.
    :raises ValueError: if the stream is already synthesized or the name exists.
    """
    if self.stream_synthesized:
        raise ValueError(
            'cannot modify the stream because already synthesized')
    _id = self.var_id_count
    if name is None:
        name = 'constant_%d' % _id
    if name in self.var_name_map:
        raise ValueError("'%s' is already defined in stream '%s'" %
                         (name, self.name))
    prefix = self._prefix(name)
    self.var_id_count += 1
    if datawidth is None:
        datawidth = self.datawidth
    var = self.Variable(self._dataname(name), datawidth, point, signed)
    self.constants[name] = var
    self.var_id_map[_id] = var
    self.var_name_map[name] = var
    self.var_id_name_map[_id] = name
    self.var_name_id_map[name] = _id
    # Holding register for the next constant value; written by set_constant.
    var.next_constant_data = self.module.Reg('_%s_next_constant_data' % prefix,
                                             datawidth, initval=0)
    # Suppress the write-check since this register is driven externally.
    var.next_constant_data.no_write_check = True
    var.has_constant_data = False
    return var
def substream(self, substrm):
    """Wrap *substrm* as a Substream of this stream, register it, and return it."""
    wrapped = Substream(self.module, self.clock, self.reset, substrm, self)
    self.substreams.append(wrapped)
    return wrapped
def read_RAM(self, name, addr, when=None,
             datawidth=None, point=0, signed=True):
    """Define an in-stream RAM read port addressed by stream value *addr*.

    :param name: unique variable name (auto-generated if None).
    :param addr: stream expression producing the read address.
    :param when: optional condition gating the read.
    :param datawidth: data width (defaults to the stream's datawidth).
    :raises ValueError: if the stream is already synthesized or the name exists.
    """
    if self.stream_synthesized:
        raise ValueError(
            'cannot modify the stream because already synthesized')
    _id = self.var_id_count
    if name is None:
        name = 'read_ram_%d' % _id
    if name in self.var_name_map:
        raise ValueError("'%s' is already defined in stream '%s'" %
                         (name, self.name))
    prefix = self._prefix(name)
    self.var_id_count += 1
    if datawidth is None:
        datawidth = self.datawidth
    var = self.ReadRAM(addr, when=when,
                       width=datawidth, point=point, signed=signed, ram_name=name)
    self.read_rams[name] = var
    self.var_id_map[_id] = var
    self.var_name_map[name] = var
    self.var_id_name_map[_id] = name
    self.var_name_id_map[name] = _id
    # RAM selector register: chooses which physical RAM this port targets.
    var.read_ram_id_map = OrderedDict()
    var.read_ram_sel = self.module.Reg('_%s_read_ram_sel' % prefix,
                                       self.ram_sel_width, initval=0)
    return var
def write_RAM(self, name, addr, data, when=None):
    """Define an in-stream RAM write port writing *data* at stream address *addr*.

    :param name: unique variable name (auto-generated if None).
    :param addr: stream expression producing the write address.
    :param data: stream expression producing the value to write.
    :param when: optional condition gating the write.
    :raises ValueError: if the stream is already synthesized or the name exists.
    """
    if self.stream_synthesized:
        raise ValueError(
            'cannot modify the stream because already synthesized')
    _id = self.var_id_count
    if name is None:
        name = 'write_ram_%d' % _id
    if name in self.var_name_map:
        raise ValueError("'%s' is already defined in stream '%s'" %
                         (name, self.name))
    prefix = self._prefix(name)
    self.var_id_count += 1
    var = self.WriteRAM(addr, data, when=when, ram_name=name)
    self.write_rams[name] = var
    self.var_id_map[_id] = var
    self.var_name_map[name] = var
    self.var_id_name_map[_id] = name
    self.var_name_id_map[name] = _id
    # RAM selector register: chooses which physical RAM this port targets.
    var.write_ram_id_map = OrderedDict()
    var.write_ram_sel = self.module.Reg('_%s_write_ram_sel' % prefix,
                                        self.ram_sel_width, initval=0)
    return var
def set_source(self, fsm, name, ram, offset, size, stride=1, port=0):
    """ intrinsic method to assign RAM property to a source stream

    Configures the source variable *name* for linear RAM access
    (offset/size/stride) and advances the caller's *fsm* one state.
    *name* may be a str / vtypes.Str (looked up by name) or an
    int / vtypes.Int (looked up by variable id).

    :raises TypeError: if *name* has an unsupported type.
    :raises NameError: if the resolved name is not a registered source.
    """
    if not self.stream_synthesized:
        self._implement_stream()
    # Resolve the variable from either its name or its numeric id.
    if isinstance(name, str):
        var = self.var_name_map[name]
    elif isinstance(name, vtypes.Str):
        name = name.value
        var = self.var_name_map[name]
    elif isinstance(name, int):
        var = self.var_id_map[name]
    elif isinstance(name, vtypes.Int):
        name = name.value
        var = self.var_id_map[name]
    else:
        raise TypeError('Unsupported index name')
    if name not in self.sources:
        raise NameError("No such stream '%s'" % name)
    # Latch the access parameters when the calling FSM reaches this state.
    set_cond = self._set_flag(fsm)
    self.seq.If(set_cond)(
        var.source_mode(mode_normal),
        var.source_offset(offset),
        var.source_size(size),
        var.source_stride(stride)
    )
    port = vtypes.to_int(port)
    self._setup_source_ram(ram, var, port, set_cond)
    self._synthesize_set_source(var, name)
    fsm.goto_next()
def set_source_pattern(self, fsm, name, ram, offset, pattern, port=0):
""" intrinsic method to assign RAM property to a source stream """
if not self.stream_synthesized:
self._implement_stream()
if isinstance(name, str):
var = self.var_name_map[name]
elif isinstance(name, vtypes.Str):
name = name.value
var = self.var_name_map[name]
elif isinstance(name, int):
var = self.var_id_map[name]
elif isinstance(name, vtypes.Int):
name = name.value
var = self.var_id_map[name]
else:
raise TypeError('Unsupported index name')
if name not in self.sources:
raise NameError("No such stream '%s'" % name)
if not isinstance(pattern, (tuple, list)):
raise TypeError('pattern must be list or tuple.')
if not pattern:
raise ValueError(
'pattern must have one (size, stride) pair at least.')
if not isinstance(pattern[0], (tuple, list)):
pattern = (pattern,)
pattern = tuple(pattern)
if len(pattern) > self.max_pattern_length:
raise ValueError(
"'pattern' length exceeds maximum pattern length.")
self._make_source_pattern_vars(var, name)
set_cond = self._set_flag(fsm)
self.seq.If(set_cond)(
var.source_mode(mode_pattern),
var.source_offset(offset)
)
pad = tuple([(1, 0)
for _ in range(self.max_pattern_length - len(pattern))])
for (source_pat_size, source_pat_stride,
(size, stride)) in zip(var.source_pat_sizes, | |
= SortedDict()
data_dict['q1'] = x
return data_dict
def disassemble(self, input, dim=1):
    """Split *input* along *dim* via disassemble_tensor and return only the 'q1' data entries."""
    _state, _costate, data_dicts = self.disassemble_tensor(input, dim=dim)
    return scd_utils.extract_key_from_dict_of_dicts(data_dicts, 'q1')
class AutoShootingIntegrandModelSimpleConv2D(shooting.ShootingLinearInParameterConvolutionIntegrand):
    """Simple 2D-convolutional shooting integrand with a two-state (q1, q2)
    formulation: dot_q1 = conv1(nl(q2)), dot_q2 = conv2(q1)."""

    def __init__(self, in_features, nonlinearity=None, transpose_state_when_forward=False, concatenate_parameters=True,
                 nr_of_particles=10, particle_dimension=1, particle_size=2, filter_size=3, parameter_weight=None,
                 *args, **kwargs):
        """Store conv filter size and forward everything else to the base integrand.

        :param filter_size: square kernel size of both convolutions.
        """
        super(AutoShootingIntegrandModelSimpleConv2D, self).__init__(in_features=in_features,
                                                                     nonlinearity=nonlinearity,
                                                                     transpose_state_when_forward=transpose_state_when_forward,
                                                                     concatenate_parameters=concatenate_parameters,
                                                                     nr_of_particles=nr_of_particles,
                                                                     particle_dimension=particle_dimension,
                                                                     particle_size=particle_size,
                                                                     parameter_weight=parameter_weight,
                                                                     *args, **kwargs)
        self.filter_size = filter_size
        # Spatial dimensions (H, W) of the state tensors.
        self.enlargement_dimensions = [2, 3]

    def create_initial_state_parameters(self, set_to_zero, *args, **kwargs):
        """Create the q1/q2 particle states via the state initializer."""
        # creates these as a sorted dictionary and returns it (need to be in the same order!!)
        state_dict = SortedDict()
        state_dict['q1'] = self._state_initializer.create_parameters(nr_of_particles=self.nr_of_particles,
                                                                     particle_size=self.particle_size,
                                                                     particle_dimension=self.particle_dimension,
                                                                     set_to_zero=set_to_zero)
        state_dict['q2'] = self._state_initializer.create_parameters(nr_of_particles=self.nr_of_particles,
                                                                     particle_size=self.particle_size,
                                                                     particle_dimension=self.particle_dimension,
                                                                     set_to_zero=set_to_zero)
        return state_dict

    def create_default_parameter_objects(self):
        """Create the two same-channel conv layers used by the RHS."""
        parameter_objects = SortedDict()
        conv1 = oc.SNN_Conv2d(in_channels=self.in_features, out_channels=self.in_features, kernel_size=self.filter_size, padding=1, weight=self.parameter_weight)
        conv2 = oc.SNN_Conv2d(in_channels=self.in_features, out_channels=self.in_features, kernel_size=self.filter_size, padding=1, weight=self.parameter_weight)
        parameter_objects['conv1'] = conv1
        parameter_objects['conv2'] = conv2
        return parameter_objects

    def rhs_advect_state(self, t, state_dict_or_dict_of_dicts, parameter_objects):
        """Right-hand side of the state advection ODE."""
        rhs = SortedDict()
        s = state_dict_or_dict_of_dicts
        p = parameter_objects
        rhs['dot_q1'] = p['conv1'](self.nl(s['q2']))
        rhs['dot_q2'] = p['conv2'](s['q1'])
        return rhs

    def get_initial_data_dict_from_data_tensor(self, x):
        """Build the initial data dict: q1 = x, q2 = zeros shaped like x."""
        # Initial data dict from given data tensor
        data_dict = SortedDict()
        data_dict['q1'] = x
        data_dict['q2'] = torch.zeros_like(x)
        return data_dict

    def disassemble(self, input, dim=1):
        """Split *input* along *dim* and return only the 'q1' data entries."""
        state_dict, costate_dict, data_dicts = self.disassemble_tensor(input, dim=dim)
        return scd_utils.extract_key_from_dict_of_dicts(data_dicts, 'q1')
class AutoShootingIntegrandModelConv2DBatch(shooting.ShootingLinearInParameterConvolutionIntegrand):
    """Convolutional shooting integrand that normalizes q1 before the second conv.

    Dynamics (see :meth:`rhs_advect_state`):
        dot_q1 = conv1(nl(q2))
        dot_q2 = conv2(group_norm(q1))
    """

    def __init__(self, in_features, nonlinearity=None, transpose_state_when_forward=False, concatenate_parameters=True,
                 nr_of_particles=10, particle_dimension=1, particle_size=2, filter_size=3, parameter_weight=None,
                 *args, **kwargs):
        """Store the convolution filter size; everything else goes to the base class."""
        super(AutoShootingIntegrandModelConv2DBatch, self).__init__(
            in_features=in_features,
            nonlinearity=nonlinearity,
            transpose_state_when_forward=transpose_state_when_forward,
            concatenate_parameters=concatenate_parameters,
            nr_of_particles=nr_of_particles,
            particle_dimension=particle_dimension,
            particle_size=particle_size,
            parameter_weight=parameter_weight,
            *args, **kwargs)
        self.filter_size = filter_size
        # Dimensions 2 and 3 (spatial H, W for NCHW data — TODO confirm) may need enlarging.
        self.enlargement_dimensions = [2, 3]

    def create_initial_state_parameters(self,set_to_zero,*args,**kwargs):
        """Create the q1/q2 particle states (SortedDict keeps the key order stable)."""
        state_dict = SortedDict()
        for state_name in ('q1', 'q2'):
            state_dict[state_name] = self._state_initializer.create_parameters(
                nr_of_particles=self.nr_of_particles,
                particle_size=self.particle_size,
                particle_dimension=self.particle_dimension,
                set_to_zero=set_to_zero)
        return state_dict

    def create_default_parameter_objects(self):
        """Create the conv layers and cache a parameter-free GroupNorm on self.

        NOTE(review): the GroupNorm is stored on `self`, not in the returned
        parameter dict; it is affine=False so it has no learned parameters, but
        it only exists after this method has run — confirm callers rely on that.
        """
        parameter_objects = SortedDict()
        for conv_name in ('conv1', 'conv2'):
            parameter_objects[conv_name] = oc.SNN_Conv2d(in_channels=self.in_features,
                                                         out_channels=self.in_features,
                                                         kernel_size=self.filter_size,
                                                         padding=1,
                                                         weight=self.parameter_weight)
        # One group per channel, no learned affine transform.
        self.group_norm = nn.GroupNorm(self.in_features, self.in_features, affine=False)
        return parameter_objects

    def rhs_advect_state(self, t, state_dict_or_dict_of_dicts, parameter_objects):
        """Right-hand side of the state advection equations."""
        state = state_dict_or_dict_of_dicts
        params = parameter_objects
        rhs = SortedDict()
        rhs['dot_q1'] = params['conv1'](self.nl(state['q2']))
        rhs['dot_q2'] = params['conv2'](self.group_norm(state['q1']))
        return rhs

    def get_initial_data_dict_from_data_tensor(self, x):
        """Initial data dict: data flows through q1, q2 starts at zero."""
        data_dict = SortedDict()
        data_dict['q1'] = x
        data_dict['q2'] = torch.zeros_like(x)
        return data_dict

    def disassemble(self,input,dim=1):
        """Return the 'q1' data component of an assembled tensor."""
        _state, _costate, data_dicts = self.disassemble_tensor(input, dim=dim)
        return scd_utils.extract_key_from_dict_of_dicts(data_dicts, 'q1')
class AutoShootingOptimalTransportSimple(shooting.OptimalTransportNonLinearInParameter):
    """Simple optimal-transport shooting model with an up/down linear pair.

    Dynamics (see :meth:`rhs_advect_state`):
        dot_q1 = l1(nl(q2))
        dot_q2 = l2(q1)

    `l1` maps in_features -> inflation_factor * in_features and `l2` maps back
    down. With the default ``inflation_factor=1`` both maps are square; for
    other values the state shapes must be compatible — TODO(review) confirm
    against how q1/q2 are sized in create_initial_state_parameters.
    """

    def __init__(self, in_features, nonlinearity=None, transpose_state_when_forward=False, concatenate_parameters=True,
                 nr_of_particles=10, particle_dimension=1, particle_size=2, parameter_weight=None, inflation_factor=1,
                 *args, **kwargs):
        """Store the inflation factor; everything else goes to the base class."""
        super(AutoShootingOptimalTransportSimple, self).__init__(
            in_features=in_features,
            nonlinearity=nonlinearity,
            transpose_state_when_forward=transpose_state_when_forward,
            concatenate_parameters=concatenate_parameters,
            nr_of_particles=nr_of_particles,
            particle_dimension=particle_dimension,
            particle_size=particle_size,
            parameter_weight=parameter_weight,
            *args, **kwargs)
        self.inflation_factor = inflation_factor

    def create_initial_state_parameters(self, set_to_zero, *args, **kwargs):
        """Create the q1/q2 particle states.

        Returned as a SortedDict so the key order is stable (required by the
        assembly/disassembly machinery in the base class).
        """
        state_dict = SortedDict()
        for state_name in ('q1', 'q2'):
            state_dict[state_name] = self._state_initializer.create_parameters(
                nr_of_particles=self.nr_of_particles,
                particle_size=self.particle_size,
                particle_dimension=self.particle_dimension,
                set_to_zero=set_to_zero)
        return state_dict

    def create_default_parameter_objects(self):
        """Create the up-projection (l1) and down-projection (l2) linear layers.

        Fix: removed an unused hard-coded local (``dim = 2``) and stale
        commented-out code left over from an earlier implementation.
        """
        parameter_objects = SortedDict()
        parameter_objects['l1'] = oc.SNN_Linear(in_features=self.in_features,
                                                out_features=self.inflation_factor * self.in_features,
                                                weight=self.parameter_weight)
        parameter_objects['l2'] = oc.SNN_Linear(in_features=self.inflation_factor * self.in_features,
                                                out_features=self.in_features,
                                                weight=self.parameter_weight)
        return parameter_objects

    def rhs_advect_state(self, t, state_dict_or_dict_of_dicts, parameter_objects):
        """Right-hand side of the state advection equations."""
        rhs = SortedDict()
        s = state_dict_or_dict_of_dicts
        p = parameter_objects
        rhs['dot_q1'] = p['l1'](self.nl(s['q2']))
        rhs['dot_q2'] = p['l2'](s['q1'])
        return rhs

    def get_initial_data_dict_from_data_tensor(self, x):
        """Initial data dict: data flows through q1, q2 starts at zero."""
        data_dict = SortedDict()
        data_dict['q1'] = x
        data_dict['q2'] = torch.zeros_like(x)
        return data_dict

    def disassemble(self, input, dim=1):
        """Return the 'q1' data component of an assembled tensor."""
        _state, _costate, data_dicts = self.disassemble_tensor(input, dim=dim)
        return scd_utils.extract_key_from_dict_of_dicts(data_dicts, 'q1')
class AutoShootingIntegrandModelUpdownDampenedQ2(shooting.ShootingLinearInParameterVectorIntegrand):
    """Up/down shooting model with an extra linear term on the q2 equation.

    Dynamics (see rhs_advect_state):
        dot_q1 = l1(nl(q2))
        dot_q2 = l2(q1) - dampening_factor * q2
    where q2 lives in a space `inflation_factor` times larger than q1.
    Analytic costate / parameter computations are provided (optional_* methods)
    so the autodiff shooting equations can be cross-checked against them.
    """
    def __init__(self, in_features, nonlinearity=None, transpose_state_when_forward=False, concatenate_parameters=True,
                 nr_of_particles=10, particle_dimension=1, particle_size=2, parameter_weight=None, inflation_factor=5,
                 *args, **kwargs):
        """Store inflation and dampening settings; everything else goes to the base class."""
        super(AutoShootingIntegrandModelUpdownDampenedQ2, self).__init__(in_features=in_features,
                                                                         nonlinearity=nonlinearity,
                                                                         transpose_state_when_forward=transpose_state_when_forward,
                                                                         concatenate_parameters=concatenate_parameters,
                                                                         nr_of_particles=nr_of_particles,
                                                                         particle_dimension=particle_dimension,
                                                                         particle_size=particle_size,
                                                                         parameter_weight=parameter_weight,
                                                                         *args, **kwargs)
        # q2's last dimension is this many times larger than q1's.
        self.inflation_factor = inflation_factor
        # NOTE(review): rhs_advect_state SUBTRACTS this factor, so with this
        # negative value the net q2 term is +0.5*q2 (growth, not damping) —
        # confirm the sign is intended.
        self.dampening_factor = -0.5
    def create_initial_state_parameters(self, set_to_zero, *args, **kwargs):
        """Create particle states q1 and the inflated state q2."""
        # creates these as a sorted dictionary and returns it (need to be in the same order!!)
        state_dict = SortedDict()
        state_dict['q1'] = self._state_initializer.create_parameters(nr_of_particles=self.nr_of_particles,
                                                                     particle_size=self.particle_size,
                                                                     particle_dimension=self.particle_dimension,
                                                                     set_to_zero=set_to_zero)
        # make the dimension of this state inflation_factor (default 5) times bigger
        state_dict['q2'] = self._state_initializer.create_parameters(nr_of_particles=self.nr_of_particles,
                                                                     particle_size=self.particle_size*self.inflation_factor,
                                                                     particle_dimension=self.particle_dimension,
                                                                     set_to_zero=set_to_zero)
        return state_dict
    def create_default_parameter_objects(self):
        """l1 maps the inflated space down to in_features; l2 maps up to the inflated space."""
        parameter_objects = SortedDict()
        linear1 = oc.SNN_Linear(in_features=self.in_features*self.inflation_factor,out_features=self.in_features,weight=self.parameter_weight)
        linear2 = oc.SNN_Linear(in_features=self.in_features,out_features=self.in_features*self.inflation_factor,weight=self.parameter_weight)
        parameter_objects['l1'] = linear1
        parameter_objects['l2'] = linear2
        return parameter_objects
    def rhs_advect_state(self, t, state_dict_or_dict_of_dicts, parameter_objects):
        """Right-hand side of the state advection; the q1 dampening term is disabled."""
        rhs = SortedDict()
        s = state_dict_or_dict_of_dicts
        p = parameter_objects
        rhs['dot_q1'] = p['l1'](input=self.nl(s['q2'])) #- self.dampening_factor * s["q1"]
        rhs['dot_q2'] = p['l2'](input=s['q1']) - self.dampening_factor * s['q2']
        return rhs
    def get_initial_data_dict_from_data_tensor(self, x):
        """Initial data dict: q1 = x, q2 = zeros tiled to the inflated last dimension."""
        # Initial data dict from given data tensor
        data_dict = SortedDict()
        data_dict['q1'] = x
        z = torch.zeros_like(x)
        #z = x.clone()
        # Tile along the last dimension so q2 matches the inflated state size.
        sz = [1]*len(z.shape)
        sz[-1] = self.inflation_factor
        data_dict['q2'] = z.repeat(sz)
        return data_dict
    def disassemble(self,input,dim=1):
        """Extract the 'q1' data component from an assembled tensor."""
        state_dict, costate_dict, data_dicts = self.disassemble_tensor(input, dim=dim)
        return scd_utils.extract_key_from_dict_of_dicts(data_dicts,'q1')
    def optional_rhs_advect_costate_analytic(self, t, state_dict_or_dict_of_dicts, costate_dict_or_dict_of_dicts, parameter_objects):
        """
        This is optional. We do not need to define this. But if we do, we can sidestep computing the co-state evolution via
        auto-diff. We can use this for example to test if the autodiff shooting approach properly recovers the analytic evolution equations.
        :param t: current time (unused here)
        :param state_dict_or_dict_of_dicts: current states (q1, q2)
        :param costate_dict_or_dict_of_dicts: current costates (p_q1, p_q2)
        :param parameter_objects: current parameter objects (l1, l2)
        :return: SortedDict with the costate time derivatives (dot_p_q1, dot_p_q2)
        """
        rhs = SortedDict()
        s = state_dict_or_dict_of_dicts
        c = costate_dict_or_dict_of_dicts
        p = parameter_objects
        par_dict1 = p['l1'].get_parameter_dict()
        par_dict2 = p['l2'].get_parameter_dict()
        l1 = par_dict1['weight']
        l2 = par_dict2['weight']
        # now compute the parameters
        q2i = s['q2']
        p1i = c['p_q1']
        p2i = c['p_q2']
        # we are computing based on the transposed quantities here (this makes the use of torch.matmul possible
        #dot_qt = torch.matmul(self.nl(qi), A.t()) + bt
        # now we can also compute the rhs of the costate (based on the manually computed shooting equations)
        # for i in range(self.nr_of_particles):
        #     dot_pt[i, ...] = -self.dnl(qi[i, ...]) * torch.matmul(pi[i, ...], A)
        dot_p2t = - self.dnl(q2i) * torch.matmul(p1i,l1)
        dot_p1t = - torch.matmul(p2i,l2)
        # NOTE(review): the dampening contributions are commented out below, so
        # this analytic costate only matches autodiff when the corresponding
        # dampening terms in rhs_advect_state are also disabled — confirm.
        rhs['dot_p_q1'] = dot_p1t #- self.dampening_factor * p1i
        rhs['dot_p_q2'] = dot_p2t #- self.dampening_factor * p2i
        return rhs
    def optional_compute_parameters_analytic(self,t,state_dict, costate_dict):
        """
        This is optional. We can prescribe an analytic computation of the parameters (where we do not need to do this via autodiff).
        This is optional, but can be used for testing.
        :param t: current time (unused here)
        :param state_dict: current states (q1, q2)
        :param costate_dict: current costates (p_q1, p_q2)
        :return: parameter objects with analytically computed weights/biases
        """
        s = state_dict
        c = costate_dict
        p = self.create_default_parameter_objects_on_consistent_device()
        # now compute the parameters
        q1i = s['q1']
        p1i = c['p_q1']
        q2i = s['q2'].transpose(1,2)
        p2i = c['p_q2'].transpose(1,2)
        # mean over dim 0 averages over the particles ("particles are saved as rows")
        temp = torch.matmul(self.nl(q2i),p1i)
        l1 = torch.mean(temp,dim = 0).t()
        temp2 = torch.matmul(p2i,q1i)
        l2 = torch.mean(temp2,dim = 0)
        # particles are saved as rows
        #At = torch.zeros(self.in_features, self.in_features)
        #for i in range(self.nr_of_particles):
        #    At = At + (pi[i, ...].t() * self.nl(qi[i, ...])).t()
        #At = 1 / self._overall_number_of_state_parameters * At # because of the mean in the Lagrangian multiplier
        #bt = 1 / self._overall_number_of_state_parameters * pi.sum(dim=0) # -\sum_i q_i
        # results need to be written in the respective parameter variables
        par_dict = p['l1'].get_parameter_dict()
        weight_dict = p['l1'].get_parameter_weight_dict()
        par_dict['weight'] = utils.divide_by_if_not_none(l1,weight_dict['weight'])
        par_dict['bias'] = utils.divide_by_if_not_none(torch.mean(p1i,dim = 0),weight_dict['bias'])
        par_dict2 = p['l2'].get_parameter_dict()
        weight_dict2 = p['l2'].get_parameter_weight_dict()
        par_dict2['weight'] = utils.divide_by_if_not_none(l2,weight_dict2['weight'])
        par_dict2['bias'] = utils.divide_by_if_not_none(torch.mean(p2i,dim = 0).t(),weight_dict2['bias'])
        return p
class AutoShootingIntegrandModelUpdownSymmetrized(shooting.ShootingLinearInParameterVectorIntegrand):
def __init__(self, in_features, nonlinearity=None, transpose_state_when_forward=False, concatenate_parameters=True,
nr_of_particles=10, particle_dimension=1, particle_size=2, parameter_weight=None, inflation_factor=5,
*args, **kwargs):
super(AutoShootingIntegrandModelUpdownSymmetrized, self).__init__(in_features=in_features,
nonlinearity=nonlinearity,
transpose_state_when_forward=transpose_state_when_forward,
concatenate_parameters=concatenate_parameters,
nr_of_particles=nr_of_particles,
particle_dimension=particle_dimension,
particle_size=particle_size,
parameter_weight=parameter_weight,
*args, **kwargs)
self.inflation_factor = inflation_factor
self.dampening_factor = -1.0
def create_initial_state_parameters(self, set_to_zero, *args, **kwargs):
# creates these as a sorted dictionary and returns it (need to be in the same order!!)
state_dict = SortedDict()
state_dict['q1'] = self._state_initializer.create_parameters(nr_of_particles=self.nr_of_particles,
particle_size=self.particle_size,
particle_dimension=self.particle_dimension,
set_to_zero=set_to_zero)
# make the dimension of this state 5 times bigger
state_dict['q2'] = self._state_initializer.create_parameters(nr_of_particles=self.nr_of_particles,
particle_size=self.particle_size*self.inflation_factor,
particle_dimension=self.particle_dimension,
set_to_zero=set_to_zero)
state_dict['q3'] = self._state_initializer.create_parameters(nr_of_particles=self.nr_of_particles,
particle_size=self.particle_size,
particle_dimension=self.particle_dimension,
set_to_zero=set_to_zero)
return state_dict
def create_default_parameter_objects(self):
parameter_objects = SortedDict()
linear1 = oc.SNN_Linear(in_features=self.in_features*self.inflation_factor,out_features=self.in_features,weight=self.parameter_weight)
linear2 = oc.SNN_Linear(in_features=self.in_features,out_features=self.in_features*self.inflation_factor,weight=self.parameter_weight)
linear3 = oc.SNN_Linear(in_features=self.in_features,out_features=self.in_features*self.inflation_factor,weight=self.parameter_weight)
parameter_objects['l1'] = linear1
parameter_objects['l2'] = linear2
parameter_objects['l3'] = linear3
return parameter_objects
def rhs_advect_state(self, t, state_dict_or_dict_of_dicts, parameter_objects):
rhs = SortedDict()
s = state_dict_or_dict_of_dicts
p = parameter_objects
rhs['dot_q1'] = p['l1'](input=self.nl(s['q2']))
rhs['dot_q2'] = p['l2'](input = self.nl(s['q3'])) #- self.dampening_factor * s['q2']
rhs["dot_q3"] = p["l3"](input = s["q1"]) #- self.dampening_factor * s['q3']
return rhs
def get_initial_data_dict_from_data_tensor(self, x):
# Initial data dict from given data tensor
data_dict = SortedDict()
data_dict['q1'] = x
z = torch.zeros_like(x)
z = x.clone()
sz = [1]*len(z.shape)
sz[-1] = self.inflation_factor
data_dict['q2'] = z.repeat(sz)
data_dict["q3"] = x
return data_dict
def disassemble(self,input,dim=1):
state_dict, costate_dict, data_dicts = self.disassemble_tensor(input, dim=dim)
return scd_utils.extract_key_from_dict_of_dicts(data_dicts,'q1')
def optional_rhs_advect_costate_analytic(self, t, state_dict_or_dict_of_dicts, costate_dict_or_dict_of_dicts, parameter_objects):
"""
This is optional. We do not need to define this. But if we do, we can sidestep computing the co-state evolution via
auto-diff. We can use this for example to test if the autodiff shooting approach properly recovers the analytic evolution equations.
:param t:
:param state_dict_of_dicts:
:param costate_dict_of_dicts:
:param parameter_objects:
:return:
"""
rhs = SortedDict()
s = state_dict_or_dict_of_dicts
c = costate_dict_or_dict_of_dicts
p | |
"""Initial models.
Revision ID: a2e0f8f4b344
Revises:
Create Date: 2016-11-20 23:02:51.424015
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('issuecategory',
sa.Column('data', sa.LargeBinary(), nullable=True),
sa.Column('pk', sa.String(length=32), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('name', sa.String(length=50), nullable=True),
sa.PrimaryKeyConstraint('pk'),
sa.UniqueConstraint('name', name='unique_issuecategory_name')
)
op.create_index(op.f('ix_issuecategory_created_at'), 'issuecategory', ['created_at'], unique=False)
op.create_index(op.f('ix_issuecategory_name'), 'issuecategory', ['name'], unique=False)
op.create_index(op.f('ix_issuecategory_pk'), 'issuecategory', ['pk'], unique=False)
op.create_index(op.f('ix_issuecategory_updated_at'), 'issuecategory', ['updated_at'], unique=False)
op.create_table('issueclass',
sa.Column('data', sa.LargeBinary(), nullable=True),
sa.Column('code', sa.String(length=50), nullable=True),
sa.Column('hash', sa.String(length=64), nullable=True),
sa.Column('severity', sa.Integer(), nullable=True),
sa.Column('language', sa.String(length=50), nullable=True),
sa.Column('title', sa.String(length=100), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('analyzer', sa.String(length=50), nullable=True),
sa.Column('occurrence_description', sa.String(length=2000), nullable=True),
sa.Column('pk', sa.String(length=32), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('pk'),
sa.UniqueConstraint('code', 'analyzer', name='unique_together_issueclass_code_analyzer')
)
op.create_index(op.f('ix_issueclass_analyzer'), 'issueclass', ['analyzer'], unique=False)
op.create_index(op.f('ix_issueclass_code'), 'issueclass', ['code'], unique=False)
op.create_index(op.f('ix_issueclass_created_at'), 'issueclass', ['created_at'], unique=False)
op.create_index(op.f('ix_issueclass_hash'), 'issueclass', ['hash'], unique=False)
op.create_index(op.f('ix_issueclass_language'), 'issueclass', ['language'], unique=False)
op.create_index(op.f('ix_issueclass_occurrence_description'), 'issueclass', ['occurrence_description'], unique=False)
op.create_index(op.f('ix_issueclass_pk'), 'issueclass', ['pk'], unique=False)
op.create_index(op.f('ix_issueclass_severity'), 'issueclass', ['severity'], unique=False)
op.create_index(op.f('ix_issueclass_title'), 'issueclass', ['title'], unique=False)
op.create_index(op.f('ix_issueclass_updated_at'), 'issueclass', ['updated_at'], unique=False)
op.create_table('project',
sa.Column('data', sa.LargeBinary(), nullable=True),
sa.Column('analysis_priority', sa.Integer(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('fetch_status', sa.String(), nullable=True),
sa.Column('analysis_status', sa.String(length=50), nullable=True),
sa.Column('analysis_requested_at', sa.DateTime(), nullable=True),
sa.Column('fetch_error', sa.Text(), nullable=True),
sa.Column('source', sa.String(length=100), nullable=False),
sa.Column('pk', sa.String(length=32), nullable=False),
sa.Column('analyze', sa.Boolean(), nullable=True),
sa.Column('public', sa.Boolean(), nullable=True),
sa.Column('description', sa.String(length=2000), nullable=True),
sa.Column('deleted', sa.Boolean(), nullable=True),
sa.Column('fetched_at', sa.DateTime(), nullable=True),
sa.Column('configuration', sa.String(length=64), nullable=True),
sa.Column('reset', sa.Boolean(), nullable=True),
sa.Column('permalink', sa.String(length=100), nullable=False),
sa.Column('name', sa.String(length=100), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('analyzed_at', sa.DateTime(), nullable=True),
sa.Column('reset_requested_at', sa.DateTime(), nullable=True),
sa.Column('delete', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('pk'),
sa.UniqueConstraint('permalink', name=u'unique_project_permalink')
)
op.create_index(op.f('ix_project_analysis_priority'), 'project', ['analysis_priority'], unique=False)
op.create_index(op.f('ix_project_analysis_requested_at'), 'project', ['analysis_requested_at'], unique=False)
op.create_index(op.f('ix_project_analysis_status'), 'project', ['analysis_status'], unique=False)
op.create_index(op.f('ix_project_analyze'), 'project', ['analyze'], unique=False)
op.create_index(op.f('ix_project_analyzed_at'), 'project', ['analyzed_at'], unique=False)
op.create_index(op.f('ix_project_configuration'), 'project', ['configuration'], unique=False)
op.create_index(op.f('ix_project_created_at'), 'project', ['created_at'], unique=False)
op.create_index(op.f('ix_project_delete'), 'project', ['delete'], unique=False)
op.create_index(op.f('ix_project_deleted'), 'project', ['deleted'], unique=False)
op.create_index(op.f('ix_project_description'), 'project', ['description'], unique=False)
op.create_index(op.f('ix_project_fetch_status'), 'project', ['fetch_status'], unique=False)
op.create_index(op.f('ix_project_fetched_at'), 'project', ['fetched_at'], unique=False)
op.create_index(op.f('ix_project_name'), 'project', ['name'], unique=False)
op.create_index(op.f('ix_project_permalink'), 'project', ['permalink'], unique=False)
op.create_index(op.f('ix_project_pk'), 'project', ['pk'], unique=False)
op.create_index(op.f('ix_project_public'), 'project', ['public'], unique=False)
op.create_index(op.f('ix_project_reset'), 'project', ['reset'], unique=False)
op.create_index(op.f('ix_project_reset_requested_at'), 'project', ['reset_requested_at'], unique=False)
op.create_index(op.f('ix_project_source'), 'project', ['source'], unique=False)
op.create_index(op.f('ix_project_updated_at'), 'project', ['updated_at'], unique=False)
op.create_table('tag',
sa.Column('data', sa.LargeBinary(), nullable=True),
sa.Column('pk', sa.String(length=32), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('name', sa.String(length=50), nullable=True),
sa.PrimaryKeyConstraint('pk'),
sa.UniqueConstraint('name', name='unique_tag_name')
)
op.create_index(op.f('ix_tag_created_at'), 'tag', ['created_at'], unique=False)
op.create_index(op.f('ix_tag_name'), 'tag', ['name'], unique=False)
op.create_index(op.f('ix_tag_pk'), 'tag', ['pk'], unique=False)
op.create_index(op.f('ix_tag_updated_at'), 'tag', ['updated_at'], unique=False)
op.create_table('user',
sa.Column('data', sa.LargeBinary(), nullable=True),
sa.Column('email_validated', sa.Boolean(), nullable=True),
sa.Column('superuser', sa.Boolean(), nullable=True),
sa.Column('new_email', sa.String(length=255), nullable=True),
sa.Column('name', sa.String(length=50), nullable=True),
sa.Column('email_change_requested_at', sa.DateTime(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('email', sa.String(length=255), nullable=True),
sa.Column('terms_accepted_at', sa.DateTime(), nullable=True),
sa.Column('terms_accepted', sa.Boolean(), nullable=True),
sa.Column('password_reset_code', sa.String(length=64), nullable=True),
sa.Column('pk', sa.String(length=32), nullable=False),
sa.Column('email_validation_code', sa.String(length=64), nullable=True),
sa.Column('password', sa.String(length=128), nullable=True),
sa.Column('password_reset_requested_at', sa.DateTime(), nullable=True),
sa.Column('delete', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('pk'),
sa.UniqueConstraint('email', name='unique_user_email'),
sa.UniqueConstraint('name', name='unique_user_name')
)
op.create_index(op.f('ix_user_created_at'), 'user', ['created_at'], unique=False)
op.create_index(op.f('ix_user_delete'), 'user', ['delete'], unique=False)
op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=False)
op.create_index(op.f('ix_user_email_validated'), 'user', ['email_validated'], unique=False)
op.create_index(op.f('ix_user_email_validation_code'), 'user', ['email_validation_code'], unique=False)
op.create_index(op.f('ix_user_name'), 'user', ['name'], unique=False)
op.create_index(op.f('ix_user_new_email'), 'user', ['new_email'], unique=False)
op.create_index(op.f('ix_user_password_reset_code'), 'user', ['password_reset_code'], unique=False)
op.create_index(op.f('ix_user_pk'), 'user', ['pk'], unique=False)
op.create_index(op.f('ix_user_updated_at'), 'user', ['updated_at'], unique=False)
op.create_table('accesstoken',
sa.Column('data', sa.LargeBinary(), nullable=True),
sa.Column('pk', sa.String(length=32), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('token', sa.String(length=64), nullable=True),
sa.Column('user', sa.String(length=32), nullable=True),
sa.ForeignKeyConstraint(['user'], ['user.pk'], name='accesstoken_user_user', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('pk')
)
op.create_index(op.f('ix_accesstoken_created_at'), 'accesstoken', ['created_at'], unique=False)
op.create_index(op.f('ix_accesstoken_pk'), 'accesstoken', ['pk'], unique=False)
op.create_index(op.f('ix_accesstoken_token'), 'accesstoken', ['token'], unique=False)
op.create_index(op.f('ix_accesstoken_updated_at'), 'accesstoken', ['updated_at'], unique=False)
op.create_index(op.f('ix_accesstoken_user'), 'accesstoken', ['user'], unique=False)
op.create_table('filerevision',
sa.Column('data', sa.LargeBinary(), nullable=True),
sa.Column('hash', sa.String(length=64), nullable=True),
sa.Column('language', sa.String(length=50), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('project', sa.String(length=32), nullable=True),
sa.Column('sha', sa.String(length=64), nullable=True),
sa.Column('pk', sa.String(length=32), nullable=False),
sa.Column('path', sa.String(length=2000), nullable=True),
sa.Column('configuration', sa.String(length=64), nullable=True),
sa.ForeignKeyConstraint(['project'], [u'project.pk'], name=u'filerevision_project_project', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('pk')
)
op.create_index(op.f('ix_filerevision_configuration'), 'filerevision', ['configuration'], unique=False)
op.create_index(op.f('ix_filerevision_created_at'), 'filerevision', ['created_at'], unique=False)
op.create_index(op.f('ix_filerevision_hash'), 'filerevision', ['hash'], unique=False)
op.create_index(op.f('ix_filerevision_language'), 'filerevision', ['language'], unique=False)
op.create_index(op.f('ix_filerevision_path'), 'filerevision', ['path'], unique=False)
op.create_index(op.f('ix_filerevision_pk'), 'filerevision', ['pk'], unique=False)
op.create_index(op.f('ix_filerevision_project'), 'filerevision', ['project'], unique=False)
op.create_index(op.f('ix_filerevision_sha'), 'filerevision', ['sha'], unique=False)
op.create_index(op.f('ix_filerevision_updated_at'), 'filerevision', ['updated_at'], unique=False)
op.create_table('issue',
sa.Column('data', sa.LargeBinary(), nullable=True),
sa.Column('code', sa.String(length=100), nullable=False),
sa.Column('hash', sa.String(length=64), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('analyzer', sa.String(length=100), nullable=False),
sa.Column('project', sa.String(length=32), nullable=False),
sa.Column('fingerprint', sa.String(length=255), nullable=False),
sa.Column('pk', sa.String(length=32), nullable=False),
sa.Column('configuration', sa.String(length=64), nullable=True),
sa.ForeignKeyConstraint(['project'], [u'project.pk'], name=u'issue_project_project', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('pk'),
sa.UniqueConstraint('project', 'fingerprint', 'analyzer', 'code', name='unique_together_issue_project_fingerprint_analyzer_code')
)
op.create_index(op.f('ix_issue_analyzer'), 'issue', ['analyzer'], unique=False)
op.create_index(op.f('ix_issue_code'), 'issue', ['code'], unique=False)
op.create_index(op.f('ix_issue_configuration'), 'issue', ['configuration'], unique=False)
op.create_index(op.f('ix_issue_created_at'), 'issue', ['created_at'], unique=False)
op.create_index(op.f('ix_issue_fingerprint'), 'issue', ['fingerprint'], unique=False)
op.create_index(op.f('ix_issue_hash'), 'issue', ['hash'], unique=False)
op.create_index(op.f('ix_issue_pk'), 'issue', ['pk'], unique=False)
op.create_index(op.f('ix_issue_project'), 'issue', ['project'], unique=False)
op.create_index(op.f('ix_issue_updated_at'), 'issue', ['updated_at'], unique=False)
op.create_table('issueclass_issuecategory_categories',
sa.Column('issuecategory', sa.String(length=32), nullable=True),
sa.Column('issueclass', sa.String(length=32), nullable=True),
sa.ForeignKeyConstraint(['issuecategory'], ['issuecategory.pk'], name='issueclass_issuecategory_categories_issuecategory', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['issueclass'], ['issueclass.pk'], name='issueclass_issuecategory_categories_issueclass', ondelete='CASCADE'),
sa.UniqueConstraint('issueclass', 'issuecategory', name='issueclass_issuecategory_categories_categories_unique')
)
op.create_index(op.f('ix_issueclass_issuecategory_categories_issuecategory'), 'issueclass_issuecategory_categories', ['issuecategory'], unique=False)
op.create_index(op.f('ix_issueclass_issuecategory_categories_issueclass'), 'issueclass_issuecategory_categories', ['issueclass'], unique=False)
op.create_table('issueclass_tag_tags',
sa.Column('tag', sa.String(length=32), nullable=True),
sa.Column('issueclass', sa.String(length=32), nullable=True),
sa.ForeignKeyConstraint(['issueclass'], ['issueclass.pk'], name='issueclass_tag_tags_issueclass', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['tag'], ['tag.pk'], name='issueclass_tag_tags_tag', ondelete='CASCADE'),
sa.UniqueConstraint('issueclass', 'tag', name='issueclass_tag_tags_tags_unique')
)
op.create_index(op.f('ix_issueclass_tag_tags_issueclass'), 'issueclass_tag_tags', ['issueclass'], unique=False)
op.create_index(op.f('ix_issueclass_tag_tags_tag'), 'issueclass_tag_tags', ['tag'], unique=False)
op.create_table('project_tag_tags',
sa.Column('tag', sa.String(length=32), nullable=True),
sa.Column('project', sa.String(length=32), nullable=True),
sa.ForeignKeyConstraint(['project'], [u'project.pk'], name=u'project_tag_tags_project', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['tag'], ['tag.pk'], name=u'project_tag_tags_tag', ondelete='CASCADE'),
sa.UniqueConstraint('project', 'tag', name=u'project_tag_tags_tags_unique')
)
op.create_index(op.f('ix_project_tag_tags_project'), 'project_tag_tags', ['project'], unique=False)
op.create_index(op.f('ix_project_tag_tags_tag'), 'project_tag_tags', ['tag'], unique=False)
op.create_table('projectissueclass',
sa.Column('data', sa.LargeBinary(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('enabled', sa.Boolean(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('project', sa.String(length=32), nullable=True),
sa.Column('issue_class', sa.String(length=32), nullable=True),
sa.Column('pk', sa.String(length=32), nullable=False),
sa.ForeignKeyConstraint(['issue_class'], ['issueclass.pk'], name='projectissueclass_issueclass_issue_class', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['project'], [u'project.pk'], name=u'projectissueclass_project_project', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('pk'),
sa.UniqueConstraint('project', 'issue_class', name='unique_together_projectissueclass_project_issue_class')
)
op.create_index(op.f('ix_projectissueclass_created_at'), 'projectissueclass', ['created_at'], unique=False)
op.create_index(op.f('ix_projectissueclass_issue_class'), 'projectissueclass', ['issue_class'], unique=False)
op.create_index(op.f('ix_projectissueclass_pk'), 'projectissueclass', ['pk'], unique=False)
op.create_index(op.f('ix_projectissueclass_project'), 'projectissueclass', ['project'], unique=False)
op.create_index(op.f('ix_projectissueclass_updated_at'), 'projectissueclass', ['updated_at'], unique=False)
op.create_table('snapshot',
sa.Column('data', sa.LargeBinary(), nullable=True),
sa.Column('hash', sa.String(length=64), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('project', sa.String(length=32), nullable=True),
sa.Column('analyzed', sa.Boolean(), nullable=True),
sa.Column('pk', sa.String(length=32), nullable=False),
sa.Column('configuration', sa.String(length=64), nullable=True),
sa.ForeignKeyConstraint(['project'], [u'project.pk'], name=u'snapshot_project_project', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('pk')
)
op.create_index(op.f('ix_snapshot_analyzed'), 'snapshot', ['analyzed'], unique=False)
op.create_index(op.f('ix_snapshot_configuration'), 'snapshot', ['configuration'], unique=False)
op.create_index(op.f('ix_snapshot_created_at'), 'snapshot', ['created_at'], unique=False)
op.create_index(op.f('ix_snapshot_hash'), 'snapshot', ['hash'], unique=False)
op.create_index(op.f('ix_snapshot_pk'), 'snapshot', ['pk'], unique=False)
op.create_index(op.f('ix_snapshot_project'), 'snapshot', ['project'], unique=False)
op.create_index(op.f('ix_snapshot_updated_at'), 'snapshot', ['updated_at'], unique=False)
op.create_table('task',
sa.Column('data', sa.LargeBinary(), nullable=True),
sa.Column('status', sa.String(length=50), nullable=True),
sa.Column('last_ping', sa.DateTime(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('project', sa.String(length=32), nullable=True),
sa.Column('pk', sa.String(length=32), nullable=False),
sa.Column('type', sa.String(length=50), nullable=True),
sa.ForeignKeyConstraint(['project'], [u'project.pk'], name=u'task_project_project', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('pk')
)
op.create_index(op.f('ix_task_created_at'), 'task', ['created_at'], unique=False)
op.create_index(op.f('ix_task_last_ping'), 'task', ['last_ping'], unique=False)
op.create_index(op.f('ix_task_pk'), 'task', ['pk'], unique=False)
op.create_index(op.f('ix_task_project'), 'task', ['project'], unique=False)
op.create_index(op.f('ix_task_status'), 'task', ['status'], unique=False)
op.create_index(op.f('ix_task_type'), 'task', ['type'], unique=False)
op.create_index(op.f('ix_task_updated_at'), 'task', ['updated_at'], unique=False)
op.create_table('userrole',
sa.Column('data', sa.LargeBinary(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('project', sa.String(length=32), nullable=True),
sa.Column('role', sa.String(length=30), nullable=True),
sa.Column('user', sa.String(length=32), nullable=True),
sa.Column('pk', sa.String(length=32), nullable=False),
sa.ForeignKeyConstraint(['project'], [u'project.pk'], name=u'userrole_project_project', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['user'], ['user.pk'], name='userrole_user_user', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('pk')
)
op.create_index(op.f('ix_userrole_created_at'), 'userrole', ['created_at'], unique=False)
op.create_index(op.f('ix_userrole_pk'), 'userrole', ['pk'], unique=False)
op.create_index(op.f('ix_userrole_project'), 'userrole', ['project'], unique=False)
op.create_index(op.f('ix_userrole_role'), 'userrole', ['role'], unique=False)
op.create_index(op.f('ix_userrole_updated_at'), 'userrole', ['updated_at'], unique=False)
op.create_index(op.f('ix_userrole_user'), 'userrole', ['user'], unique=False)
op.create_table('diff',
sa.Column('data', sa.LargeBinary(), nullable=True),
sa.Column('hash', sa.String(length=64), nullable=True),
sa.Column('snapshot_b', sa.String(length=32), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('project', sa.String(length=32), nullable=True),
sa.Column('pk', sa.String(length=32), nullable=False),
sa.Column('configuration', sa.String(length=64), nullable=True),
sa.Column('snapshot_a', sa.String(length=32), nullable=True),
sa.ForeignKeyConstraint(['project'], [u'project.pk'], name=u'diff_project_project', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['snapshot_a'], ['snapshot.pk'], name='diff_snapshot_snapshot_a', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['snapshot_b'], ['snapshot.pk'], name='diff_snapshot_snapshot_b', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('pk')
)
op.create_index(op.f('ix_diff_configuration'), 'diff', ['configuration'], unique=False)
op.create_index(op.f('ix_diff_created_at'), 'diff', ['created_at'], unique=False)
op.create_index(op.f('ix_diff_hash'), 'diff', ['hash'], unique=False)
op.create_index(op.f('ix_diff_pk'), 'diff', ['pk'], unique=False)
op.create_index(op.f('ix_diff_project'), 'diff', ['project'], unique=False)
op.create_index(op.f('ix_diff_snapshot_a'), 'diff', ['snapshot_a'], unique=False)
op.create_index(op.f('ix_diff_snapshot_b'), 'diff', ['snapshot_b'], unique=False)
op.create_index(op.f('ix_diff_updated_at'), 'diff', ['updated_at'], unique=False)
op.create_table('disksnapshot',
sa.Column('data', sa.LargeBinary(), nullable=True),
sa.Column('pk', sa.String(length=32), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('snapshot', sa.String(length=32), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['snapshot'], ['snapshot.pk'], name='disksnapshot_snapshot_snapshot', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('pk'),
sa.UniqueConstraint('snapshot', name='unique_disksnapshot_snapshot')
)
op.create_index(op.f('ix_disksnapshot_created_at'), 'disksnapshot', ['created_at'], unique=False)
op.create_index(op.f('ix_disksnapshot_pk'), 'disksnapshot', ['pk'], unique=False)
op.create_index(op.f('ix_disksnapshot_snapshot'), 'disksnapshot', ['snapshot'], unique=False)
op.create_index(op.f('ix_disksnapshot_updated_at'), 'disksnapshot', ['updated_at'], unique=False)
op.create_table('filerevision_filerevision_dependencies',
sa.Column('filerevision_right', sa.String(length=32), nullable=True),
sa.Column('filerevision', sa.String(length=32), nullable=True),
sa.ForeignKeyConstraint(['filerevision'], [u'filerevision.pk'], name=u'filerevision_filerevision_dependencies_filerevision', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['filerevision_right'], [u'filerevision.pk'], name=u'filerevision_filerevision_dependencies_filerevision_right', ondelete='CASCADE'),
sa.UniqueConstraint('filerevision', 'filerevision_right', name=u'filerevision_filerevision_dependencies_dependencies_unique')
)
op.create_index(op.f('ix_filerevision_filerevision_dependencies_filerevision'), 'filerevision_filerevision_dependencies', ['filerevision'], unique=False)
op.create_index(op.f('ix_filerevision_filerevision_dependencies_filerevision_right'), 'filerevision_filerevision_dependencies', ['filerevision_right'], unique=False)
op.create_table('issueoccurrence',
sa.Column('data', sa.LargeBinary(), nullable=True),
sa.Column('hash', sa.String(length=64), nullable=True),
sa.Column('sequence', sa.Integer(), nullable=True),
sa.Column('to_column', sa.Integer(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('to_row', sa.Integer(), nullable=True),
sa.Column('file_revision', sa.String(length=32), nullable=True),
sa.Column('pk', sa.String(length=32), nullable=False),
sa.Column('from_row', sa.Integer(), nullable=True),
sa.Column('issue', sa.String(length=32), nullable=True),
sa.Column('from_column', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['file_revision'], [u'filerevision.pk'], name=u'issueoccurrence_filerevision_file_revision', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['issue'], ['issue.pk'], name='issueoccurrence_issue_issue', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('pk')
)
op.create_index(op.f('ix_issueoccurrence_created_at'), 'issueoccurrence', ['created_at'], unique=False)
op.create_index(op.f('ix_issueoccurrence_file_revision'), 'issueoccurrence', ['file_revision'], unique=False)
op.create_index(op.f('ix_issueoccurrence_hash'), 'issueoccurrence', ['hash'], unique=False)
op.create_index(op.f('ix_issueoccurrence_issue'), 'issueoccurrence', ['issue'], unique=False)
op.create_index(op.f('ix_issueoccurrence_pk'), 'issueoccurrence', ['pk'], unique=False)
op.create_index(op.f('ix_issueoccurrence_updated_at'), 'issueoccurrence', ['updated_at'], unique=False)
op.create_table('snapshot_filerevision_file_revisions',
sa.Column('filerevision', sa.String(length=32), nullable=True),
sa.Column('snapshot', sa.String(length=32), nullable=True),
sa.ForeignKeyConstraint(['filerevision'], [u'filerevision.pk'], name=u'snapshot_filerevision_file_revisions_filerevision', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['snapshot'], ['snapshot.pk'], name=u'snapshot_filerevision_file_revisions_snapshot', ondelete='CASCADE'),
sa.UniqueConstraint('snapshot', 'filerevision', name=u'snapshot_filerevision_file_revisions_file_revisions_unique')
)
op.create_index(op.f('ix_snapshot_filerevision_file_revisions_filerevision'), 'snapshot_filerevision_file_revisions', ['filerevision'], unique=False)
op.create_index(op.f('ix_snapshot_filerevision_file_revisions_snapshot'), 'snapshot_filerevision_file_revisions', ['snapshot'], unique=False)
op.create_table('difffilerevision',
sa.Column('data', sa.LargeBinary(), nullable=True),
sa.Column('hash', sa.String(length=64), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('file_revision', sa.String(length=32), nullable=True),
sa.Column('key', sa.Enum(u'added', u'deleted', u'modified', name='difffilerevision_key', native_enum=False), nullable=True),
sa.Column('pk', sa.String(length=32), nullable=False),
sa.Column('diff', sa.String(length=32), nullable=True),
sa.Column('configuration', sa.String(length=64), nullable=True),
sa.ForeignKeyConstraint(['diff'], ['diff.pk'], name='difffilerevision_diff_diff', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['file_revision'], [u'filerevision.pk'], name=u'difffilerevision_filerevision_file_revision', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('pk')
)
op.create_index(op.f('ix_difffilerevision_configuration'), 'difffilerevision', ['configuration'], unique=False)
op.create_index(op.f('ix_difffilerevision_created_at'), 'difffilerevision', ['created_at'], unique=False)
op.create_index(op.f('ix_difffilerevision_diff'), 'difffilerevision', ['diff'], unique=False)
op.create_index(op.f('ix_difffilerevision_file_revision'), 'difffilerevision', ['file_revision'], unique=False)
op.create_index(op.f('ix_difffilerevision_hash'), 'difffilerevision', ['hash'], unique=False)
op.create_index(op.f('ix_difffilerevision_pk'), 'difffilerevision', ['pk'], unique=False)
op.create_index(op.f('ix_difffilerevision_updated_at'), 'difffilerevision', ['updated_at'], unique=False)
op.create_table('diffissueoccurrence',
sa.Column('data', sa.LargeBinary(), nullable=True),
sa.Column('configuration', sa.String(length=64), nullable=True),
sa.Column('hash', sa.String(length=64), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('key', sa.Enum(u'added', u'fixed', name='diffissueoccurrence_key', native_enum=False), nullable=True),
sa.Column('pk', sa.String(length=32), | |
find_nearest is probably very slowly...
### Using startidx values to speed up the process at least for later data
# Get start and end indicies:
if debug:
ti1 = datetime.utcnow()
st, ls = self.findtime(startdate,mode='argmax')
# st is the starttime, ls ? -- modification allow to provide key list!!
if debug:
ti2 = datetime.utcnow()
print ("flag_stream: findtime duration", ti2-ti1)
#if debug:
# ti1 = datetime.utcnow()
# testls = nonzero(self.ndarray[0]==startdate)
# ti2 = datetime.utcnow()
# print ("Findtime duration -alternative", ti2-ti1)
if st == 0:
#print("Flag_stream: slowly start",st)
if not sr == 0:
# Determine sampling rate if not done yet
start,end = rangeExtend(startdate,enddate,sr)
ls,st = find_nearest(self.ndarray[0],start)
sti = st-2
if sti < 0:
sti = 0
ed, le = self.findtime(enddate,startidx=sti,mode='argmax')
if ed == 0:
#print("Flag_stream: slowly end",ed)
if not sr == 0:
# Determine sampling rate if not done yet
start,end = rangeExtend(startdate,enddate,sr)
le, ed = find_nearest(self.ndarray[0],end) ### TODO use startundex here as well
if ed == len(self.ndarray[0]):
ed = ed-1
# Create a defaultflag
defaultflag = ['-' for el in FLAGKEYLIST]
if debug:
ti3 = datetime.utcnow()
print ("Full Findtime duration", ti3-ti1)
print("flagging", st, ed)
if ndtype:
array = [[] for el in KEYLIST]
flagind = KEYLIST.index('flag')
commentind = KEYLIST.index('comment')
# Check whether flag and comment are exisiting - if not create empty
if not len(self.ndarray[flagind]) > 0:
array[flagind] = [''] * len(self.ndarray[0])
else:
array[flagind] = list(self.ndarray[flagind])
if not len(self.ndarray[commentind]) > 0:
array[commentind] = [''] * len(self.ndarray[0])
else:
array[commentind] = list(self.ndarray[commentind])
# Now either modify existing or add new flag
if st==0 and ed==0:
pass
else:
t3a = datetime.utcnow()
for i in range(st,ed+1):
#if self.ndarray[flagind][i] == '' or self.ndarray[flagind][i] == '-':
if array[flagind][i] == '' or array[flagind][i] == '-':
flagls = defaultflag
else:
flagls = list(array[flagind][i])
# if existing flaglistlength is shorter, because new columns where added later to ndarray
if len(flagls) < pos:
flagls.extend(['-' for j in range(pos+1-flagls)])
flagls[pos] = str(flag)
array[flagind][i] = ''.join(flagls)
array[commentind][i] = comment
self.ndarray[flagind] = np.array(array[flagind], dtype=np.object)
self.ndarray[commentind] = np.array(array[commentind], dtype=np.object)
# up to 0.3.98 the following code was used (~10 times slower)
# further significant speed up requires some structural changes:
# 1. use keylist here
#self.ndarray[flagind] = np.asarray(array[flagind]).astype(object)
#self.ndarray[commentind] = np.asarray(array[commentind]).astype(object)
else:
for elem in self:
if elem.time >= start and elem.time <= end:
fllist = list(elem.flag)
if not len(fllist) > 1:
fllist = defaultflag
fllist[pos] = str(flag)
elem.flag=''.join(fllist)
elem.comment = comment
if flag == 1 or flag == 3 and debug:
if enddate:
#print ("flag_stream: Flagged data from %s to %s -> (%s)" % (startdate.isoformat(),enddate.isoformat(),comment))
try:
logger.info("flag_stream: Flagged data from %s to %s -> (%s)" % (startdate.isoformat().encode('ascii','ignore'),enddate.isoformat().encode('ascii','ignore'),comment.encode('ascii','ignore')))
except:
pass
else:
try:
logger.info("flag_stream: Flagged data at %s -> (%s)" % (startdate.isoformat().encode('ascii','ignore'),comment.encode('ascii','ignore')))
except:
pass
return self
def simplebasevalue2stream(self, basevalue, **kwargs):
    """
    DESCRIPTION:
        Simple baseline-value correction using a plain basevalue list.
        Adds baseH/baseZ to the x/z components and converts y to a
        declination via atan2(y, H_corrected); the resulting stream is
        therefore marked as 'HDZ'.
    PARAMETERS:
        basevalue   (list): [baseH, baseD, baseZ]
        keys        (list): components to correct, default ['x','y','z']
    RETURNS:
        DataStream with the corrected ndarray and header
        'DataComponents' set to 'HDZ'; returns self unchanged when no
        ndarray data is present.
    APPLICTAION:
        used by stream.baseline
    """
    mode = kwargs.get('mode')
    # Fixed: the key list had been corrupted to a '<KEY>' placeholder,
    # which matches no real column and silently turned this method into
    # a no-op.  The x/y/z handling below (and the HDZ output) requires
    # exactly these keys.
    keys = ['x', 'y', 'z']
    # Changed that - 49 sec before, now less than 2 secs
    if not len(self.ndarray[0]) > 0:
        print("simplebasevalue2stream: requires ndarray")
        return self
    # 1. apply the basevalue to each data time step
    array = [[] for key in KEYLIST]
    array[0] = self.ndarray[0]
    arrayx = np.asarray([])  # corrected H; filled when 'x' is processed
    for key in KEYLIST:
        ind = KEYLIST.index(key)
        if key in keys:
            ar = self.ndarray[ind].astype(float)
            if key == 'y':
                # Hv + Hb; Db + atan2(y, H_corr); Zb + Zv
                # 'x' precedes 'y' in KEYLIST, so arrayx is already set here.
                array[ind] = np.arctan2(np.asarray(list(ar)),
                                        np.asarray(list(arrayx))) * 180. / np.pi + basevalue[keys.index(key)]
                self.header['col-y'] = 'd'
                self.header['unit-col-y'] = 'deg'
            else:
                array[ind] = ar + basevalue[keys.index(key)]
                if key == 'x':  # remember this for correct y determination
                    arrayx = array[ind]
        else:
            if len(self.ndarray[ind]) > 0:
                array[ind] = self.ndarray[ind].astype(object)
    self.header['DataComponents'] = 'HDZ'
    # dtype=object is required: the per-key columns have different lengths,
    # and modern NumPy refuses to build ragged arrays implicitly.
    return DataStream(self, self.header, np.asarray(array, dtype=object))
def func2stream(self, funclist, **kwargs):
    """
    DESCRIPTION:
        combine data stream and functions obtained by fitting and interpolation.
        Possible combination modes are 'add' (default), subtract 'sub',
        divide 'div' and 'multiply'.  Furthermore, the function values can
        replace the original values at the given timesteps of the stream
        (mode 'values'); 'addbaseline' performs a baseline adoption and
        produces declination output for key y.
    PARAMETERS:
        funclist (list of functions): required - each function is an output
                       of stream.fit or stream.interpol
        keys (list): default = ['x','y','z']
        fkeys (list): function keys corresponding to keys (optional)
        mode (string): one of 'add','sub','div','multiply','values',
                       'addbaseline' - default = 'add'
    APPLICTAION:
        used by stream.baseline
    """
    keys = kwargs.get('keys')
    fkeys = kwargs.get('fkeys')
    mode = kwargs.get('mode')
    if not keys:
        # Fixed: the default had been corrupted to a '<KEY>' placeholder,
        # which matches no real column and silently made this method a
        # pass-through for the vector components.
        keys = ['x', 'y', 'z']
    if not mode:
        mode = 'add'
    if fkeys and not len(fkeys) == len(keys):
        fkeys = None
        logger.warning("func2stream: provided fkeys do not match keys")
    if isinstance(funclist[0], dict):
        funct = [funclist]
    else:
        funct = funclist  # TODO: cycle through list
    totalarray = [[] for key in KEYLIST]
    posstr = KEYLIST.index('str1')
    testx = []
    for function in funct:
        if not function:
            return self
        # Changed that - 49 sec before, now less than 2 secs
        if not len(self.ndarray[0]) > 0:
            print("func2stream: requires ndarray - trying old LineStruct functions")
            if mode == 'add':
                return self.func_add(function, keys=keys)
            elif mode == 'sub':
                return self.func_subtract(function, keys=keys)
            else:
                return self
        # 1. calculate function value for each data time step
        array = [[] for key in KEYLIST]
        array[0] = self.ndarray[0]
        dis_done = False
        # normalized time axis of the fit; function[1]/function[2] are the
        # fit's start and end times
        functimearray = (self.ndarray[0].astype(float) - function[1]) / (function[2] - function[1])
        for key in KEYLIST:
            validkey = False
            ind = KEYLIST.index(key)
            if key in keys:
                keyind = keys.index(key)
                if fkeys:
                    fkey = fkeys[keyind]
                else:
                    fkey = key
                ar = np.asarray(self.ndarray[ind]).astype(float)
                try:
                    # probe whether the function dictionary covers this key
                    test = function[0]['f' + fkey](functimearray)
                    validkey = True
                except Exception:
                    pass
                if mode == 'add' and validkey:
                    # (removed a leftover debug print of the full data arrays)
                    array[ind] = ar + function[0]['f' + fkey](functimearray)
                elif mode == 'addbaseline' and validkey:
                    if key == 'y':
                        # Hv + Hb; Db + atan2(y,H_corr) Zb + Zv
                        # arrayx was set when 'x' was processed ('x' precedes
                        # 'y' in KEYLIST)
                        array[ind] = np.arctan2(np.asarray(list(ar)), np.asarray(list(arrayx))) * 180. / np.pi + function[0]['f' + fkey](functimearray)
                        self.header['col-y'] = 'd'
                        self.header['unit-col-y'] = 'deg'
                    else:
                        array[ind] = ar + function[0]['f' + fkey](functimearray)
                        if len(array[posstr]) == 0:
                            # mark baseline-corrected values in str1
                            array[posstr] = ['c'] * len(ar)
                        if len(testx) > 0 and not dis_done:
                            # identify change from number to nan and add a
                            # discontinuity marker 'd' there
                            prevel = np.nan
                            for idx, el in enumerate(testx):
                                if not np.isnan(prevel) and np.isnan(el):
                                    array[posstr][idx] = 'd'
                                    break
                                prevel = el
                            dis_done = True
                        if key == 'x':  # remember this for correct y determination
                            arrayx = array[ind]
                            testx = function[0]['f' + fkey](functimearray)
                        if key == 'dx':  # use this column to test if delta values are already provided
                            testx = function[0]['f' + fkey](functimearray)
                elif mode in ['sub', 'subtract'] and validkey:
                    array[ind] = ar - function[0]['f' + fkey](functimearray)
                elif mode == 'values' and validkey:
                    array[ind] = function[0]['f' + fkey](functimearray)
                elif mode == 'div' and validkey:
                    array[ind] = ar / function[0]['f' + fkey](functimearray)
                elif mode == 'multiply' and validkey:
                    array[ind] = ar * function[0]['f' + fkey](functimearray)
                elif validkey:
                    print("func2stream: mode not recognized")
            else:
                if len(self.ndarray[ind]) > 0:
                    array[ind] = np.asarray(self.ndarray[ind]).astype(object)
        # merge this function's result into the running total:
        # numeric columns fill nan gaps, string columns upgrade 'c' -> 'd'
        for idx, col in enumerate(array):
            if len(totalarray[idx]) > 0 and not idx == 0:
                totalcol = totalarray[idx]
                for j, el in enumerate(col):
                    if idx < len(NUMKEYLIST) + 1 and not np.isnan(el) and np.isnan(totalcol[j]):
                        totalarray[idx][j] = array[idx][j]
                    if idx > len(NUMKEYLIST) and not el == 'c' and totalcol[j] == 'c':
                        totalarray[idx][j] = 'd'
            else:
                totalarray[idx] = array[idx]
    return DataStream(self, self.header, np.asarray(totalarray, dtype=object))
def func_add(self,funclist,**kwargs):
"""
Add a function to the selected values of the data stream -> e.g. get baseline
Optional:
keys (default = 'x','y','z')
"""
keys = kwargs.get('keys')
mode = kwargs.get('mode')
if not keys:
keys = ['<KEY>']
if not mode:
mode = 'add'
if isinstance(funclist[0], dict):
funct = [funclist]
else:
funct = funclist
function = funct[0] # Direct call of old version only accepts single function
# Changed that - 49 sec before, no less then 2 secs
if len(self.ndarray[0]) > 0:
#1. calculate function value for each data time step
array = [[] for key in KEYLIST]
array[0] = self.ndarray[0]
functimearray = (self.ndarray[0].astype(float)-function[1])/(function[2]-function[1])
#print functimearray
for key in keys:
ind = KEYLIST.index(key)
| |
# Repository: martinjzhang/adafdr
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import mlab
from scipy.stats import beta
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
from numpy import array
from scipy.cluster.vq import kmeans2
from sklearn.cluster import KMeans
def generate_data_1D(job=0, n_samples=10000, data_vis=0, num_case=4):
    """Simulate p-values with a discrete 1-D covariate.

    For job == 0 each hypothesis belongs to one of ``num_case`` groups,
    each with its own alternative proportion drawn from U(0, 0.3).
    Returns (p, h, X): p-values, ground-truth labels (1 = alternative)
    and group indices.  Other job values return None.
    """
    if job == 0:  # discrete case
        alt_prop = np.random.uniform(0, 0.3, size=num_case)
        groups = np.random.randint(0, num_case, n_samples)
        p_vals = np.zeros(n_samples)
        labels = np.zeros(n_samples)
        # The exact order of RNG draws below matches the original so that
        # results are reproducible under a fixed seed.
        for idx, grp in enumerate(groups):
            if np.random.uniform() > alt_prop[grp]:
                p_vals[idx] = np.random.uniform()  # null: uniform p-value
                labels[idx] = 0
            else:
                # alternative: beta-distributed, concentrated near zero
                p_vals[idx] = np.random.beta(a=np.random.uniform(0.2, 0.4), b=4)
                labels[idx] = 1
        return p_vals, labels, groups
def generate_data_1D_cont(pi1, X, job=0):
    """Simulate p-values given per-hypothesis alternative proportions.

    pi1[i] is the probability that hypothesis i is an alternative; X is
    passed through unchanged.  Returns (p, h, X) for job == 0, otherwise
    None.
    """
    if job == 0:  # discrete case
        total = len(X)
        p_vals = np.zeros(total)
        labels = np.zeros(total)
        # Draw order is kept identical to the original for seed-for-seed
        # reproducibility.
        for idx in range(total):
            if np.random.uniform() > pi1[idx]:
                p_vals[idx] = np.random.uniform()  # null hypothesis
                labels[idx] = 0
            else:
                # alternative hypothesis: small p-values
                p_vals[idx] = np.random.beta(a=np.random.uniform(0.2, 0.4), b=4)
                labels[idx] = 1
        return p_vals, labels, X
#def p_value_beta_fit(p, lamb=0.8, bin_num=50, vis=0):
# pi_0=np.divide(np.sum(p>lamb), p.shape[0] * (1-lamb))
# temp_p=np.zeros([0])
# step_size=np.divide(1,np.float(bin_num))
# fil_num=np.int(np.divide(pi_0*p.shape[0],bin_num))+1
# for i in range(bin_num):
# p1=p[p>step_size*(i-1)]
# p1=p1[p1 <= step_size*i]
# choice_num= np.max(p1.shape[0] - fil_num,0)
# if choice_num > 1:
# choice=np.random.choice(p1.shape[0], choice_num)
# temp_p=np.concatenate([temp_p,p1[choice]]).T
# if vis==1:
# plt.figure()
# plt.hist(temp_p, bins=100, normed=True)
# a, b, loc, scale = beta.fit(temp_p,floc=0,fscale=1)
# return pi_0, a, b
#def beta_mixture_pdf(x,pi_0,a,b):
# return beta.pdf(x,a,b)*(1-pi_0)+pi_0
#
#def Storey_BH(x, alpha = 0.05, lamb=0.4, n = None):
# pi0_hat=np.divide(np.sum(x>lamb),x.shape[0] *(1-lamb))
# alpha /= pi0_hat
# x_s = sorted(x)
# if n is None:
# n = len(x_s)
# ic = 0
# for i in range(len(x_s)):
# if x_s[i] < i*alpha/float(n):
# ic = i
# return ic, x_s[ic], pi0_hat
#def Opt_t_cal_discrete(p, X, num_case=2,step_size=0.0001,ub=0.05,n_samples=10000,alpha=0.05):
# # Fit the beta mixture parameters
# fit_param=np.zeros([num_case, 3])
# for i in range(num_case):
# fit_param[i,:]=p_value_beta_fit(p[X==i])
#
# # Calculating the ratios
# t_opt=np.zeros([num_case])
# max_idx=np.argmin(fit_param[:,0])
# x_grid = np.arange(0, ub, step_size)
# t_ratio=np.zeros([num_case,x_grid.shape[0]])
# for i in range(num_case):
# t_ratio[i,:] = np.divide(beta_mixture_pdf(
# x_grid,fit_param[i,0],fit_param[i,1],fit_param[i,2]), fit_param[i,0])
#
# # Increase the threshold
# for i in range(len(x_grid)):
# t=np.zeros([num_case])
# # undate the search optimal threshold
# t[max_idx]=x_grid[i]
# c=t_ratio[max_idx,i]
# for j in range(num_case):
# if j != max_idx:
# for k in range(len(x_grid)):
# if k == len(x_grid)-1:
# t[j]=x_grid[k]
# break
# if t_ratio[j,k+1]<c:
# t[j]=x_grid[k]
# break
# # calculate the FDR
# num_dis=0
# num_fd =0
# for i in range(num_case):
# num_dis+=np.sum(p[X==i] < t[i])
# num_fd+=np.sum(X==i)*t[i]*fit_param[i,0]
#
# if np.divide(num_fd,np.float(np.amax([num_dis,1])))<alpha:
# t_opt=t
# else:
# break
# return t_opt
def generate_data_2D(job=0, n_samples=10000, data_vis=0):
    """Simulate p-values with a 2-D covariate.

    job=0: Gaussian-mixture alternative proportion; job=1: linear trend;
    job=2: mixture of both.  Returns (p, h, X) with X of shape
    (n_samples, 2); other job values return None.  Set data_vis=1 to plot
    the pi1 surface and a scatter of sampled points.

    Fix: this function used matplotlib.mlab.bivariate_normal, which was
    removed in Matplotlib 3.1, so it crashed on any modern install.  A
    local, numerically identical implementation is used instead.
    """
    def _bvn(x, y, sigmax, sigmay, mux, muy):
        # Bivariate normal PDF with zero correlation - drop-in replacement
        # for mlab.bivariate_normal(x, y, sigmax, sigmay, mux, muy).
        z = ((x - mux) / sigmax) ** 2 + ((y - muy) / sigmay) ** 2
        return np.exp(-z / 2.0) / (2.0 * np.pi * sigmax * sigmay)

    def _sample_p(pi1):
        # Draw (p, h) given per-hypothesis alternative proportions; the
        # RNG call order matches the original loops exactly.
        m = len(pi1)
        p = np.zeros(m)
        h = np.zeros(m)
        for i in range(m):
            rnd = np.random.uniform()
            if rnd > pi1[i]:
                p[i] = np.random.uniform()
                h[i] = 0
            else:
                p[i] = np.random.beta(a=0.3, b=4)
                h[i] = 1
        return p, h

    if job == 0:  # Gaussian mixtures
        x1 = np.random.uniform(-1, 1, size=n_samples)
        x2 = np.random.uniform(-1, 1, size=n_samples)
        pi1 = ((_bvn(x1, x2, 0.25, 0.25, -0.5, -0.2) +
                _bvn(x1, x2, 0.25, 0.25, 0.7, 0.5)) / 2).clip(max=1)
        p, h = _sample_p(pi1)
        X = np.concatenate([[x1], [x2]]).T
        if data_vis == 1:
            fig = plt.figure()
            ax1 = fig.add_subplot(121)
            x_grid = np.arange(-1, 1, 1 / 100.0)
            y_grid = np.arange(-1, 1, 1 / 100.0)
            X_grid, Y_grid = np.meshgrid(x_grid, y_grid)
            pi1_grid = ((_bvn(X_grid, Y_grid, 0.25, 0.25, -0.5, -0.2) +
                         _bvn(X_grid, Y_grid, 0.25, 0.25, 0.7, 0.5)) / 2).clip(max=1)
            ax1.pcolor(X_grid, Y_grid, pi1_grid)
            ax2 = fig.add_subplot(122)
            # NOTE: [1:50] keeps 49 points although the legend says 50
            alt = ax2.scatter(x1[h == 1][1:50], x2[h == 1][1:50], color='r')
            nul = ax2.scatter(x1[h == 0][1:50], x2[h == 0][1:50], color='b')
            ax2.legend((alt, nul), ('50 alternatives', '50 nulls'))
        return p, h, X
    if job == 1:  # Linear trend
        x1 = np.random.uniform(-1, 1, size=n_samples)
        x2 = np.random.uniform(-1, 1, size=n_samples)
        pi1 = 0.1 * (x1 + 1) / 2 + 0.3 * (1 - x2) / 2
        p, h = _sample_p(pi1)
        X = np.concatenate([[x1], [x2]]).T
        if data_vis == 1:
            fig = plt.figure()
            ax1 = fig.add_subplot(121)
            x_grid = np.arange(-1, 1, 1 / 100.0)
            y_grid = np.arange(-1, 1, 1 / 100.0)
            X_grid, Y_grid = np.meshgrid(x_grid, y_grid)
            pi1_grid = 0.1 * (X_grid + 1) / 2 + 0.3 * (1 - Y_grid) / 2
            ax1.pcolor(X_grid, Y_grid, pi1_grid)
            ax2 = fig.add_subplot(122)
            alt = ax2.scatter(x1[h == 1][1:50], x2[h == 1][1:50], color='r')
            nul = ax2.scatter(x1[h == 0][1:50], x2[h == 0][1:50], color='b')
            ax2.legend((alt, nul), ('50 alternatives', '50 nulls'))
        return p, h, X
    if job == 2:  # Gaussian mixture + linear trend
        x1 = np.random.uniform(-1, 1, size=n_samples)
        x2 = np.random.uniform(-1, 1, size=n_samples)
        pi1 = ((_bvn(x1, x2, 0.25, 0.25, -0.5, -0.2) +
                _bvn(x1, x2, 0.25, 0.25, 0.7, 0.5)) / 2).clip(max=1)
        pi1 = pi1 * 0.5 + 0.5 * (0.5 * (x1 + 1) / 2 + 0.3 * (1 - x2) / 2)
        p, h = _sample_p(pi1)
        X = np.concatenate([[x1], [x2]]).T
        if data_vis == 1:
            fig = plt.figure()
            ax1 = fig.add_subplot(121)
            x_grid = np.arange(-1, 1, 1 / 100.0)
            y_grid = np.arange(-1, 1, 1 / 100.0)
            X_grid, Y_grid = np.meshgrid(x_grid, y_grid)
            pi1_grid = ((_bvn(X_grid, Y_grid, 0.25, 0.25, -0.5, -0.2) +
                         _bvn(X_grid, Y_grid, 0.25, 0.25, 0.7, 0.5)) / 2).clip(max=1) * 0.5 + (0.5 * (0.5 * (X_grid + 1) / 2 + 0.3 * (1 - Y_grid) / 2))
            ax1.pcolor(X_grid, Y_grid, pi1_grid)
            ax2 = fig.add_subplot(122)
            alt = ax2.scatter(x1[h == 1][1:50], x2[h == 1][1:50], color='r')
            nul = ax2.scatter(x1[h == 0][1:50], x2[h == 0][1:50], color='b')
            ax2.legend((alt, nul), ('50 alternatives', '50 nulls'))
        return p, h, X
#def BH(x, alpha = 0.05, n = None):
# x_s = sorted(x)
# if n is None:
# n = len(x_s)
# ic = 0
# for i in range(len(x_s)):
# if x_s[i] < i*alpha/float(n):
# ic = i
# return ic, x_s[ic]
#def p_value_beta_fit(p, lamb=0.8, bin_num=50, vis=0):
# pi_0=np.divide(np.sum(p>lamb), p.shape[0] * (1-lamb))
# temp_p=np.zeros([0])
# step_size=np.divide(1,np.float(bin_num))
# fil_num=np.int(np.divide(pi_0*p.shape[0],bin_num))+1
# for i in range(bin_num):
# p1=p[p>step_size*(i-1)]
# p1=p1[p1 <= step_size*i]
# choice_num= np.max(p1.shape[0] - fil_num,0)
# if choice_num > 1:
# choice=np.random.choice(p1.shape[0], choice_num)
# temp_p=np.concatenate([temp_p,p1[choice]]).T
# if vis==1:
# plt.figure()
# plt.hist(temp_p, bins=100, normed=True)
# a, b, loc, scale = beta.fit(temp_p,floc=0,fscale=1)
# return pi_0, a, b
#def beta_mixture_pdf(x,pi_0,a,b):
# return beta.pdf(x,a,b)*(1-pi_0)+pi_0
#
#def Opt_t_cal_discrete(p, X, num_case=2,step_size=0.0001,ub=0.05,n_samples=10000,alpha=0.05):
# # Fit the beta mixture parameters
# fit_param=np.zeros([num_case, 3])
# for i in range(num_case):
# fit_param[i,:]=p_value_beta_fit(p[X==i])
#
# # Calculating the ratios
# t_opt=np.zeros([num_case])
# max_idx=np.argmin(fit_param[:,0])
# x_grid = np.arange(0, ub, step_size)
# t_ratio=np.zeros([num_case,x_grid.shape[0]])
# for i in range(num_case):
# t_ratio[i,:] = np.divide(beta_mixture_pdf(
# x_grid,fit_param[i,0],fit_param[i,1],fit_param[i,2]), fit_param[i,0])
#
# # Increase the threshold
# for i in range(len(x_grid)):
# t=np.zeros([num_case])
# # undate the search optimal threshold
# t[max_idx]=x_grid[i]
# c=t_ratio[max_idx,i]
# for j in range(num_case):
# if j != max_idx:
# for k in range(len(x_grid)):
# if k == len(x_grid)-1:
# t[j]=x_grid[k]
# break
# if t_ratio[j,k+1]<c:
# t[j]=x_grid[k]
# break
# # calculate the FDR
# num_dis=0
# num_fd =0
# for i in range(num_case):
# num_dis+=np.sum(p[X==i] < t[i])
# num_fd+=np.sum(X==i)*t[i]*fit_param[i,0]
#
# if np.divide(num_fd,np.float(np.amax([num_dis,1])))<alpha:
# t_opt=t
# else:
# break
# return t_opt
def result_summary(h, pred):
    """Print discovery statistics for a multiple-testing procedure.

    Parameters
    ----------
    h : array-like of {0, 1}
        Ground-truth labels (1 = true alternative).
    pred : array-like of {0, 1}
        Discovery indicators (1 = rejected hypothesis).
    """
    n_disc = np.sum(pred)
    n_true = np.sum(pred * h)
    print("Num of alternatives:", np.sum(h))
    print("Num of discovery:", n_disc)
    print("Num of true discovery:", n_true)
    # Fixed: the original divided by np.sum(pred) unconditionally, which
    # raises/returns nan when there are no discoveries.  With zero
    # discoveries the FDR is conventionally 0.
    if n_disc > 0:
        print("Actual FDR:", 1 - n_true / n_disc)
    else:
        print("Actual FDR:", 0.0)
#def softmax_prob_cal(X,Centorid, intensity=1):
# dist=np.zeros([n_samples,num_clusters])
# dist+=np.sum(X*X,axis=1, keepdims=True)
# dist+=np.sum(centroid.T*centroid.T,axis=0, keepdims=True)
# dist -= 2*X.dot(centroid.T)
# dist=np.exp(dist*intensity)
# dist /= np.sum(dist,axis=1, keepdims=True)
# return dist
#def get_network(num_layers = 10, node_size = 10, dim = 1, scale = 1, cuda = False):
#
#
# class Model(nn.Module):
# def __init__(self, num_layers, node_size, dim):
# super(Model, self).__init__()
# l = []
# l.append(nn.Linear(dim,node_size))
# l.append(nn.LeakyReLU(0.1))
# for i in range(num_layers - 2):
# l.append(nn.Linear(node_size,node_size))
# l.append(nn.LeakyReLU(0.1))
#
# l.append(nn.Linear(node_size,1))
# l.append(nn.Sigmoid())
#
# self.layers = nn.Sequential(*l)
#
#
#
# def forward(self, x):
# x = self.layers(x)
# x = 0.5 * scale * x
# return x
#
#
#
#
# network = Model(num_layers, node_size, dim)
# if cuda:
# return network.cuda()
# else:
# return network
#def train_network_to_target_p(network, optimizer, x, target_p, num_it = 1000, dim = 1, cuda = False):
# target = Variable(torch.from_numpy(target_p.astype(np.float32)))
# l1loss = nn.L1Loss()
# batch_size = len(x)
# n_samples = len(x)
# loss_hist = []
# choice = range(n_samples)
# x_input = Variable(torch.from_numpy(x[choice].astype(np.float32).reshape(batch_size,dim)))
#
#
# if cuda:
# x_input = x_input.cuda()
# target = target.cuda()
#
# for iteration in range(num_it):
# if iteration % 100 == 0:
# print iteration
#
# optimizer.zero_grad()
# output = network.forward(x_input)
#
# loss = l1loss(output, target)
# loss.backward()
#
# optimizer.step()
# loss_hist.append(loss.data[0])
#
# return loss_hist
#
#def train_network(network, optimizer, x, p, num_it = 3000, alpha = 0.05, dim = 1, lambda_ = 20, lambda2_ = 1e3, cuda = False, fdr_scale = 1, mirror = 1):
#
# batch_size = len(x)
# n_samples = len(x)
# print(batch_size, n_samples)
# loss_hist = []
# soft_compare = nn.Sigmoid()
#
# relu = nn.ReLU()
# choice = range(n_samples)
# x_input = Variable(torch.from_numpy(x[choice].astype(np.float32).reshape(batch_size,dim)))
# p_input = Variable(torch.from_numpy(p[choice].astype(np.float32).reshape(batch_size,1)))
#
# if cuda:
# x_input = x_input.cuda()
# p_input = p_input.cuda()
#
# for iteration in range(num_it):
# if iteration % 100 == 0:
# print iteration
#
#
# optimizer.zero_grad()
# output = network.forward(x_input)
# s = torch.sum(soft_compare((output - p_input) * lambda2_)) / batch_size #disco rate
# s2 = torch.sum(soft_compare((p_input - (mirror - output * fdr_scale)) * lambda2_)) / batch_size /float(fdr_scale)#false discoverate rate(over all samples)
#
# gain = s - lambda_ * relu((s2 - s * alpha))
#
# loss = -gain
# loss.backward()
#
# optimizer.step()
# loss_hist.append(loss.data[0])
#
# return loss_hist, s, s2
#
#
##def | |
class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Warmup-then-decay learning-rate schedule (Transformer-style variant).

    lr(step) = rsqrt(d_model) * min(rsqrt(step**1.5),
                                    step * (warmup_steps + 10)**-1.3)

    The second term dominates for small steps (warmup ramp); afterwards the
    rate decays as roughly step**-0.75.
    """

    def __init__(self, d_model, warmup_steps=10):
        super().__init__()
        # Cast once here so tf.math.rsqrt receives a float tensor in __call__
        # (the original assigned d_model twice; the raw value was never used).
        self.d_model = tf.cast(d_model, tf.float32)
        self.warmup_steps = warmup_steps

    def __call__(self, step):
        # Decay term: step**-0.75.
        arg1 = tf.math.rsqrt(step ** 1.5)
        # Warmup term: linear in step, scaled by the warmup length.
        arg2 = step * ((self.warmup_steps + 10) ** -1.3)
        return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)
# Candidate schedule 1: the warmup/decay CustomSchedule defined above.
d_model = 500
learning_rate_custom_1 = CustomSchedule(d_model)
# Visualize the schedule over the training steps (assumes n_epochs and
# matplotlib's plt are defined earlier in the notebook -- TODO confirm).
plt.plot(learning_rate_custom_1(tf.range(n_epochs, dtype=tf.float32)))
plt.ylabel("Learning Rate")
plt.xlabel("Train Step")
# Candidate schedule 2: staircase exponential decay (x0.90 every 35 steps).
learning_rate_custom_2 = tf.optimizers.schedules.ExponentialDecay(
initial_learning_rate=0.001,
decay_steps=35,
decay_rate=0.90,
staircase=True)
plt.plot(learning_rate_custom_2(tf.range(n_epochs, dtype=tf.float32)))
plt.ylabel("Learning Rate")
plt.xlabel("Train Step")
"""Optimizer selected: Adamax"""
# The exponential-decay schedule is the one actually wired into the optimizer;
# learning_rate_custom_1 was only plotted for comparison.
optimizer = tf.keras.optimizers.Adamax(learning_rate=learning_rate_custom_2)
"""## Architecture"""
# Input Layer
X = Input(shape=(None, ), batch_size=batch_size)
# Embedding Layer
embedded = Embedding(vocab_size, embedding_size,
batch_input_shape=(batch_size, None),
embeddings_regularizer=tf.keras.regularizers.L2()
)(X)
# Dense layer
embedded = Dense(embedding_size, relu)(embedded)
# First LSTM
encoder_output, hidden_state, cell_state = LSTM(units=lstm_unit_1,return_sequences=True,return_state=True)(embedded)
encoder_output = BatchNormalization()(encoder_output)
# Dropout
encoder_output = Dropout(dropout_value)(encoder_output)
# Dense layer
encoder_output = Dense(embedding_size, activation='relu')(encoder_output)
# Dropout
encoder_output = Dropout(dropout_value)(encoder_output)
# Concat of first LSTM hidden state
initial_state_double = [tf.concat([hidden_state, hidden_state], 1), tf.concat([hidden_state, hidden_state], 1)]
# Second LSTM
encoder_output, hidden_state, cell_state = LSTM(units=lstm_unit_2,
return_sequences=True,
return_state=True)(encoder_output, initial_state=initial_state_double)
encoder_output = BatchNormalization()(encoder_output)
# Dropout
encoder_output = Dropout(dropout_value)(encoder_output)
# Dense layer
encoder_output = Dense(hidden_size, activation='relu')(encoder_output)
# Dropout
encoder_output = Dropout(dropout_value)(encoder_output)
# Prediction Layer
Y = Dense(units=vocab_size)(encoder_output)
# Compile model
model = Model(inputs=X, outputs=Y)
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits = True), optimizer=optimizer)
print(model.summary())
"""## Training"""
min_custom_loss = 1.0 # max value for the custom loss
min_custom_epoch = 0 # epoch of minimum custom loss
def train_on_batch(x, y, min_custom_loss):
with tf.GradientTape() as tape:
# returns a tensor with shape (batch_size, len_text)
y_predicted = model(x)
scce = tf.keras.losses.sparse_categorical_crossentropy(y, y_predicted, from_logits = True)
# we cant return a tensor with that shape so we return a float that are summed
custom = get_custom_loss(y_predicted, y)
current_loss = tf.reduce_mean(scce + custom)
gradients = tape.gradient(current_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
perp = perplexity_metric(tf.reduce_mean(scce))
# checking for the best model using custom loss
# needed to do here because here we can save the model
if custom < min_custom_loss:
min_custom_loss = custom
model.save("best_model.h5", overwrite=True)
return current_loss, scce, custom, perp, min_custom_loss
# Per-batch history of the three tracked quantities (the graphs below use
# "Iterations" on the x-axis, and the epoch->iteration conversion later
# multiplies by 1000, so appends happen once per batch iteration).
loss_history = []
custom_loss_history = []
perplexity_history = []
for epoch in range(n_epochs):
    start = time.time()
    # Take subsets of train and target: random rows, with the target being
    # the next row -- assumes consecutive rows of text_matrix are consecutive
    # chunks of text (TODO confirm).
    sample = np.random.randint(0, text_matrix.shape[0]-1, subset_size)
    sample_train = text_matrix[sample, :]
    sample_target = text_matrix[sample+1, :]
    for iteration in range(sample_train.shape[0] // batch_size):
        take = iteration * batch_size
        x = sample_train[take:take+batch_size, :]
        y = sample_target[take:take+batch_size, :]
        current_loss, scce, custom, perplexity, new_min_custom_loss = train_on_batch(x, y, min_custom_loss)
        # Record when the running minimum of the custom loss improved.
        if new_min_custom_loss < min_custom_loss:
            min_custom_loss = new_min_custom_loss
            min_custom_epoch = epoch
        loss_history.append(current_loss)
        custom_loss_history.append(custom)
        perplexity_history.append(perplexity)
    # Per-epoch progress line (values are from the last batch of the epoch).
    print("{}. \t Total-Loss: {} \t Custom-Loss: {} \t Perplexity: {} \t Time: {} sec/epoch".format(
        epoch+1, current_loss.numpy(), custom, perplexity, round(time.time()-start, 2)))
model.save(F"/content/gdrive/My Drive/DeepComedyModels/deep_comedy_custom_loss_01_62char.h5")
"""## Graphs"""
fig, ax1 = plt.subplots()
color = 'tab:red'
ax1.set_xlabel('Iterations')
ax1.set_ylabel('Total Loss', color=color)
ax1.plot(loss_history, color=color)
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
ax2.set_ylabel('Custom Loss', color=color) # we already handled the x-label with ax1
ax2.plot(custom_loss_history, color=color)
ax2.tick_params(axis='y', labelcolor=color)
fig.tight_layout() # otherwise the right y-label is slightly clipped
plt.show()
print("The min custom loss is at iteration: {}".format(min_custom_epoch*1000))
plt.plot(perplexity_history)
plt.xlabel("Iterations")
plt.ylabel("Perplexity")
plt.show()
"""# Generative Model
At this point, let's check how the model generates text. In order to do it, we must make some changes to my RNN architecture above.
First, we must change the fixed batch size. After training, we want to feed just one sentence into my Network to make it continue the character sequence. We will feed a string into the model, make it predict the next character, update the input sequence, and repeat the process until a long generated text is obtained. Because of this, the succession of input sequences is now different from training session, in which portions of text were sampled randomly. we now have to set `stateufl = True` in the `LSTM()` layer, so that each LSTM cell will keep in memory the internal state from the previous sequence. With this we make the model remember better sequential information while generating text.
We will instantiate a new `generator` RNN with these new features, and transfer the trained weights of my `RNN` into it.
## Architecture
"""
# Input Layer: batch size of 1 so a single seed string can be fed in.
X = Input(shape=(None, ), batch_size=1)
# Embedding + Dense front-end, mirroring the training model.
embedded = Embedding(vocab_size, embedding_size)(X)
embedded = Dense(embedding_size, relu)(embedded)
# First LSTM; stateful=True keeps the cell state across successive calls,
# which is what lets the generator continue a character sequence.
encoder_output, hidden_state, cell_state = LSTM(units=lstm_unit_1,
                                                return_sequences=True,
                                                return_state=True,
                                                stateful=True)(embedded)
encoder_output = BatchNormalization()(encoder_output)
encoder_output = Dropout(dropout_value)(encoder_output)
encoder_output = Dense(embedding_size, activation='relu')(encoder_output)
# Doubled first-LSTM hidden state used as both initial h and c of the second
# LSTM; assumes lstm_unit_2 is twice lstm_unit_1 (TODO confirm).
initial_state_double = [tf.concat([hidden_state, hidden_state], 1), tf.concat([hidden_state, hidden_state], 1)]
encoder_output, hidden_state, cell_state = LSTM(units=lstm_unit_2,
                                                return_sequences=True,
                                                return_state=True,
                                                stateful=True)(encoder_output, initial_state=initial_state_double)
encoder_output = BatchNormalization()(encoder_output)
encoder_output = Dropout(dropout_value)(encoder_output)
encoder_output = Dense(hidden_size, activation='relu')(encoder_output)
encoder_output = Dropout(dropout_value)(encoder_output)
# Prediction layer: per-character logits.
Y = Dense(units=vocab_size)(encoder_output)
# Compile model
generator = Model(inputs=X, outputs=Y)
generator.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits = True), optimizer=optimizer)
print(generator.summary())
"""## Loading weights"""
# Import trained weights from RNN to generator: either the best checkpoint
# saved on disk, or the in-memory weights of the just-trained model.
load_file = False
if load_file:
    generator.load_weights("best_model.h5")
else:
    generator.set_weights(model.get_weights())
"""## Generating methods"""
def generate_text(start_string, model, num_generate = 1000, temperature = 1.0):
# Vectorize input string
input_eval = [char2idx[s] for s in start_string]
input_eval = tf.expand_dims(input_eval, 0)
text_generated = [] # List to append predicted chars
predicted_ids = []
idx2char = { v: k for k, v in char2idx.items() } # invert char-index mapping
model.reset_states()
for i in range(num_generate):
predictions = model(input_eval)
predictions = tf.squeeze(predictions, 0)
# sample next char based on distribution and temperature
predictions = predictions / temperature
predicted_id = tf.random.categorical(predictions, num_samples=1)[-1,0].numpy()
input_eval = tf.expand_dims([predicted_id], 0) # one letter input
# build the input for the next iteration, based on the last 5 characters generated
# become like a poetry!
#predicted_ids.append(predicted_id)
#if len(predicted_ids) > 5:
# predicted_ids = predicted_ids[1:]
#input_eval = tf.expand_dims(predicted_ids, 0)
text_generated.append(idx2char[predicted_id])
return (start_string + ''.join(text_generated))
"""## Text generation"""
# Let's feed the first lines:
start_string = """
Nel mezzo del cammin di nostra vita
mi ritrovai per una selva oscura,
chè la diritta via era smarrita.
"""
for t in [0.1, 0.2, 0.3, 0.5, 1.0]:
print("####### TEXT GENERATION - temperature = {}\n".format(t))
print(generate_text(start_string, generator, num_generate = 1000, temperature = t))
print("\n\n\n")
# Exam mode for 1 Canto so 33 terzine. 4000 characters to write
start_inferno = """
Nel mezzo del cammin di nostra vita
mi ritrovai per una selva oscura,
chè la diritta via era smarrita.
"""
start_purgatorio = """
Per correr miglior acque alza le vele
omai la navicella del mio ingegno,
che lascia dietro a se mar si crudele;
"""
start_paradiso = """
La gloria di colui che tutto move
per l'universo penetra, e risplende
in una parte più e meno altrove.
"""
start_new = """
"""
start = time.time()
generated = generate_text(start_inferno, generator, num_generate = 7000, temperature = 0.1)
print("Time to generate {} characters: {} sec".format(7000, round(time.time()-start, 2)))
print(generated)
"""## Save generated Canto to file for Plagiarism Test and Metrics"""
with open("generated.txt", "w+") as text_file:
text_file.write(generated)
"""# Plagiarism Test
Include the file **ngrams_plagiarism.py** downloaded from Virtuale
This mehod needs two file, we called it generated.txt (the same for the Metrics) and Inferno.txt (the first Canto of the Inferno).
"""
# Compare the generated canto against the real first Canto for n-gram overlap.
from ngrams_plagiarism import ngrams_plagiarism

# Read both files via context managers so the handles are closed promptly
# (the original left two file objects open).
with open('generated.txt') as gen_file:
    gen = gen_file.read()
with open('Inferno.txt') as truth_file:
    truth = truth_file.read()
ngrams_plagiarism(gen, truth)
"""# Metrics
Include the content of the folder **Deep Comedy Metrics** downloaded from Virtuale.
This method needs one file:
* generated.txt: the file generated by the network
with UTF-8 Encoding!
"""
!python3 main.py
"""# Custom loss used for debug and explaination"""
#@title
#@DEBUG CUSTOM LOSS
x = [[49, 46, 36, 44, 49, 32, 48, 36, 1, 45, 1, 35, 51, 36, 1, 45, 1, 50,
48, 36, 1, 46, 36, 48, 1, 49, 36, 30, 5, 0, 44, 45, 44, 1, 42, 32,
1, 37, 45, 48, 50, 51, 44, 32, 1, 35, 40, 1, 46, 48, 40, 43, 32, 1,
52, 32, 34, 32, 44, 50, 36, 5, 0, 44, 45, 44, 1, 35, 36, 34, 40, 43,
32, 49, 5, 1, 47, 51, 32, 36, 1, 49, 51, 44, 50, 1, 46, 32, 51, 46,
36, 48, 51, 43, 1, 14, 36, 40, 5, 1, 0, 0, 32, 35, 35, 40, 43, 32,
44, 35, 60, 5, 1, 43, 32, 1, 34, 45, 44, 50, 48, 45, 1, 32, 42, 1,
43, 45, 44, 35, 45, 1, 36, 48, 48, 32, 44, 50, 36, 0, 42, 40, 34, 36,
44, 55, 32, 1, 35, 40],
[42, 1, 34, 45, 44, 49, 40, 38, 42, 40, 45, 1, 44, 36, 42, 1, 47, 51,
32, 42, 36, 1, 45, 38, 44, 36, 1, 32, 49, 46, 36, 50, 50, 45, 0, 34,
48, 36, 32, 50, 45, 1, 58, 1, 52, 40, 44, 50, 45, 1, 46, 48, 40, 32,
1, 34, 39, 36, 1, 52, 32, 35, 32, 1, 32, 42, 1, 37, 45, 44, 35, 45,
5, 1, 0, 0, 46, 36, 48, 60, 1, 34, 39, 36, 1, 32, 44, 35, 32, 49,
49, 36, 1, 52, | |
import numpy as np
import pyDOE2
import sample_generator as sg
from copy import deepcopy
import os
import glob
import pickle
import sys
import emcee
from linna.nn import *
from scipy.special import erf
from scipy.stats import chi2
import io
import gc
import torch
from torch.utils.data import Dataset, DataLoader
from torch.utils import mkldnn as mkldnn_utils
from linna.util import *
import tempfile
def ml_sampler(outdir, theory, priors, data, cov, init, pool, nwalkers, gpunode, omegab2cut=None, nepoch=4500, method="zeus", nbest=None, chisqcut=None, loglikelihoodfunc=None):
    """
    Run LINNA with the hyperparameters set to the values described in To et al. 2022.

    Args:
        outdir (string): output directory
        theory (function): theory model
        priors (dict of str: [float, float]): 'flat' entries give [lower, upper] limits; 'gauss' entries give [mean, sigma]
        data (1d array): data vector
        cov (2d array): covariance matrix
        init (ndarray): initial guess of the MCMC
        pool (mpi pool, optional): MPI pool instance supporting pool.map(function, iterables)
        nwalkers (int): number of MCMC walkers
        gpunode (string): name of the GPU node
        omegab2cut (list of int): lower and upper limits of omegab*h^2
        nepoch (int, optional): maximum number of epochs for the neural-network training
        method (string, optional): sampler, either `emcee` or `zeus` (default)
        nbest (int or list of int): number of optimizer-selected points added to the training set per iteration
        chisqcut (float, optional): drop training data whose chisq exceeds this value
        loglikelihoodfunc (callable, optional): f(model, data, inv_cov) -> log likelihood; Gaussian likelihood if None

    Returns:
        nd array: MCMC chain
        1d array: log probability of the MCMC chain

    Raises:
        NotImplementedError: if `method` is not a supported sampler.
    """
    # Four NN-training iterations with fixed training/validation set sizes.
    ntrainArr = [10000] * 4
    nvalArr = [500] * 4
    # Chain-convergence settings that differ between the two samplers:
    # method -> (nkeepArr, ntimesArr) = (#autocorrelation times kept,
    # #autocorrelation times used as the stopping criterion).
    sampler_settings = {
        "emcee": ([2, 2, 5, 4], [5, 5, 10, 15]),
        "zeus": ([2, 2, 5, 5], [5, 5, 10, 50]),
    }
    try:
        nkeepArr, ntimesArr = sampler_settings[method]
    except KeyError:
        raise NotImplementedError(method) from None
    # Remaining per-iteration schedules are shared by both samplers.
    ntautolArr = [0.03, 0.03, 0.02, 0.01]
    temperatureArr = [4.0, 2.0, 1.0, 1.0]
    meanshiftArr = [0.2] * 4
    stdshiftArr = [0.15] * 4
    # Fixed evaluation/training configuration.
    dolog10index = None
    ypositive = False
    device = "cuda"
    docuda = False
    tsize = 1
    nnmodel_in = ChtoModelv2
    params = {"trainingoption": 1, "num_epochs": nepoch, "batch_size": 500}
    return ml_sampler_core(ntrainArr, nvalArr, nkeepArr, ntimesArr, ntautolArr, meanshiftArr, stdshiftArr, outdir, theory, priors, data, cov, init, pool, nwalkers, device, dolog10index, ypositive, temperatureArr, omegab2cut, docuda, tsize, gpunode, nnmodel_in, params, method, nbest=nbest, chisqcut=chisqcut, loglikelihoodfunc=loglikelihoodfunc)
def ml_sampler_core(ntrainArr, nvalArr, nkeepArr, ntimesArr, ntautolArr, meanshiftArr, stdshiftArr, outdir, theory, priors, data, cov, init, pool, nwalkers, device, dolog10index, ypositive, temperatureArr, omegab2cut=None, docuda=False, tsize=1, gpunode=None, nnmodel_in=None, params=None, method="emcee", nbest=None, chisqcut=None, loglikelihoodfunc=None, nsigma=3):
"""
LINNA main function
Args:
ntrainArr (int array): number of training data per iteration
nvalArr (int array): number of validation data per iteration
nkeepArr (int array): number of autocorrelation time to be kept
ntimesArr (int array): number of autocorrelation time to stop mcmc
ntautolArr (float array): error limit of autocorrelation time
meanshiftArr (float array): limit on mean shift of parameter estimation from the first and second half of the chain
stdshiftArr (float array): limit on std shift of parameter estimation from the first and second half of the chain
outdir (string): output directory
theory (function): theory model
priors (dict of str: [float, float]): string can be either flat or gauss. If the string is 'flat', [a,b] indicates the lower and upper limits of the prior. If the string is 'gauss', [a,b] indicates the mean and sigma.
data (1d array): float array, data vector
cov (2d array): float array, covariance matrix
init (ndarray): initial guess of mcmc,
pool (object): mpi4py pool instance
nwalkers (int) number of mcmc walkers
device (string): cpu or gpu
dolog10index (int array): index of parameters to do log10
ypositive (bool): whether the data vector is expected to be all positive
temperatureArr (float array): temperature parameters for each iteration
omegab2cut (list of int): 2 elements containing the lower and upper limits of omegab*h^2
docuda (bool): whether do gpu for evaluation
tsize (int, optional): number of cores for training
gpunode (string): name of gpu node
nnmodel_in (string): instance of neural network model
params (dictionary): dictionary of parameters
method (string): sampling method
nbest (int or list of int): number of points to include in the training set per iteration according to the optimizer
chisqcut (float, optional): cut the training data if there chisq is greater than this value
loglikelihoodfunc (callable): function of model, data , inverse of covariance matrix and return the log liklihood value
nsigma (float): the training point in the first iteration will be generated within nsigma of the gaussian prior
Returns:
nd array: MCMC chain
1d array: log probability of MCMC chain
"""
ndim = len(init)
sigma = np.sqrt(np.diag(cov))
inv_cov = np.linalg.inv(cov)
prior_range = []
for item in priors:
if item['dist'] == 'flat':
prior_range.append([item['arg1'], item['arg2']])
elif item['dist'] == 'gauss':
prior_range.append([item['arg1']-5*item['arg2'], item['arg1']+5*item['arg2']])
else:
print("not implement dist : {0}".format(item['dist']), flush=True)
assert(0)
transform = Transform(priors)
invtransform = invTransform(priors)
init = invtransform(init)
if method=="emcee":
filename = "chemcee_256.h5"
elif method == "zeus":
filename = "zeus_256.h5"
else:
raise NotImplementedError(method)
for i, (nt, nv, nk, ntimes, tautol, temperature, meanshift, stdshift) in enumerate(zip(ntrainArr, nvalArr, nkeepArr, ntimesArr, ntautolArr, temperatureArr, meanshiftArr, stdshiftArr)):
if isinstance(nbest, list):
nbest_in = nbest[i]
if nbest_in <=0:
nbest_in = None
else:
nbest_in = nbest
if nbest_in is not None:
tempdir = tempfile.TemporaryDirectory()
def negloglike(x):
d = data-theory([-1,x], tempdir)
return d.dot(inv_cov.dot(d))
else:
negloglike=None
temperature = temperature**2
print("#"*100)
print("iteration: {0}".format(i), flush=True)
print("#"*100)
outdir_in = os.path.join(outdir, "iter_{0}/".format(i))
if i==0:
chain=None
else:
chain_name = os.path.join(os.path.join(outdir, "iter_{0}/".format(i-1)), filename[:-3])
if os.path.isfile(chain_name+".h5"):
chain_name = chain_name+".h5"
chain, _temp, _temp2= read_chain_and_cut(chain_name.format(i-1), nk, ntimes, method=method)
else:
chain_name = chain_name+".txt"
chain = np.loadtxt(chain_name)[-100000:,:-1]
#Generate training
ntrain = nt
nval = nv
nnsampler = NN_samplerv1(outdir_in, prior_range)
if "trainingoption" in params:
options = params['trainingoption']
else:
options = 0
generate_training_point(theory, nnsampler, pool, outdir_in, ntrain, nval, data, inv_cov, chain, nsigma=nsigma, omegab2cut=omegab2cut, options=options, negloglike= negloglike, nbest_in=nbest_in, chisqcut=chisqcut)
chain = None
del chain
if i!=0:
try:
del _temp
del _temp2
except:
pass
gc.collect()
if (pool is None) or pool.is_master():
outdir_list = [os.path.join(outdir, "iter_{0}/".format(m)) for m in range(int(i+1))]
f = open(outdir_list[-1]+"/model_pickle.pkl", 'wb')
pickle.dump(train_NN, f)
f.close()
f = open(outdir_list[-1]+"/model_args.pkl", 'wb')
if gpunode is not None:
docuda=True
else:
docuda=torch.cuda.is_available()
pickle.dump([nnsampler, cov, inv_cov, sigma, outdir_in, outdir_list, data, dolog10index, ypositive, False, 2, temperature, docuda, None, 1, nnmodel_in, params, nbest_in is not None], f)
f.close()
if not os.path.isfile(outdir_list[-1] + "/finish.pkl"):
if gpunode == 'automaticgpu':
while(True):
gpufile = os.path.join(outdir, "gpunodeinfo.pkl")
try:
if os.path.isfile(gpufile):
with open(gpufile, 'rb') as f:
gpuinfo = pickle.load(f)
gpunode = gpuinfo["nodename"]
break
except:
pass
if gpunode is not None:
print("running gpu on {0}".format(gpunode), flush=True)
os.system("cat {2}/train_gpu.py | ssh {0} python - {1} {3}".format(gpunode, outdir_list[-1], os.path.dirname(os.path.abspath(__file__)), "cuda"))
while(1):
if os.path.isfile(outdir_list[-1] + "/finish.pkl"):
break
else:
os.system("python {1}/train_gpu.py {0} {2}".format(outdir_list[-1], os.path.dirname(os.path.abspath(__file__)), "nocuda"))
while(1):
if os.path.isfile(outdir_list[-1] + "/finish.pkl"):
break
#Retrieve model
model, y_invtransform_data = retrieve_model(outdir_in, len(init), len(data), nnmodel_in)
if not docuda:
model.model = model.model.to(memory_format=torch.channels_last)
model.MKLDNN=True
#Do MCMC
if os.path.isfile(os.path.join(outdir_in, filename)):
continue
invcov_new = torch.from_numpy(inv_cov.astype(np.float32)).to('cpu').detach().clone().requires_grad_()
data_new = torch.from_numpy(data.astype(np.float32)).to('cpu').detach().clone().requires_grad_()
if loglikelihoodfunc is None:
loglikelihoodfunc = gaussianlogliklihood
log_prob = Log_prob(data_new, invcov_new, model, y_invtransform_data, transform, temperature, nograd=True, loglikelihoodfunc=loglikelihoodfunc)
dlnp = None
ddlnp = None
if pool is not None:
pool.noduplicate=True
run_mcmc(nnsampler, outdir_in, method, ndim, nwalkers, init, log_prob, dlnp=dlnp, ddlnp=ddlnp, pool=pool, transform=transform, ntimes=ntimes, tautol=tautol, meanshift=meanshift, stdshift=stdshift, nk=nk)
if pool is not None:
pool.noduplicate_close()
chain_name = os.path.join(os.path.join(outdir, "iter_{0}/".format(len(ntrainArr)-1)), filename[:-3])
if os.path.isfile(chain_name+".h5"):
chain_name = chain_name+".h5"
chain, log_prob_samples_x, reader = read_chain_and_cut(chain_name.format(len(ntrainArr)-1), nk, ntimes, method=method)
log_prob_samples_x = reader.get_log_prob(discard=0, flat=True, thin=1)
else:
chain_name = chain_name+".txt"
chain = np.loadtxt(chain_name)[-100000:,:-1]
log_prob_samples_x = np.loadtxt(chain_name)[-100000:,-1]
#Optional importance sampling
if 'nimp' | |
<reponame>Epihaius/panda3dstudio
from ....base import *
from math import sin, cos, acos
class ExtrusionInsetMixin:
""" PolygonEditMixin class mix-in """
def __compute_extr_inset_data(self, main_extr_vec_only=False, poly_ids=None):
"""
Compute the data for previewing or creating extrusions and insets.
This data includes direction vectors for each of these operations, needed
to offset the vertices of the newly created polygons.
"""
merged_verts = self.merged_verts
merged_edges = self.merged_edges
verts = self._subobjs["vert"]
edges = self._subobjs["edge"]
polys = self._subobjs["poly"]
target_poly_ids = set(poly_ids if poly_ids else self._selected_subobj_ids["poly"])
target_polys = set(polys[p_id] for p_id in target_poly_ids)
# a polygon region is a collection of polygons, each of which shares at
# least one edge with at least one other polygon in that collection
regions = []
borders = []
avg_normals = {}
extr1_vecs = {} # per-vertex averaged polygon normal
extr2_vecs = {} # individual polygon normal
extr3_vecs = {} # per-region averaged polygon normal
inset1_vecs = {}
inset2_vecs = {}
sides = {}
data = {
"regions": regions,
"borders": borders,
"avg_normals": avg_normals,
"extr1_vecs": extr1_vecs,
"extr2_vecs": extr2_vecs,
"extr3_vecs": extr3_vecs,
"inset1_vecs": inset1_vecs,
"inset2_vecs": inset2_vecs,
"sides": sides
}
edge_vert_ids1 = []
computed_extr1_vecs = []
computed_extr3_vecs = []
sign = -1 if self.owner.has_inverted_geometry() else 1
def get_poly_neighbor_ids(poly):
"""
Return the IDs of the polygons that share an edge with the given poly.
"""
neighbor_ids = set()
for edge in poly.edges:
neighbor_ids.update(edge.merged_edge.polygon_ids)
neighbor_ids.remove(poly.id)
return neighbor_ids
def get_polygon_region(poly_id):
"""
Return the region of contiguously connected polygons, including the
one with the given poly_id.
"""
poly = polys[poly_id]
poly_ids = {poly_id}
neighbor_ids = list(get_poly_neighbor_ids(poly) & target_poly_ids)
while neighbor_ids:
neighbor_id = neighbor_ids.pop()
neighbor = polys[neighbor_id]
neighbor_ids.extend(get_poly_neighbor_ids(neighbor) & target_poly_ids - poly_ids)
poly_ids.add(neighbor_id)
return set(polys[p_id] for p_id in poly_ids)
def get_border_edge_verts(region):
"""
Return a list of edges that make up the border(s) of the given
polygon region, with each edge represented by a (end_merged_vert1,
end_merged_vert2) tuple.
"""
edge_mvs = []
for p in region:
ids = (edge[:] for edge in p.edges)
mvs = ((merged_verts[vi1], merged_verts[vi2]) for vi1, vi2 in ids)
for mv_tuple in mvs:
if mv_tuple[::-1] in edge_mvs:
edge_mvs.remove(mv_tuple[::-1])
else:
edge_mvs.append(mv_tuple)
return edge_mvs
def compute_inset_vectors(region, border_data):
"""
For each vertex, compute the vector used to inset all selected polys
connected at that vertex.
"""
border_edge_loop, intersection_data = border_data
border_merged_verts = [merged_verts[e[1]] for e in border_edge_loop]
old_merged_verts = {}
for index, split_merged_vert in intersection_data:
old_merged_vert = border_merged_verts[index]
for v_id in split_merged_vert:
old_merged_verts[v_id] = old_merged_vert
merged_verts[v_id] = split_merged_vert
border_merged_verts[index] = split_merged_vert
border_merged_verts.append(border_merged_verts[0])
for i in range(len(border_merged_verts) - 1):
v0 = border_merged_verts[i - (2 if i == 0 else 1)]
v1 = border_merged_verts[i]
v2 = border_merged_verts[i + 1]
vec1 = (v0.get_pos() - v1.get_pos()).normalized()
vec2 = (v2.get_pos() - v1.get_pos()).normalized()
connected_edges = (e for e in v1.connected_edges
if polys[e.polygon_id] in region)
shared_edges = [e.merged_edge for e in connected_edges]
shared_edges = set(me for me in shared_edges
if shared_edges.count(me) > 1)
if shared_edges:
inset1_vec = sum((me.get_direction_vector(v1.id)
for me in shared_edges), Vec3()).normalized()
angle1 = acos(vec1.dot(inset1_vec))
angle2 = acos(inset1_vec.dot(vec2))
cosine = cos(angle1 + angle2)
else:
inset1_vec = (vec1 + vec2).normalized()
cosine = vec1.dot(vec2)
normal = sum((p.normal for p in v1.connected_polys
& region), Vec3()).normalized()
if cosine < -.999:
inset_scale = 1.
if not shared_edges:
inset1_vec = vec1.cross(normal).normalized()
else:
sine = sin(acos(min(1., cosine)) * .5)
inset_scale = 1. / sine if sine > .0001 else 0.
if not shared_edges:
cross_vec = vec2.cross(vec1).normalized()
# reverse inset vector when inner corner angle > 180 degrees
# (i.e. where the polygon is concave)
inset1_vec *= -1. if cross_vec.dot(normal) < 0. else 1.
inset1_vec = Vec4(*inset1_vec, inset_scale)
for vert_id in v1:
if polys[verts[vert_id].polygon_id] in region:
inset1_vecs[vert_id] = inset1_vec
merged_verts.update(old_merged_verts)
def compute_per_vertex_extrusion_vector(merged_vert, region):
"""
Compute the vector used to extrude polys connected at the given vertex;
it is computed in a way that depends on the number of connected
polygons (as an optimization, polygons with (almost) duplicate
normals are discarded):
*) two polygons:
the extrusion vector points to the closest point on the intersection
line of the polygon planes;
*) three polygons:
the extrusion vector points to the intersection point of the polygon
planes;
*) four or more polygons:
the extrusion vector is the normalized sum of the normals of
all polygons connected at this vertex, scaled by the length of
another vector;
polygon normals are sorted by their dot product with the average
polygon normal, smallest to largest; only the first four normals
will be considered (otherwise the computation might become too
slow, even though this restriction can lead to suboptimal results);
the corresponding planes should be the four most significant ones
as they make the sharpest angles;
with P1 the intersection point of the first three planes and P2
the intersection point of the first two planes with the fourth, the
scale vector points to the median of P1 and P2.
"""
polys_at_vert = merged_vert.connected_polys & region
normals_at_vert = [p.normal.normalized() for p in polys_at_vert]
avg_poly_normal = sum(normals_at_vert, Vec3()).normalized()
normals = []
for n in normals_at_vert:
for other_n in normals:
if abs(n.dot(other_n)) > .999:
break
else:
normals.append(n)
normals_by_dot = {(avg_poly_normal.dot(n), i): n for i, n in enumerate(normals)}
normals = [normals_by_dot[d] for d in sorted(normals_by_dot)][:4]
planes = [Plane(n, Point3() + n) for n in normals]
point_on_line = Point3()
line_vec = Vec3()
intersection_point = Point3()
if len(planes) == 1:
# there's only one poly at the vertex; the extrusion vector
# is the normal to that poly
extrusion_vec = normals[0]
else:
if planes[0].intersects_plane(point_on_line, line_vec, planes[1]):
if len(planes) == 2:
# there are two polys at the vertex; the extrusion
# vector is perpendicular to the intersection line of
# both polygon planes
extrusion_vec = Vec3(point_on_line)
extrusion_vec -= extrusion_vec.project(line_vec)
elif len(planes) == 2:
extrusion_vec = normals[0]
if len(planes) < 3:
return extrusion_vec * sign
scale_vec = None
while len(planes) > 2:
if planes.pop(2).intersects_line(intersection_point, point_on_line,
point_on_line + line_vec):
tmp_vec = Vec3(intersection_point)
else:
tmp_vec = None
if scale_vec and tmp_vec:
scale_vec = (scale_vec + tmp_vec) * .5
else:
scale_vec = tmp_vec
l = scale_vec.length() if scale_vec else 1.
return avg_poly_normal * l * sign
# Process all target polygons; compute the extrusion and inset vectors for
# all of their vertices.
for poly_id in target_poly_ids:
poly = polys[poly_id]
for i, region in enumerate(regions):
if poly in region:
edge_mvs = edge_vert_ids1[i]
tmp_extr_vecs = computed_extr1_vecs[i]
extr3_vec = Vec3() if main_extr_vec_only else computed_extr3_vecs[i]
break
else:
region = get_polygon_region(poly_id)
regions.append(region)
edge_mvs = get_border_edge_verts(region)
edge_vert_ids1.append(edge_mvs)
border_edges = self.get_region_border_edges(region)
if border_edges:
borders.append(border_edges)
tmp_extr_vecs = {}
computed_extr1_vecs.append(tmp_extr_vecs)
if not main_extr_vec_only:
# compute the vector used to extrude the polygon region at every
# vertex; it is the per-region averaged polygon normal
extr3_vec = sum((p.normal.normalized() for p in region),
Vec3()).normalized()
computed_extr3_vecs.append(extr3_vec)
for border_data in border_edges:
compute_inset_vectors(region, border_data)
poly_verts = poly.vertices
for vert in poly_verts:
merged_vert = vert.merged_vertex
if merged_vert in tmp_extr_vecs:
extr1_vec, avg_normal = tmp_extr_vecs[merged_vert]
else:
extr1_vec = compute_per_vertex_extrusion_vector(merged_vert, region)
if main_extr_vec_only:
avg_normal = Vec3()
else:
# As an alternative, compute the averaged vertex normal
verts_in_sel = (v for v in merged_vert.connected_verts
if polys[v.polygon_id] in region)
avg_normal = sum((v.normal for v in verts_in_sel),
Vec3()).normalized() * sign
tmp_extr_vecs[merged_vert] = (extr1_vec, avg_normal)
extr1_vecs[vert.id] = extr1_vec
avg_normals[vert.id] = avg_normal
# the vector used to extrude an individual poly is just its normalized normal
extr2_vec = Vec3() if main_extr_vec_only else poly.normal.normalized()
extr2_vecs[poly_id] = extr2_vec
# store the per-region averaged polygon normal
extr3_vecs[poly_id] = Vec3() if main_extr_vec_only else extr3_vec
if not main_extr_vec_only:
# Compute the vectors used to inset individual polys
poly_verts.append(poly_verts[0])
for i in range(len(poly_verts) - 1):
v0 = poly_verts[i - (2 if i == 0 else 1)]
v1 = poly_verts[i]
v2 = poly_verts[i + 1]
vec1 = (v0.get_pos() - v1.get_pos()).normalized()
vec2 = (v2.get_pos() - v1.get_pos()).normalized()
cosine = vec1.dot(vec2)
if cosine < -.999:
inset_scale = 1.
inset2_vec = vec1.cross(extr2_vec).normalized()
else:
sine = sin(acos(min(1., cosine)) * .5)
inset_scale = 1. / sine if sine > .0001 else 0.
inset2_vec = (vec1 + vec2).normalized()
cross_vec = vec2.cross(vec1).normalized()
# reverse inset vector when inner corner angle > 180 degrees
# (i.e. where the polygon is concave)
inset2_vec *= -1. if | |
import json
from copy import deepcopy
from random import shuffle
# The 78 cards of a standard tarot deck: the 22 major arcana first, then the
# 56 minor arcana by suit (wands, cups, swords, coins), each suit running
# king, queen, knight, page, ten ... two, ace.
cards = ['magician', 'high priestess', 'empress', 'emperor', 'hierophant', 'lovers', 'chariot', 'justice', 'hermit',
         'wheel of fortune', 'strength', 'hanged man', 'death', 'temperance', 'devil', 'tower', 'star', 'moon', 'sun',
         'judgement', 'world', 'fool', 'king of wands', 'queen of wands', 'knight of wands', 'page of wands',
         'ten of wands', 'nine of wands', 'eight of wands', 'seven of wands', 'six of wands', 'five of wands',
         'four of wands', 'three of wands', 'two of wands', 'ace of wands', 'king of cups', 'queen of cups',
         'knight of cups', 'page of cups', 'ten of cups', 'nine of cups', 'eight of cups', 'seven of cups',
         'six of cups', 'five of cups', 'four of cups', 'three of cups', 'two of cups', 'ace of cups', 'king of swords',
         'queen of swords', 'knight of swords', 'page of swords', 'ten of swords', 'nine of swords', 'eight of swords',
         'seven of swords', 'six of swords', 'five of swords', 'four of swords', 'three of swords', 'two of swords',
         'ace of swords', 'king of coins', 'queen of coins', 'knight of coins', 'page of coins', 'ten of coins',
         'nine of coins', 'eight of coins', 'seven of coins', 'six of coins', 'five of coins', 'four of coins',
         'three of coins', 'two of coins', 'ace of coins']
# Divinatory meaning of each card when drawn upright, keyed by card name.
# Every entry of `cards` has a key in this table.
# NOTE(review): 'devil' and 'tower' share an identical upright meaning --
# possibly a copy-paste error; confirm against the source material.
upright = {'magician': 'creativity, self-confidence, dexterity, sleight of hand,will-power, skill',
           'high priestess': 'knowledge, wisdom, learning, intuition, impatience, virtue, purity',
           'empress': 'development, accomplishment action, evolution',
           'emperor': 'authority, father-figure, structure, solid foundation',
           'hierophant': 'mercy, conformity, forgiveness, social approval, bonded, inspiration',
           'lovers': 'harmony, trust,romance, optimism, honor, love, harmony',
           'chariot': 'perseverance, rushed decision, turmoil, vengeance, adversity',
           'justice': 'equality, righteousness, virtue, honor, harmony, balance',
           'hermit': 'inner strength, prudence, withdrawal, caution, vigilance',
           'wheel of fortune': 'unexpected events, advancement, destiny, fortune, progress',
           'strength': 'courage, conviction, strength, determination, action, heroism, virility',
           'hanged man': 'change, reversal, boredom, improvement, rebirth, suspension, change',
           'death': 'unexpected change, loss, failure, transformation, death, bad luck',
           'temperance': 'temperance, patience, good influence, confidence, moderation',
           'devil': 'downfall, unexpected failure, controversy, ravage, disaster, ill tempered',
           'tower': 'downfall, unexpected failure, controversy, ravage, disaster, ill tempered',
           'star': 'balance, pleasure, optimism, insight, spiritual love, hope, faith',
           'moon': 'double-dealing Deception, disillusionment, trickery, error, danger, disgrace',
           'sun': 'accomplishment, success, love, joy, happy marriage, satisfaction',
           'judgement': 'awakening, renewal, rejuvenation, rebirth, improvement, promotion, atonement, judgment',
           'world': 'perfection, recognition, success, fulfillment, eternal life',
           'fool': 'beginnings possibilities, pleasure, thoughtlessness, adventure, opportunity',
           'king of wands': 'passionate, good leader, noble',
           'queen of wands': 'fondness, attraction, command ',
           'knight of wands': 'generous, journey, impetuous',
           'page of wands': 'enthusiasm, exploration, discovery, free spirit',
           'ten of wands': 'pain, ruined, failure',
           'nine of wands': 'victory, good health, obstinacy',
           'eight of wands': 'new ideas, love, journey',
           'seven of wands': 'stiff competition, victory, courage, energy',
           'six of wands': 'leadership, good news, success',
           'five of wands': 'lawsuit or quarrel, courage, competition',
           'four of wands': 'dissatisfaction, kindness, reevaluation ',
           'three of wands': 'cooperation, good partnership, success',
           'two of wands': 'generous person, courage, patience, courage ',
           'ace of wands': 'profitable journey, new business, beginning, new career, birth, inheritance',
           'king of cups': 'kindness, willingness, enjoyment',
           'queen of cups': 'loving mother, gentle, happiness',
           'knight of cups': 'emotional, romantic dreamer, intelligence',
           'page of cups': 'sweetness, interest in literature, gentleness',
           'ten of cups': 'friendship, happiness, life',
           'nine of cups': 'physical well-being, hopes, security',
           'eight of cups': 'disappointment, abandonment, misery',
           'seven of cups': 'imagination, illusion, directionless',
           'six of cups': 'acquaintance, good memories, acquaintance, happiness',
           'five of cups': 'broken marriage,vain regret, sorrow, loss',
           'four of cups': 'dissatisfaction, kindness, reevaluation, redemption',
           'three of cups': 'fortune, hospitality, discovery',
           'two of cups': 'romance, friendship, cooperation',
           'ace of cups': 'good health, love, joy, beauty',
           'king of swords': 'powerful, friendship, counselor',
           'queen of swords': 'skillful, brave, clever, rush',
           'knight of swords': 'strong man, braver, clever person',
           'page of swords': 'grace, diplomacy, dexterity, grace',
           'ten of swords': 'defeat, failure, pain',
           'nine of swords': 'desolation, illness, suspicion, cruelty',
           'eight of swords': 'weakness, indecision, censure',
           'seven of swords': 'betrayal, insolence, unwise attempt',
           'six of swords': 'harmony, sorrow, journey',
           'five of swords': 'defeat, cowardliness, empty victory',
           'four of swords': 'temporary exile, strife, retreat',
           'three of swords': 'broken relationship, civil war',
           'two of swords': 'indecision, trouble, balanced',
           'ace of swords': 'love, valiant, victory',
           'king of coins': 'reliable person, steadiness ',
           'queen of coins': 'thoughtfulness, intelligence, talents, melancholy ',
           'knight of coins': 'dull outlook, patience, animal lover, trustworthy ',
           'page of coins': 'kindness,new ideas/opinions, scholar ',
           'ten of coins': 'wealth, property, stability ',
           'nine of coins': 'solitude, well-being, green thumb ',
           'eight of coins': 'employment, money, learning, trade',
           'seven of coins': 'development, re-evaluation, effort, hard work ',
           'six of coins': 'prosperity, philanthropy, charity, gifts ',
           'five of coins': 'destitution, poor health, despair, loneliness ',
           'four of coins': 'ungenerous, greed, miserly ',
           'three of coins': 'abilities, approval,effort, abilities ',
           'two of coins': 'harmony, new projects, helpful ',
           'ace of coins': 'prosperity, happiness, pleasure'}
# Divinatory meaning of each card when drawn reversed (inverted), keyed by
# card name. Parallels the `upright` table; every entry of `cards` has a key here.
# Fixed truncated words in four entries: 'false accusation' (justice),
# 'false prophecy' (hanged man), 'lack of vision' (world), and
# 'lack of concentration' (empress) -- the originals had dropped letters.
reverse = {'magician': 'delay, unimaginative, insecurity, lack of self-confidence',
           'high priestess': 'selfishness, shallowness, misunderstanding, ignorance',
           'empress': 'inaction, lack of concentration, vacillation, anxiety, infidelity',
           'emperor': 'domination, excessive control, rigidity, inflexibility',
           'hierophant': 'vulnerability, unconventionality, foolish generosity, impotence, frailty, unorthodoxy',
           'lovers': 'separation, frustration, unreliability,fickleness, untrustworthy',
           'chariot': 'vanquishment, defeat, failure, unsuccessful',
           'justice': 'false accusation, unfairness, abuse, biased',
           'hermit': 'hastiness, rashness,immaturity, imprudence, foolishness',
           'wheel of fortune': 'interruption, outside influences, failure, bad luck',
           'strength': 'pettiness, sickness, unfaithfulness, weakness',
           'hanged man': 'false prophecy, useless sacrifice, unwillingness',
           'death': 'immobility, slow changes, cheating, death, stagnation',
           'temperance': 'conflict, disunion, frustration, impatience, discord',
           'devil': 'release, enlightenment, divorce, recovery',
           'tower': 'entrapment, imprisonment, old ways, rustic',
           'star': 'disappointment, bad luck, imbalance, broken dreams',
           'moon': 'trifling mistakes, deception discovered, negative advantage',
           'sun': 'loneliness, canceled plans, unhappiness, break ups',
           'judgement': 'disappointment, indecision, death, failure, ill-health, theft, worry',
           'world': 'lack of vision, disappointment, imperfection',
           'fool': 'indecision, hesitation, injustice, apathy, bad choice',
           'king of wands': 'unyielding, prejudice, quarrels',
           'queen of wands': 'jealous, revengeful, infidelity',
           'knight of wands': 'suspicion, jealousy, narrow-mindedness',
           'page of wands': 'setbacks to new ideas, pessimism, lack of direction',
           'ten of wands': 'cleverness, energy, strength',
           'nine of wands': 'weakness, ill-health, adversity',
           'eight of wands': 'violence, quarrels, courage',
           'seven of wands': 'advantage, patience, indecision',
           'six of wands': 'postponement, bad news, pride in riches',
           'five of wands': 'new opportunities, harmony, generosity',
           'four of wands': 'new relationship, new ambitions, action',
           'three of wands': 'carelessness, arrogance, pride, mistakes',
           'two of wands': 'impatience, domination',
           'ace of wands': 'selfishness, lack of determination, setback',
           'king of cups': 'double-dealer, scandal, crafty, violent',
           'queen of cups': 'perverse, unhappy, gloom, over-active imagination',
           'knight of cups': 'idleness, untruthful, fraud, sensuality',
           'page of cups': 'poor imagination, selfishness, no desires',
           'ten of cups': 'waste, broken relationships, quarrel',
           'nine of cups': 'illness, failure, overindulgence',
           'eight of cups': 'pleasure, success, joy',
           'seven of cups': 'will-power, determination',
           'six of cups': 'friendship, disappointment, past',
           'five of cups': 'return, summon, hope',
           'four of cups': 'new goals, ambitions, beginning',
           'three of cups': 'hidden, overindulgence, pain, gossip',
           'two of cups': 'violent passion, misunderstanding',
           'ace of cups': 'egotism, selfishness, hesitancy',
           'king of swords': 'obstinate, evil intentions, judgments',
           'queen of swords': 'sly, keen, deceitful',
           'knight of swords': 'troublemaker, a crafty, tyranny',
           'page of swords': 'imposture, ill-health, cunningness',
           'ten of swords': 'courage, positive energy, good health',
           'nine of swords': 'unselfishness, good news, healing',
           'eight of swords': 'freedom, new beginnings, relaxation',
           'seven of swords': 'counsel, helpful, advice',
           'six of swords': 'obstacles, difficulties, defeat',
           'five of swords': 'unfairness, defeat, loss',
           'four of swords': 'social unrest, labor strikes, renewed activity',
           'three of swords': 'sorrow, loss, confusion',
           'two of swords': 'unscrupulous, release',
           'ace of swords': 'obstacles, tyranny, power',
           'king of coins': 'bribes, materialistic, calm',
           'queen of coins': 'mistrust, suspicion, neglect',
           'knight of coins': 'carelessness, standstill, irresponsible',
           'page of coins': 'luxury, rebellious, bad news',
           'ten of coins': 'dull, slothfulness, misfortune',
           'nine of coins': 'caution, possible loss',
           'eight of coins': 'void, no ambition, dislike',
           'seven of coins': 'impatience, slow progress, investments',
           'six of coins': 'jealousy, miserliness, unfairness',
           'five of coins': 'employment, courage, revival',
           'four of coins': 'spendthrift, obstacles, earthy possessions',
           'three of coins': 'preoccupation, ambitions',
           'two of coins': 'difficulty, discouragement',
           'ace of coins': 'misery, greedy, money'}
class TarotDeck:
def __init__(self):
self.deck = | |
branch eventually converges to. To create a new branch:
#
# - Go to your repo `hello-world`
# - Click the drop down that says **branch:main**.
# - Type a branch name, `readme-edits`, into the new branch text box.
# - Select the blue **Create branch** box.
#
# Now you have two branches, `main` and `readme-edits`. These two branches are identical right now, but the changes you make to the `readme-edits` branch will not directly affect the `main` branch.
#
# ### Make and commit changes
#
# After creating your *readme-edits* branch, you should now be on the code-view for that branch. Let's make some edits and see what happens. Saved changes on GitHub are called commits. Each commit also contains a description explaining why a particular change was made. This is particularly useful for developing code over large periods of time or within groups. Let's make some changes to the **README.md** file and commit those to the repo:
# - Click the **README.md** file.
# - Click the pencil icon in the upper right corner to edit.
# - In the editor, write a bit about yourself. What are your research interests?
# - Write a commit message that describes your changes (e.g., "I'm super awesome because I can make changes to my very own GitHub repo!")
# - Click the **Commit changes** button.
#
# The changes are now saved to the `readme-edits` branch, so now this branch is different from the `main` branch.
#
# ### Open a Pull Request
#
# A pull request is a way for you (and others) to suggest changes to the *main* branch. Since you made changes to the `readme-edits` branch, you can now issue a `pull request`. A pull request will show the differences between both branches. The changes, addition, and subtraction are shown in green and red. In this process, you can also use the @mention system with other GitHub users to have discussions about the pull request and receive feedback. Now we'll open a pull request so that you can see how to review changes (although you may not do this as often for your own repos).
#
# - Click the **Pull Request** tab, then click the green **New pull request** button.
# - In the **Example Comparisons** box, select the `readme-edits` branch to compare with `main`.
# - Look over the changes between branches and make sure they are what you want. Then click the green **Create Pull Request** button.
# - Give your pull request a title and write a brief description of your changes. Logging the changes in your edits will make it easier to diagnose problems later.
# - Click **Create pull request**!
#
# ### Merge your Pull Request
#
# In the previous 2 sections, you made a new branch of `hello-world`, edited the branch, and submitted a pull request. The final step is to bring the changes to the `main` branch. To merge your `readme-edits` branch into `main`:
#
# - Click the green **Merge pull request** button to merge the changes into `main`.
# - Click **Confirm merge**.
# - Delete the `readme-edits` branch, since its changes have been incorporated with the **Delete branch** button in the purple box.
#
# ### GitHub Desktop
#
# The above guide can be used to create a repository through the web interface of GitHub. When working with your own repos, it is a little easier to use **GitHub Desktop**, which is a desktop application that simplifies pushing changes to the `main` branch. This requires a software install, which can be found [here](https://desktop.github.com/). After installing the software, you can:
#
# - Clone your repository to your local path (File --> Clone repository).
# - Open your repo from your local path and edit your files.
# - After saving your files in the local path, the changes will appear in GitHub Desktop (similar to a pull request).
# - Add a description of the changes and click commit to the *main* branch (blue button)
# - Click the **Push origin** button (update all your changes back to the web version of GitHub)
#
# ## LaTex (Preparing your work)
#
# In the past, scientists had to learn two skills: scientific inquiry and typesetting. However, mathematicians developed a typesetting software, LaTex, that was more programmatic, which made it easier to typeset equations within a document. In this course, you will need to communicate your results to others (especially your instructor), where you will use LaTex. To make it easier, we will use the online platform **Overleaf**. Similar to the guide for GitHub, it is assumed that you have successfully created an Overleaf account. Note that Overleaf provides its own guides that can be found [here](https://www.overleaf.com/learn/how-to/Creating_a_document_in_Overleaf).
#
# ### Creating a project
#
# Creating a project in Overleaf can be accomplished in **two** ways: 1) start a project from scratch or 2) start a project from a template. To start a project from scratch:
#
# - Click the green **New Project** button
# - Select **Blank Project**
# - Name your project
# - Click **Create** and then the editor will open
#
# To start a project from a template:
#
# - Click the green **New Project** button
# - Select **Academic Journal** from the Templates
# - Find the **RevTex** tag at the bottom (collection of green tags) and Click it
# - Select the **RevTex 4.2** template from the American Physical Society
#
# When preparing your class assignments, you can build your project from scratch so that you can learn more about the LaTex environment. To submit each of your projects, you will need to build from the **RevTex** template because it will import the default style for a Journal like *Physical Review*.
#
# ### Your 1st Document
#
# Open the blank project that you created in the previous section. In this project, we will create a simple working example (look [here](https://www.overleaf.com/learn/latex/Creating_a_document_in_LaTeX)). A LaTex document contains some *front* matter, a *body*, and some *back* matter. The front matter tells the LaTex compiler what kind of document you are trying to create, how should the document be formatted *globally*. The body will have the text, figures, and tables in a manner similar to most word processors. The back matter will tell the LaTex compiler how to format references or setup an Appendix.
#
# Here's a simple working example:
#
# >\documentclass{article}
#
# >\begin{document}
#
# >First document. This is a simple example, with no extra parameters or packages included.
#
# >\end{document}
#
# This example will create an **article** document, adds the text, and compiles it as a *pdf* file in the right window.
#
# ### The Front Matter
#
# The *front* matter is everything before the *>\begin{document}* line. Here, we will replace the *\documentclass{article}* with the following:
#
# >\documentclass[12pt, letterpaper]{article}
#
# >\usepackage[margin=1.0in]{geometry}
#
# >\usepackage[utf8]{inputenc}
# >
# >\title{First document}
#
# >\author{<NAME> \thanks{funded by the Overleaf team}}
#
# >\date{\today}
#
# Here's an explanation of what we just added:
#
# >\documentclass[12pt, letterpaper]{article}
#
#
# This defines the type of document with some additional parameters inside brackets that are comma-separated can be passed to the command. The extra parameters set the font size (12pt) and the paper size (letterpaper). Note that Overleaf uses a European LaTeX distribution, which produces documents in A4 size by default, so letterpaper is important. Another important parameter that can be passed to the \documentclass command is twocolumn if you want your text in a two-column format and twoside for two-side paper sheet printing.
#
#
# >\usepackage[margin=1.0in]{geometry}
#
#
# This defines the page margins. Everything you submit should have 1 inch margins. More detail about paper size, orientation, and margins can be found [here](https://www.overleaf.com/learn/latex/Page_size_and_margins)
#
# >\usepackage[utf8]{inputenc}
#
#
# This is the encoding for the document to allow special characters beyond ASCII to be used in the text. It can be omitted or changed to another encoding but utf-8 is recommended. | |
influence of last segment, if needed
if self._end_infinite:
# Determine displacement vector magnitudes
r = r1[:,:,-1,:]
r_mag = vec_norm(r)
u = self._vertices[:,-1,:]-self._vertices[:,-2,:]
u /= vec_norm(u)[:,np.newaxis]
# Calculate influence
inf += vec_cross(u[np.newaxis,:,:], r)/(r_mag*(r_mag-vec_inner(u[np.newaxis,:,:], r)))[:,:,np.newaxis]
return 0.25/np.pi*np.nan_to_num(inf)
class FullStreamlineWake(SegmentedWake):
    """Defines a segmented wake which is updated to trace out entire streamlines beginning at the Kutta edges on each iteration.
    Parameters
    ----------
    kutta_edges : list of KuttaEdge
        List of Kutta edges which define this wake.
    N_segments : int, optional
        Number of segments to use for each filament. Defaults to 20.
    segment_length : float, optional
        Length of each discrete filament segment. Defaults to 1.0.
    end_segment_infinite : bool, optional
        Whether the final segment of the filament should be treated as infinite. Defaults to False.
    corrector_iterations : int, optional
        How many times to correct the streamline (velocity) prediction for each segment. Defaults to 1.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Get kwargs
        # Number of corrector passes applied to each predicted segment endpoint
        # (predictor-corrector streamline integration).
        self._corrector_iterations = kwargs.get('corrector_iterations', 1)
    def update(self, velocity_from_body, mu, v_inf, omega, verbose):
        """Updates the shape of the wake based on solved flow results.
        Retraces every filament as a streamline of the current flow solution,
        marching one segment of length self.l at a time from each Kutta edge.
        Parameters
        ----------
        velocity_from_body : callable
            Function which will return the velocity induced by the body at a given set of points.
        mu : ndarray
            Vector of doublet strengths.
        v_inf : ndarray
            Freestream velocity vector.
        omega : ndarray
            Angular rate vector.
        verbose : bool
            Whether to display progress output.
        """
        if verbose:
            print()
            prog = OneLineProgress(self.N_segments+1, msg=" Updating wake shape")
        # Initialize storage
        new_locs = np.zeros((self.N, self.N_segments, 3))
        # Get starting locations (offset slightly from origin to avoid singularities)
        curr_loc = self._vertices[:,0,:]+self._filament_dirs*0.01
        if verbose: prog.display()
        # Loop through filament segments (the first vertex never changes)
        next_loc = np.zeros((self.N, 3))
        for i in range(1,self.N_segments+1):
            # Determine velocities at current point
            v0 = velocity_from_body(curr_loc)+v_inf[np.newaxis,:]-vec_cross(omega, curr_loc)
            v0 += self._get_velocity_from_other_filaments_and_edges(curr_loc, mu)
            # Guess of next location
            # Predictor step: march one segment length along the local velocity direction.
            next_loc = curr_loc+self.l*v0/vec_norm(v0)[:,np.newaxis]
            # Iteratively correct
            for j in range(self._corrector_iterations):
                # Velocities at next location
                # NOTE(review): unlike v0, v1 does not subtract the rotational term
                # vec_cross(omega, next_loc) -- confirm whether this is intentional.
                v1 = velocity_from_body(next_loc)+v_inf[np.newaxis,:]
                v1 += self._get_velocity_from_other_filaments_and_edges(next_loc, mu)
                # Correct location
                # Trapezoidal correction: re-march using the average of the two velocities.
                v_avg = 0.5*(v0+v1)
                next_loc = curr_loc+self.l*v_avg/vec_norm(v_avg)[:,np.newaxis]
            # Store
            new_locs[:,i-1,:] = np.copy(next_loc)
            # Move downstream
            curr_loc = np.copy(next_loc)
            if verbose: prog.display()
        # Store the new locations
        self._vertices[:,1:,:] = new_locs
    def _get_velocity_from_other_filaments_and_edges(self, points, mu):
        # Determines the velocity at each point (assumed to be one on each filament in order) induced by all other filaments and Kutta edges
        # Returns an (N, 3) array of induced velocities.
        # Initialize storage
        v_ind = np.zeros((self.N, 3))
        # Get filament influences
        with np.errstate(divide='ignore', invalid='ignore'):
            V = self._get_filament_influences(points) # On the first segment of the first iteration, this will throw warnings because the initial point is on the filament; these can safely be ignored
        # Loop through filaments
        for i in range(self.N):
            # Get indices of points not belonging to this filament
            # Point i lies on filament i, so filament i's (singular) self-influence is excluded.
            ind = [j for j in range(self.N) if j!=i]
            # Add for outbound panels
            # Filament influence is weighted by the attached panels' doublet strengths,
            # with opposite signs on either side of the filament.
            outbound_panels = self.outbound_panels[i]
            if len(outbound_panels)>0:
                v_ind[ind] -= V[ind,i]*mu[outbound_panels[0]]
                v_ind[ind] += V[ind,i]*mu[outbound_panels[1]]
            # Add for inbound panels
            inbound_panels = self.inbound_panels[i]
            if len(inbound_panels)>0:
                v_ind[ind] += V[ind,i]*mu[inbound_panels[0]]
                v_ind[ind] -= V[ind,i]*mu[inbound_panels[1]]
        # Get influence of edges
        for edge in self._kutta_edges:
            # Get indices of panels defining the edge
            p_ind = edge.panel_indices
            # Get influence
            v = edge.get_vortex_influence(points)
            # Store
            v_ind += -v*mu[p_ind[0]]
            v_ind += v*mu[p_ind[1]]
        return v_ind
class VelocityRelaxedWake(SegmentedWake):
    """Defines a segmented wake which is updated by shifting the segment vertices by the induced velocity on each iteration.
    Parameters
    ----------
    kutta_edges : list of KuttaEdge
        List of Kutta edges which define this wake.
    N_segments : int, optional
        Number of segments to use for each filament. Defaults to 20.
    segment_length : float, optional
        Length of each discrete filament segment. Defaults to 1.0.
    end_segment_infinite : bool, optional
        Whether the final segment of the filament should be treated as infinite. Defaults to False.
    K : float
        Time stepping factor for shifting the filament vertices based on the local induced velocity and distance from the trailing edge.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Get kwargs
        # K is required; construction raises KeyError if it is missing.
        self._K = kwargs["K"]
    def update(self, velocity_from_body, mu, v_inf, omega, verbose):
        """Updates the shape of the wake based on solved flow results.
        Shifts every movable vertex by (local induced velocity) * dt, where dt
        grows with the vertex's downstream distance from the trailing edge.
        Parameters
        ----------
        velocity_from_body : callable
            Function which will return the velocity induced by the body at a given set of points.
        mu : ndarray
            Vector of doublet strengths.
        v_inf : ndarray
            Freestream velocity vector.
        omega : ndarray
            Angular rate vector.
        verbose : bool
            Whether to display progress output.
        """
        if verbose:
            print()
            prog = OneLineProgress(4, msg=" Updating wake shape")
        # Reorder vertices for computation
        # Flatten all movable vertices (every vertex except each filament's root)
        # into a single (N*N_segments, 3) array.
        points = self._vertices[:,1:,:].reshape((self.N*(self.N_segments), 3))
        # Get velocity from body and rotation
        v_ind = velocity_from_body(points)-vec_cross(omega, points)
        if verbose: prog.display()
        # Get velocity from wake elements
        v_ind += self._get_velocity_from_filaments_and_edges(points, mu)
        if verbose: prog.display()
        # Calculate time-stepping parameter
        # d is each vertex's downstream distance (projection onto the freestream
        # direction) from its filament root, so dt = K*d/U scales the shift with
        # distance from the trailing edge.
        U = norm(v_inf)
        u = v_inf/U
        dl = self._vertices[:,1:,:]-self._vertices[:,0,:][:,np.newaxis,:]
        d = vec_inner(dl, u[np.newaxis,:])
        dt = self._K*d/U
        if verbose: prog.display()
        # Shift vertices
        self._vertices[:,1:,:] += dt[:,:,np.newaxis]*v_ind.reshape((self.N, self.N_segments, 3))
        if verbose: prog.display()
    def _get_velocity_from_filaments_and_edges(self, points, mu):
        # Determines the velocity at the given points induced by all filaments and Kutta edges
        # Returns an array of induced velocities with the same shape as points.
        # Initialize storage
        v_ind = np.zeros_like(points)
        # Get filament influences
        # Suppress warnings from evaluation points that lie on a filament.
        with np.errstate(divide='ignore', invalid='ignore'):
            V = self._get_filament_influences(points)
        # Loop through filaments
        for i in range(self.N):
            # Add for outbound panels
            # Filament influence is weighted by the attached panels' doublet strengths,
            # with opposite signs on either side of the filament.
            outbound_panels = self.outbound_panels[i]
            if len(outbound_panels)>0:
                v_ind[:] -= V[:,i]*mu[outbound_panels[0]]
                v_ind[:] += V[:,i]*mu[outbound_panels[1]]
            # Add for inbound panels
            inbound_panels = self.inbound_panels[i]
            if len(inbound_panels)>0:
                v_ind[:] += V[:,i]*mu[inbound_panels[0]]
                v_ind[:] -= V[:,i]*mu[inbound_panels[1]]
        # Get influence of edges
        for edge in self._kutta_edges:
            # Get indices of panels defining the edge
            p_ind = edge.panel_indices
            # Get influence
            v = edge.get_vortex_influence(points)
            # Store
            v_ind += -v*mu[p_ind[0]]
            v_ind += v*mu[p_ind[1]]
        return v_ind
class MarchingStreamlineWake(SegmentedWake):
"""Defines a segmented wake which is updated by adding a filament segment in the direction of the local velocity at each iteration.
Parameters
----------
kutta_edges : list of KuttaEdge
List of Kutta edges which define this wake.
N_segments : int, optional
Number of segments to use for each filament. Must be the same as the number of wake iterations for the solver. Defaults to 20.
segment_length : float, optional
Length of each discrete filament segment. Defaults to 1.0.
end_segment_infinite : bool, optional
Whether the final segment of the filament should be treated as infinite. Defaults to False.
corrector_iterations : int, optional
How many times to correct the streamline (velocity) prediction for each segment. Defaults to 1.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Get kwargs
self._corrector_iterations = kwargs.get("corrector_iterations", 1)
# Set segment counters
self.N_segments_final = copy.copy(self.N_segments) # How many segments this wake should have when done iterating. It starts with zero.
def set_filament_direction(self, v_inf, omega):
"""Resets the counter for determining how far along the wake has been solved. Does not update filament vertices (yeah it's a misnomer, but hey, consistency).
Parameters
----------
v_inf : ndarray
Freestream velocity vector.
omega : ndarray
Angular rate vector.
"""
# Get filament starting directions (required for offsetting the initial point to avoid infinite velocities)
origins = self._vertices[:,0,:]
self._filament_dirs = v_inf[np.newaxis,:]-vec_cross(omega, origins)
self._filament_dirs /= vec_norm(self._filament_dirs)[:,np.newaxis]
# Reset number of segments which have been set
self.N_segments = 0
def get_vtk_data(self, **kwargs):
"""Returns a list of vertices and line indices describing this wake.
Parameters
----------
length : float, optional
Length of the final filament segment, if set as infinite. Defaults to 20 times the filament segment length.
"""
# Get kwargs
l = kwargs.get("length", 20.0*self.l)
# Initialize storage
vertices = []
line_vertex_indices = []
# Loop through filaments
i = 0
for j in range(self.N):
# Add vertices
for k in range(self.N_segments+1):
vertices.append(self._vertices[j,k])
# Add indices
if k != self.N_segments:
line_vertex_indices.append([2, i+k, i+k+1])
# Increment index
i += self.N_segments+1
return vertices, line_vertex_indices, self.N*self.N_segments
def get_influence_matrix(self, **kwargs):
"""Create wake influence matrix; first index is the influenced points, second is the influencing panel, third is the velocity component.
Parameters
----------
points : ndarray
Array of points at which to calculate the influence.
N_panels : int
Number of panels in the mesh to which this wake belongs.
Returns
-------
ndarray
Trailing vortex influences.
"""
# Get kwargs
points = kwargs.get("points")
# Initialize storage
N = len(points)
vortex_influence_matrix = np.zeros((N, kwargs["N_panels"], 3))
# Get influence of edges
for | |
, 'Descending' , ), 97, (97, (), [ (8, 1, None, None) ,
(12, 17, None, None) , ], 1 , 1 , 4 , 1 , 108 , (3, 0, None, None) , 0 , )),
]
# Flag emitted by pywin32's makepy code generator alongside the JournalItem
# vtable table below (presumably: 1 => the interface is dispatch-based —
# TODO confirm against win32com genpy output conventions). Do not hand-edit.
_JournalItem_vtables_dispatch_ = 1
_JournalItem_vtables_ = [
(( 'Application' , 'Application' , ), 61440, (61440, (), [ (16393, 10, None, "IID('{00063001-0000-0000-C000-000000000046}')") , ], 1 , 2 , 4 , 0 , 28 , (3, 0, None, None) , 0 , )),
(( 'Class' , 'Class' , ), 61450, (61450, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
(( 'Session' , 'Session' , ), 61451, (61451, (), [ (16393, 10, None, "IID('{00063002-0000-0000-C000-000000000046}')") , ], 1 , 2 , 4 , 0 , 36 , (3, 0, None, None) , 0 , )),
(( 'Parent' , 'Parent' , ), 61441, (61441, (), [ (16393, 10, None, None) , ], 1 , 2 , 4 , 0 , 40 , (3, 0, None, None) , 0 , )),
(( 'Actions' , 'Actions' , ), 63511, (63511, (), [ (16393, 10, None, "IID('{0006303E-0000-0000-C000-000000000046}')") , ], 1 , 2 , 4 , 0 , 44 , (3, 0, None, None) , 0 , )),
(( 'Attachments' , 'Attachments' , ), 63509, (63509, (), [ (16393, 10, None, "IID('{0006303C-0000-0000-C000-000000000046}')") , ], 1 , 2 , 4 , 0 , 48 , (3, 0, None, None) , 0 , )),
(( 'BillingInformation' , 'BillingInformation' , ), 34101, (34101, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 52 , (3, 0, None, None) , 0 , )),
(( 'BillingInformation' , 'BillingInformation' , ), 34101, (34101, (), [ (8, 1, None, None) , ], 1 , 4 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( 'Body' , 'Body' , ), 37120, (37120, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 60 , (3, 0, None, None) , 0 , )),
(( 'Body' , 'Body' , ), 37120, (37120, (), [ (8, 1, None, None) , ], 1 , 4 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( 'Categories' , 'Categories' , ), 36865, (36865, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 68 , (3, 0, None, None) , 0 , )),
(( 'Categories' , 'Categories' , ), 36865, (36865, (), [ (8, 1, None, None) , ], 1 , 4 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( 'Companies' , 'Companies' , ), 34107, (34107, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 76 , (3, 0, None, None) , 0 , )),
(( 'Companies' , 'Companies' , ), 34107, (34107, (), [ (8, 1, None, None) , ], 1 , 4 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( 'ConversationIndex' , 'ConversationIndex' , ), 113, (113, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 84 , (3, 0, None, None) , 0 , )),
(( 'ConversationTopic' , 'ConversationTopic' , ), 112, (112, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( 'CreationTime' , 'CreationTime' , ), 12295, (12295, (), [ (16391, 10, None, None) , ], 1 , 2 , 4 , 0 , 92 , (3, 0, None, None) , 0 , )),
(( 'EntryID' , 'EntryID' , ), 61470, (61470, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( 'FormDescription' , 'FormDescription' , ), 61589, (61589, (), [ (16393, 10, None, "IID('{00063046-0000-0000-C000-000000000046}')") , ], 1 , 2 , 4 , 0 , 100 , (3, 0, None, None) , 0 , )),
(( 'GetInspector' , 'GetInspector' , ), 61502, (61502, (), [ (16393, 10, None, "IID('{00063005-0000-0000-C000-000000000046}')") , ], 1 , 2 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
(( 'Importance' , 'Importance' , ), 23, (23, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 108 , (3, 0, None, None) , 0 , )),
(( 'Importance' , 'Importance' , ), 23, (23, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 112 , (3, 0, None, None) , 0 , )),
(( 'LastModificationTime' , 'LastModificationTime' , ), 12296, (12296, (), [ (16391, 10, None, None) , ], 1 , 2 , 4 , 0 , 116 , (3, 0, None, None) , 0 , )),
(( 'MAPIOBJECT' , 'MAPIOBJECT' , ), 61696, (61696, (), [ (16397, 10, None, None) , ], 1 , 2 , 4 , 0 , 120 , (3, 0, None, None) , 64 , )),
(( 'MessageClass' , 'MessageClass' , ), 26, (26, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 124 , (3, 0, None, None) , 0 , )),
(( 'MessageClass' , 'MessageClass' , ), 26, (26, (), [ (8, 1, None, None) , ], 1 , 4 , 4 , 0 , 128 , (3, 0, None, None) , 0 , )),
(( 'Mileage' , 'Mileage' , ), 34100, (34100, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 132 , (3, 0, None, None) , 0 , )),
(( 'Mileage' , 'Mileage' , ), 34100, (34100, (), [ (8, 1, None, None) , ], 1 , 4 , 4 , 0 , 136 , (3, 0, None, None) , 0 , )),
(( 'NoAging' , 'NoAging' , ), 34062, (34062, (), [ (16395, 10, None, None) , ], 1 , 2 , 4 , 0 , 140 , (3, 0, None, None) , 0 , )),
(( 'NoAging' , 'NoAging' , ), 34062, (34062, (), [ (11, 1, None, None) , ], 1 , 4 , 4 , 0 , 144 , (3, 0, None, None) , 0 , )),
(( 'OutlookInternalVersion' , 'OutlookInternalVersion' , ), 34130, (34130, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 148 , (3, 0, None, None) , 0 , )),
(( 'OutlookVersion' , 'OutlookVersion' , ), 34132, (34132, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 152 , (3, 0, None, None) , 0 , )),
(( 'Saved' , 'Saved' , ), 61603, (61603, (), [ (16395, 10, None, None) , ], 1 , 2 , 4 , 0 , 156 , (3, 0, None, None) , 0 , )),
(( 'Sensitivity' , 'Sensitivity' , ), 54, (54, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 160 , (3, 0, None, None) , 0 , )),
(( 'Sensitivity' , 'Sensitivity' , ), 54, (54, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 164 , (3, 0, None, None) , 0 , )),
(( 'Size' , 'Size' , ), 3592, (3592, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 168 , (3, 0, None, None) , 0 , )),
(( 'Subject' , 'Subject' , ), 55, (55, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 172 , (3, 0, None, None) , 0 , )),
(( 'Subject' , 'Subject' , ), 55, (55, (), [ (8, 1, None, None) , ], 1 , 4 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.