input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
# -*- coding: utf-8 -*-
import os
import sys
import types
import unittest
import inspect
from lxml import etree
from . import XSL, XML, VERSION_LIST
from .core import get_module, get_stylesheet, get_source_version, get_migration_path, list_versions
from .main import parse_args
from .migrate import migrate_by_stylesheet, do_migration, get_params
from .utils import _print, _check, _decode_data
replace_list = [
('\n', ''),
('\t', ''),
(' ', ''),
]
def _replace(s, vals=replace_list):
if s is None:
return ''
_s = s
for u, v in vals:
_s = _s.replace(u, v)
return _s
def compare_elements(el1, el2):
    """Recursively compare two XML elements and all their children.

    Text and tail content is compared after stripping whitespace
    (newlines, tabs, spaces), so formatting-only differences are ignored.

    :return: True or False
    """
    _check(el1, (etree._Element), TypeError)
    _check(el2, (etree._Element), TypeError)
    # https://stackoverflow.com/questions/7905380/testing-equivalence-of-xml-etree-elementtree
    if el1.tag != el2.tag or el1.attrib != el2.attrib:
        return False
    if _replace(el1.text) != _replace(el2.text):
        return False
    if _replace(el1.tail) != _replace(el2.tail):
        return False
    if len(el1) != len(el2):
        return False
    # recurse over children paired positionally
    return all(compare_elements(child1, child2) for child1, child2 in zip(el1, el2))
class TestUtils(unittest.TestCase):
    """Tests for the helper utilities, the CLI argument parser and the
    core version/stylesheet/migration-path lookup functions."""

    def test_check(self):
        """Test that _check works"""
        # the default error message names the offending object and class
        with self.assertRaisesRegex(TypeError, r"object '1' is not of class <class 'str'>"):
            _check(1, str, TypeError)
        # a user-supplied message (here empty) still raises the exception
        with self.assertRaises(TypeError):
            _check(1, str, TypeError, message="")

    def test_migrate(self):
        """Test that migrate works"""
        # exceptions
        with self.assertRaises(TypeError):
            migrate_by_stylesheet(1, 2)  # non-path arguments are rejected
        with self.assertRaises(IOError):
            migrate_by_stylesheet('file.xml', 'file.xsl')  # nonexistent files

    def test_parse_args(self):
        """Test correct arguments"""
        # default with -t/--target-version
        args = parse_args("file.xml -v -t 1.0")
        self.assertEqual(args.infile, "file.xml")
        self.assertEqual(args.target_version, "1.0")
        # the outfile defaults to <stem>_v<target-version>.<ext>
        self.assertEqual(args.outfile, "file_v1.0.xml")
        self.assertFalse(args.list_versions)
        # specify outfile
        args = parse_args("file.xml -v -t 1.0 -o my_output.xml")
        self.assertEqual(args.outfile, "my_output.xml")
        # list valid versions
        args = parse_args("-l")
        self.assertEqual(args.infile, '')
        # target version defaults to the latest supported version
        self.assertEqual(args.target_version, VERSION_LIST[-1])
        self.assertIsNone(args.outfile)
        self.assertTrue(args.list_versions)
        self.assertFalse(args.show_version)
        # show version in file
        args = parse_args("-v -s file.xml")
        self.assertEqual(args.infile, 'file.xml')
        self.assertEqual(args.target_version, VERSION_LIST[-1])
        # self.assertEqual(args.outfile, 'file_v0.8.0.dev1.xml')
        self.assertIsNone(args.outfile)
        self.assertFalse(args.list_versions)
        self.assertTrue(args.show_version)
        # show package version
        args = parse_args("-v -V")
        self.assertEqual(args.infile, '')
        self.assertIsNone(args.outfile)
        self.assertTrue(args.version)
        self.assertFalse(args.list_versions)
        self.assertFalse(args.show_version)

    def test_get_stylesheet(self):
        """Given versions return the correct stylesheet to use"""
        stylesheet = get_stylesheet("1", "2")
        self.assertEqual(os.path.basename(stylesheet), 'migrate_v1_to_v2.xsl')
        self.assertTrue(os.path.exists(stylesheet))
        original = os.path.join(XML, 'original.xml')
        _migrated = migrate_by_stylesheet(original, stylesheet,
                                          segmentation_details="Nothing much")
        migrated = etree.ElementTree(etree.XML(_migrated))
        # dump the migrated document to stderr for visual inspection
        sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))
        # self.assertTrue(False)
        # an unknown version pair has no stylesheet on disk
        with self.assertRaises(OSError):
            get_stylesheet("nothing", "something")

    def test_get_source_version(self):
        """Obtain the version in the original"""
        source_version = get_source_version(os.path.join(XML, 'original.xml'))
        self.assertEqual(source_version, '1')
        fn_v07 = os.path.join(XML, 'test2.sff')
        source_version_v07 = get_source_version(fn_v07)
        self.assertEqual(source_version_v07, '0.7.0.dev0')
        fn_v08 = os.path.join(XML, 'test2_v0.8.0.dev1.sff')
        source_version_v08 = get_source_version(fn_v08)
        self.assertEqual(source_version_v08, '0.8.0.dev1')

    def test_get_migration_path(self):
        """Determine the sequence of migrations to perform"""
        version_list = ['1', '2', '3', '4', '5', '6']
        # the path is a chain of consecutive (source, target) pairs
        migration_path = get_migration_path('2', '6', version_list=version_list)
        self.assertEqual(migration_path, [('2', '3'), ('3', '4'), ('4', '5'), ('5', '6')])
        # cannot find start
        with self.assertRaisesRegex(ValueError, r".*invalid migration start.*"):
            get_migration_path('0', '6', version_list=version_list)
        # cannot find end
        with self.assertRaisesRegex(ValueError, r".*invalid migration end.*"):
            get_migration_path('1', '9', version_list=version_list)

    def test_do_migration_example(self):
        """Toy migration example"""
        version_list = ['1', '2']
        cmd = "{infile} -v --target-version 2 --outfile {outfile}".format(
            infile=os.path.join(XML, "original.xml"),
            outfile=os.path.join(XML, "my_output.xml")
        )
        args = parse_args(cmd)
        _text = "48ec3e2ab568763658fc3f5430b851ceaf1593d6"  # secrets.token_hex(20)
        status = do_migration(
            args,
            value_list=[_text],
            version_list=version_list,
        )
        # NOTE(review): the migration appears to write original_v2.xml even
        # though --outfile my_output.xml was requested — confirm intended
        _output = os.path.join(XML, "original_v2.xml")
        self.assertTrue(os.path.exists(_output))
        self.assertEqual(status, os.EX_OK)
        output = etree.parse(_output)
        # the supplied parameter value must end up in the migrated document
        self.assertEqual(output.xpath('/segmentation/details/text()')[0], _text)
        os.remove(args.outfile)

    def test_do_migration(self):
        """Do an actual migration using the convenience function"""
        # try a null migration
        target_version = "0.8.0.dev1"
        outfile = os.path.join(XML, 'my_file_out.sff')
        cmd = "{infile} -v --target-version {target_version} --outfile {outfile}".format(
            infile=os.path.join(XML, 'test2_v0.8.0.dev1.sff'),
            target_version=target_version,
            outfile=outfile,
        )
        args = parse_args(cmd)
        status = do_migration(args)
        self.assertEqual(status, os.EX_OK)
        self.assertFalse(os.path.exists(outfile))  # the file was not created
        # try an actual migrations
        cmd = "{infile} -v --target-version {target_version} --outfile {outfile}".format(
            infile=os.path.join(XML, 'test2.sff'),
            target_version=target_version,
            outfile=outfile
        )
        args = parse_args(cmd)
        status = do_migration(args)
        self.assertEqual(status, os.EX_OK)
        self.assertTrue(os.path.exists(outfile))  # this time the file was created
        in_version = get_source_version(args.infile)
        out_version = get_source_version(outfile)
        self.assertNotEqual(in_version, out_version)
        self.assertEqual(out_version, target_version)
        os.remove(outfile)  # clean up the generated file

    def test_get_module(self):
        """Check that we can get the right module for this migration"""
        module = get_module('1', '2')
        self.assertIsInstance(module, types.ModuleType)

    def test_get_params(self):
        """Test getting params"""
        module = get_module('1', '2')
        _text = "ce3c90151bb3c803c8e6570ee7d5845ac3c96c38"  # secrets.token_hex(20)
        params = get_params(module.PARAM_LIST, value_list=[_text])
        self.assertIsInstance(params, dict)
        self.assertEqual(len(params), 1)
        # supplying more values than declared params is an error
        with self.assertRaises(ValueError):
            get_params(module.PARAM_LIST, value_list=[_text, _text])

    def test_list_versions(self):
        """Test that we can list the supported versions"""
        # args parsed only to mirror CLI usage; list_versions takes no args
        args = parse_args("-v -l")
        status, version_count = list_versions()
        self.assertEqual(status, os.EX_OK)
        self.assertEqual(version_count, 2)
class TestMigrations(unittest.TestCase):
    """Exercise each toy XSL stylesheet (add/drop/change field, attribute
    add/drop/rename, value changes) against the reference documents
    shipped alongside the tests."""

    def test_original_to_add_field(self):
        """Test adding a field to the original"""
        original = os.path.join(XML, 'original.xml')
        reference = etree.parse(os.path.join(XML, 'add_field.xml'))
        stylesheet = os.path.join(XSL, 'original_to_add_field.xsl')
        # we pass the value of the `details` param as follows:
        # A = reference.xpath(<xpath>)[0]
        # etree.XSLT.strparam(A) - handle a possibly quoted string
        details_text = reference.xpath('/segmentation/details/text()')[0]
        _migrated = migrate_by_stylesheet(original, stylesheet,
                                          segmentation_details=details_text)  # bytes
        migrated = etree.ElementTree(etree.XML(_migrated))
        same = compare_elements(reference.getroot(), migrated.getroot())
        # dump both documents to stderr to ease debugging on failure
        sys.stderr.write('reference:\n' + etree.tostring(reference).decode('utf-8'))
        sys.stderr.write('\n')
        sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))
        self.assertTrue(same)

    def test_original_to_drop_field(self):
        """Test dropping a field from the original"""
        original = os.path.join(XML, 'original.xml')
        reference = etree.parse(os.path.join(XML, 'drop_field.xml'))
        stylesheet = os.path.join(XSL, 'original_to_drop_field.xsl')
        # verbose mode warns the user about the dropped content
        with self.assertWarns(UserWarning):
            _migrated = migrate_by_stylesheet(original, stylesheet, verbose=True)
        migrated = etree.ElementTree(etree.XML(_migrated))
        same = compare_elements(reference.getroot(), migrated.getroot())
        self.assertTrue(same)
        sys.stderr.write('reference:\n' + etree.tostring(reference).decode('utf-8'))
        sys.stderr.write('\n')
        sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))

    def test_original_to_change_field_rename_field(self):
        """Test changing a field by renaming it"""
        original = os.path.join(XML, 'original.xml')
        reference = etree.parse(os.path.join(XML, 'change_field_rename_field.xml'))
        stylesheet = os.path.join(XSL, 'original_to_change_field_rename_field.xsl')
        _migrated = migrate_by_stylesheet(original, stylesheet)
        migrated = etree.ElementTree(etree.XML(_migrated))
        same = compare_elements(reference.getroot(), migrated.getroot())
        self.assertTrue(same)
        # sys.stderr.write('reference:\n' + etree.tostring(reference).decode('utf-8'))
        # sys.stderr.write('\n')
        # sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))

    def test_original_to_change_field_add_attribute(self):
        """Test changing a field by adding an attribute"""
        original = os.path.join(XML, 'original.xml')
        reference = etree.parse(os.path.join(XML, 'change_field_add_attribute.xml'))
        stylesheet = os.path.join(XSL, 'original_to_change_field_add_attribute.xsl')
        # the new attribute's value is taken from the reference document
        lang_text = reference.xpath('/segmentation/name/@lang')[0]
        _migrated = migrate_by_stylesheet(original, stylesheet,
                                          segmentation_name_lang=lang_text)
        migrated = etree.ElementTree(etree.XML(_migrated))
        same = compare_elements(reference.getroot(), migrated.getroot())
        self.assertTrue(same)
        # sys.stderr.write('reference:\n' + etree.tostring(reference).decode('utf-8'))
        # sys.stderr.write('\n')
        # sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))

    def test_original_to_change_field_drop_attribute(self):
        """Test changing a field by dropping an attribute"""
        original = os.path.join(XML, 'original.xml')
        reference = etree.parse(os.path.join(XML, 'change_field_drop_attribute.xml'))
        stylesheet = os.path.join(XSL, 'original_to_change_field_drop_attribute.xsl')
        _migrated = migrate_by_stylesheet(original, stylesheet)
        migrated = etree.ElementTree(etree.XML(_migrated))
        same = compare_elements(reference.getroot(), migrated.getroot())
        self.assertTrue(same)
        sys.stderr.write('reference:\n' + etree.tostring(reference).decode('utf-8'))
        sys.stderr.write('\n')
        sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))

    def test_original_to_change_field_change_value(self):
        """Test changing a field by changing the value"""
        original = os.path.join(XML, 'original.xml')
        reference = etree.parse(os.path.join(XML, 'change_field_change_value.xml'))
        stylesheet = os.path.join(XSL, 'original_to_change_field_change_value.xsl')
        # the replacement value comes from the reference document
        _segment_name = reference.xpath('/segmentation/segment[@id=1]/name/text()')[0]
        _migrated = migrate_by_stylesheet(original, stylesheet, segment_name=_segment_name)
        migrated = etree.ElementTree(etree.XML(_migrated))
        same = compare_elements(reference.getroot(), migrated.getroot())
        sys.stderr.write('reference:\n' + etree.tostring(reference).decode('utf-8'))
        sys.stderr.write('\n')
        sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))
        self.assertTrue(same)

    def test_original_to_change_field_rename_attribute(self):
        """Test changing a field by renaming an attribute"""
        original = os.path.join(XML, 'original.xml')
        reference = etree.parse(os.path.join(XML, 'change_field_rename_attribute.xml'))
        stylesheet = os.path.join(XSL, 'original_to_change_field_rename_attribute.xsl')
        _migrated = migrate_by_stylesheet(original, stylesheet)
        migrated = etree.ElementTree(etree.XML(_migrated))
        same = compare_elements(reference.getroot(), migrated.getroot())
        sys.stderr.write('reference:\n' + etree.tostring(reference).decode('utf-8'))
        sys.stderr.write('\n')
        sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))
        self.assertTrue(same)

    def test_original_list_to_change_value_list(self):
        """Test changing all the values for a list"""
        original = os.path.join(XML, 'original_list.xml')
        reference = etree.parse(os.path.join(XML, 'change_value_list.xml'))
        stylesheet = os.path.join(XSL, 'original_to_change_value_list.xsl')
        _migrated = migrate_by_stylesheet(original, stylesheet)
        migrated = etree.ElementTree(etree.XML(_migrated))
        same = compare_elements(reference.getroot(), migrated.getroot())
        sys.stderr.write('reference:\n' + etree.tostring(reference).decode('utf-8'))
        sys.stderr.write('\n')
        sys.stderr.write('migrated:\n' + etree.tostring(migrated).decode('utf-8'))
        self.assertTrue(same)
class TestEMDBSFFMigrations(unittest.TestCase):
    def test_migrate_mesh_exceptions(self):
        """Test that we capture exceptions"""
        module = get_module('0.7.0.dev0', '0.8.0.dev1')
        # create an empty mesh
        mesh = etree.Element("mesh")
        # invalid keyword values must be rejected with explicit errors
        with self.assertRaisesRegex(ValueError, r".*invalid endianness.*"):
            module.migrate_mesh(mesh, endianness='other')
        with self.assertRaisesRegex(ValueError, r".*invalid triangles mode.*"):
            module.migrate_mesh(mesh, triangles_mode='other')
        with self.assertRaisesRegex(ValueError, r".*invalid vertices mode.*"):
            module.migrate_mesh(mesh, vertices_mode='other')
        # no geometry
        verts, norms, tris = module.migrate_mesh(mesh)
        self.assertIsInstance(verts, etree._Element)
        self.assertEqual(int(verts.get("num_vertices")), 0)
        # let's get the signature of the migrate_mesh function to get the default values for kwargs
        signature = inspect.signature(module.migrate_mesh)
        # verts
        self.assertEqual(verts.get("mode"), signature.parameters['vertices_mode'].default)
        self.assertEqual(verts.get("endianness"), signature.parameters['endianness'].default)
        self.assertEqual(verts.get("data"), "")
        # norms: normals share the vertices' mode default
        self.assertEqual(norms.get("mode"), signature.parameters['vertices_mode'].default)
        self.assertEqual(norms.get("endianness"), signature.parameters['endianness'].default)
        self.assertEqual(norms.get("data"), "")
        # tris
        self.assertEqual(tris.get("mode"), signature.parameters['triangles_mode'].default)
        self.assertEqual(tris.get("endianness"), signature.parameters['endianness'].default)
        self.assertEqual(tris.get("data"), "")
    def test_v0_7_0_dev0_to_v0_8_0_dev0(self):
        """Test migration from v0.7.0.dev0 to v0.8.0.dev1"""
        original = os.path.join(XML, 'test2.sff')
        stylesheet = get_stylesheet("0.7.0.dev0", "0.8.0.dev1")
        # phase I migration using stylesheet
        _migrated = migrate_by_stylesheet(original, stylesheet)
        # convert migration to an ElementTree object
        migrated = etree.ElementTree(etree.XML(_migrated))
        _original = etree.parse(original)
        segments = _original.xpath('/segmentation/segmentList/segment')
        _print(segments)
        # phase II: carry the mesh geometry over manually, keyed by
        # segment id, then by mesh id
        segment_meshes = dict()
        module = get_module('0.7.0.dev0', '0.8.0.dev1')
        for segment in segments:
            segment_meshes[int(segment.get("id"))] = dict()
            for mesh in segment.xpath('meshList/mesh'):
                _vertices, _normals, _triangles = module.migrate_mesh(
                    mesh)
                segment_meshes[int(segment.get("id"))][int(mesh.get("id"))] = _vertices, _normals, _triangles
        migrated_segments = migrated.xpath('/segmentation/segment_list/segment')
        for migrated_segment in migrated_segments:
            for migrated_mesh in migrated_segment.xpath('mesh_list/mesh'):
                _vertices, _normals, _triangles = segment_meshes[int(migrated_segment.get("id"))][
                    int(migrated_mesh.get("id"))]
                # insert the geometry as the first three children of the mesh
                migrated_mesh.insert(0, _vertices)
                migrated_mesh.insert(1, _normals)
                migrated_mesh.insert(2, _triangles)
        # let's see what it looks like
        migrated_decoded = etree.tostring(migrated, xml_declaration=True, encoding='UTF-8', pretty_print=True).decode(
            'utf-8')
        # sys.stderr.write('migrated:\n' + migrated_decoded)
        # with open(os.path.join(XML, 'test2_v0.8.0.dev1.sff'), 'w') as f:
        #     f.write(migrated_decoded)
def test_meshes_equal_v0_7_0_dev0_vs_v0_8_0_dev0(self):
"""Test that the mesh data is the same
We only compare surface vertices. Normal vertices correspond one-to-one to surface vertices and are not relevant
to triangles.
"""
v7 = os.path.join(XML, 'test7.sff')
v8 = os.path.join(XML, 'test7_v0.8.0.dev1.sff')
fv7 = etree.parse(v7)
fv8 = etree.parse(v8)
fv7_segments = fv7.xpath('/segmentation/segmentList/segment')
# extract vertices, normals and triangles
fv7_segment_meshes = dict()
for segment in fv7_segments:
fv7_segment_meshes[int(segment.get("id"))] = dict()
for mesh in segment.xpath('meshList/mesh'):
fv7_segment_meshes[int(segment.get("id"))][int(mesh.get("id"))] = {
'surface_vertices': dict(),
'normal_vertices': dict(),
'triangles': dict(),
}
vertex_list = next(mesh.iter('vertexList'))
for vertex in vertex_list:
if vertex.get('designation') == 'surface' or vertex.get('designation') is None:
fv7_segment_meshes[int(segment.get("id"))][int(mesh.get("id"))]['surface_vertices'][
int(vertex.get('vID'))] = tuple(map(lambda v: float(v.text), vertex.xpath('*')))
| |
from typing import List, Dict, Sequence, Union, Tuple
from numbers import Number
import random
import numpy as np
from toolz import curry
from toolz.curried import get
from common import _tuple
__all__ = [
"resize", "resized_crop", "center_crop", "drop_boundary_bboxes",
"to_absolute_coords", "to_percent_coords", "hflip", "hflip2",
"vflip", "vflip2", "random_sample_crop", "move"
]
def iou_1m(box, boxes):
    r"""
    Calculates one-to-many ious.

    Parameters
    ----------
    box : ``Sequences[Number]``
        A bounding box.
    boxes : ``array_like``
        Many bounding boxes.

    Returns
    -------
    ious : ``array_like``
        IoUs between the box and boxes.
    """
    # corners of the intersection rectangle
    left = np.maximum(boxes[..., 0], box[0])
    top = np.maximum(boxes[..., 1], box[1])
    right = np.minimum(boxes[..., 2], box[2])
    bottom = np.minimum(boxes[..., 3], box[3])
    dx = right - left
    dy = bottom - top
    intersection = dx * dy
    area_of_box = (box[2] - box[0]) * (box[3] - box[1])
    areas_of_boxes = (boxes[..., 2] - boxes[..., 0]) * \
        (boxes[..., 3] - boxes[..., 1])
    union = areas_of_boxes + area_of_box - intersection
    iou = intersection / union
    # a negative extent on either axis means no overlap at all
    iou[dx < 0] = 0
    iou[dy < 0] = 0
    return iou
def random_sample_crop(anns, size, min_iou, min_ar, max_ar, max_attemps=50):
    """
    Crop the given PIL Image to random size and aspect ratio.

    A crop of random size (default: of 0.08 to 1.0) of the original size and a random
    aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
    is finally resized to given size.
    This is popularly used to train the Inception networks.

    Parameters
    ----------
    anns : ``List[Dict]``
        Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
    size : ``Sequence[int]``
        Size of the original image.
    min_iou : ``float``
        Minimal iou between the objects and the cropped image.
    min_ar : ``Number``
        Minimal aspect ratio.
    max_ar : ``Number``
        Maximum aspect ratio.
    max_attemps: ``int``
        Maximum attemps to try.
    """
    width, height = size
    boxes = np.stack([ann['bbox'] for ann in anns])
    # convert [l, t, w, h] -> [l, t, r, b]
    boxes[:, 2:] += boxes[:, :2]
    for _ in range(max_attemps):
        crop_w = random.uniform(0.3 * width, width)
        crop_h = random.uniform(0.3 * height, height)
        # reject windows with an unacceptable aspect ratio
        ratio = crop_h / crop_w
        if not (min_ar <= ratio <= max_ar):
            continue
        left = random.uniform(0, width - crop_w)
        top = random.uniform(0, height - crop_h)
        right = left + crop_w
        bottom = top + crop_h
        window = np.array([left, top, right, bottom])
        # every object must overlap the window by at least min_iou
        if iou_1m(window, boxes).min() < min_iou:
            continue
        # keep only objects whose centers fall strictly inside the window
        centers = (boxes[:, :2] + boxes[:, 2:]) / 2.0
        inside = (left < centers[:, 0]) & (centers[:, 0] < right) & (
            top < centers[:, 1]) & (centers[:, 1] < bottom)
        if not inside.any():
            continue
        keep = np.nonzero(inside)[0].tolist()
        return get(keep, anns), left, top, crop_w, crop_h
    return None
@curry
def resized_crop(anns, left, upper, width, height, output_size, min_area_frac):
    """Crop the annotations to the given window, then rescale the surviving
    boxes from the window size to ``output_size``."""
    cropped = crop(anns, left, upper, width, height, min_area_frac)
    return resize(cropped, (width, height), output_size)
@curry
def drop_boundary_bboxes(anns, size):
    r"""
    Drop bounding boxes whose centers are out of the image boundary.

    Parameters
    ----------
    anns : ``List[Dict]``
        Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
    size : ``Sequence[int]``
        Size of the original image.
    """
    width, height = size
    new_anns = []
    for ann in anns:
        l, t, w, h = ann['bbox']
        # BUG FIX: bbox is [l, t, w, h], so the center is (l + w/2, t + h/2).
        # The previous (l + w) / 2 would only be the center for an
        # [l, t, r, b] box and rejected valid boxes near the right/bottom.
        x = l + w / 2.
        y = t + h / 2.
        if 0 <= x <= width and 0 <= y <= height:
            new_anns.append({**ann, "bbox": [l, t, w, h]})
    return new_anns
@curry
def center_crop(anns, size, output_size):
    r"""
    Crops the bounding boxes of the given PIL Image at the center.

    Parameters
    ----------
    anns : ``List[Dict]``
        Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
    size : ``Sequence[int]``
        Size of the original image.
    output_size : ``Union[Number, Sequence[int]]``
        Desired output size of the crop. If size is an int instead of sequence like (w, h),
        a square crop (size, size) is made.
    """
    output_size = _tuple(output_size, 2)
    output_size = tuple(int(x) for x in output_size)
    w, h = size
    th, tw = output_size
    upper = int(round((h - th) / 2.))
    left = int(round((w - tw) / 2.))
    # BUG FIX: crop() takes (anns, left, upper, width, height); the previous
    # call passed (th, tw), i.e. height before width, so non-square crops
    # used a transposed window.
    return crop(anns, left, upper, tw, th)
@curry
def crop(anns, left, upper, width, height, minimal_area_fraction=0.25):
    r"""
    Crop the bounding boxes of the given PIL Image.

    Parameters
    ----------
    anns : ``List[Dict]``
        Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
    left: ``int``
        Left pixel coordinate.
    upper: ``int``
        Upper pixel coordinate.
    width: ``int``
        Width of the cropped image.
    height: ``int``
        Height of the cropped image.
    minimal_area_fraction : ``float``
        Minimal area fraction requirement: boxes whose clipped area falls
        below this fraction of their original area are dropped.
    """
    new_anns = []
    for ann in anns:
        l, t, w, h = ann['bbox']
        area = w * h  # original box area, before any clipping
        # shift the box into the crop window's coordinate system
        l -= left
        t -= upper
        # keep only boxes that intersect the crop window at all
        if l + w >= 0 and l <= width and t + h >= 0 and t <= height:
            # clip to the window; order matters: shrink w/h by the amount
            # hanging off the left/top edge before clamping to the window
            if l < 0:
                w += l
                l = 0
            if t < 0:
                h += t
                t = 0
            w = min(width - l, w)
            h = min(height - t, h)
            # drop boxes that are mostly cropped away
            if w * h < area * minimal_area_fraction:
                continue
            new_anns.append({**ann, "bbox": [l, t, w, h]})
    return new_anns
@curry
def resize(anns, size, output_size):
    """
    Rescale bounding boxes from an image of ``size`` to ``output_size``.

    Parameters
    ----------
    anns : List[Dict]
        Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
    size : Sequence[int]
        Size of the original image.
    output_size : Union[Number, Sequence[int]]
        Desired output size. If size is a sequence like (w, h), the output size will be matched to this.
        If size is an int, the smaller edge of the image will be matched to this number maintaing
        the aspect ratio. i.e, if width > height, then image will be rescaled to
        (output_size * width / height, output_size)
    """
    w, h = size
    if isinstance(output_size, int):
        # int target: smaller edge matched, aspect ratio preserved
        if (w <= h and w == output_size) or (h <= w and h == output_size):
            return anns
        if w < h:
            scale_x = scale_y = output_size / w
        else:
            scale_x = scale_y = output_size / h
    else:
        ow, oh = output_size
        scale_x = ow / w
        scale_y = oh / h
    scaled = []
    for ann in anns:
        bbox = list(ann['bbox'])
        bbox[0] *= scale_x
        bbox[2] *= scale_x
        bbox[1] *= scale_y
        bbox[3] *= scale_y
        scaled.append({**ann, "bbox": bbox})
    return scaled
@curry
def to_percent_coords(anns, size):
    r"""
    Convert absolute coordinates of the bounding boxes to percent coordinates.

    Parameters
    ----------
    anns : ``List[Dict]``
        Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
    size : ``Sequence[int]``
        Size of the original image.
    """
    w, h = size
    # x-components are divided by the width, y-components by the height
    divisors = (w, h, w, h)
    converted = []
    for ann in anns:
        bbox = [coord / d for coord, d in zip(ann['bbox'], divisors)]
        converted.append({**ann, "bbox": bbox})
    return converted
@curry
def to_absolute_coords(anns, size):
    r"""
    Convert percent coordinates of the bounding boxes to absolute coordinates.

    Parameters
    ----------
    anns : ``List[Dict]``
        Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
    size : ``Sequence[int]``
        Size of the original image.
    """
    w, h = size
    # x-components are multiplied by the width, y-components by the height
    factors = (w, h, w, h)
    converted = []
    for ann in anns:
        bbox = [coord * f for coord, f in zip(ann['bbox'], factors)]
        converted.append({**ann, "bbox": bbox})
    return converted
@curry
def hflip(anns, size):
    """
    Horizontally flip the bounding boxes of the given PIL Image.

    Parameters
    ----------
    anns : ``List[Dict]``
        Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
    size : ``Sequence[int]``
        Size of the original image.
    """
    w, _ = size
    flipped = []
    for ann in anns:
        bbox = list(ann['bbox'])
        # new left edge = image width minus the old right edge (l + box width)
        bbox[0] = w - (bbox[0] + bbox[2])
        flipped.append({**ann, "bbox": bbox})
    return flipped
@curry
def hflip2(anns, size):
    """
    Horizontally flip the bounding boxes of the given PIL Image.

    Parameters
    ----------
    anns : ``List[Dict]``
        Sequences of annotation of objects, containing `bbox` of [l, t, r, b].
    size : ``Sequence[int]``
        Size of the original image.
    """
    w, _ = size
    flipped = []
    for ann in anns:
        bbox = list(ann['bbox'])
        # mirror both edges and swap so left stays <= right
        bbox[0], bbox[2] = w - bbox[2], w - bbox[0]
        flipped.append({**ann, "bbox": bbox})
    return flipped
@curry
def vflip(anns, size):
"""
Vertically flip the bounding boxes of the given PIL Image.
Parameters
----------
anns : ``List[Dict]``
Sequences of annotation of objects, containing `bbox` of | |
std, c='m', marker='*')
ax.scatter(key, std1, c='b', marker='o')
ax.scatter(key, std2, c='y', marker='s')
if key > maxx:
maxx = key
print key, std, std1, std2
#label
ax.scatter(key-shift, std, c='m', marker='*', label=r'$\sigma (e)$')
ax.scatter(key, std1, c='b', marker='o', label=r'$\sigma (e_{1})$')
ax.scatter(key, std2, c='y', marker='s', label=r'$\sigma (e_{2})$')
#sort and interpolate
values = np.asarray(values)
frames = np.asarray(frames)
srt = np.argsort(frames)
x = np.arange(frames.min(), frames.max()+1)
f = interpolate.interp1d(frames[srt], values[srt], kind='cubic')
vals = f(x)
ax.plot(x, vals, ':', c='0.2', zorder=20)
try:
msk = vals < reqe
minn = np.min(x[msk])
plt.text(np.mean(frames), 8e-6, r'Flats Required $\raise-.5ex\hbox{$\buildrel>\over\sim$}$ %i' % np.ceil(minn),
ha='center', va='center', fontsize=11)
except:
pass
ax.fill_between(np.arange(maxx+10), np.ones(maxx+10)*reqe, 1.0, facecolor='red', alpha=0.08)
ax.axhline(y=reqe, c='g', ls='--', label='Requirement')
plt.text(1, 0.9*reqe, '%.1e' % reqe, ha='left', va='top', fontsize=11)
ax.set_yscale('log')
ax.set_ylim(5e-6, 1e-4)
ax.set_xlim(0, maxx+1)
ax.set_xlabel('Number of Flat Fields Median Combined')
ax.set_ylabel(r'$\sigma (e_{i})\ , \ \ \ i \in [1,2]$')
if timeStamp:
plt.text(0.83, 1.12, txt, ha='left', va='top', fontsize=9, transform=ax.transAxes, alpha=0.2)
plt.legend(shadow=True, fancybox=True, numpoints=1, scatterpoints=1, markerscale=2.0, ncol=2)
plt.savefig(outdir+'/FlatCalibrationsigmaE.pdf')
plt.close()
#same for R2s
fig = plt.figure()
plt.title(r'VIS Flat Field Calibration: $\frac{\sigma (R^{2})}{R_{ref}^{2}}$')
ax = fig.add_subplot(111)
ax.axhline(y=0, c='k', ls=':')
maxx = 0
frames = []
values = []
#loop over the number of frames combined
for key in res:
dR2 = np.asarray(res[key][3])
#std = np.std(dR2) / ref['R2']
std = np.std(dR2) / np.mean(dR2)
frames.append(key)
values.append(std)
print key, std
ax.scatter(key, std, c='b', marker='s', s=35, zorder=10)
if key > maxx:
maxx = key
#for the legend
ax.scatter(key, std, c='b', marker='s', label=r'$\frac{\sigma (R^{2})}{R_{ref}^{2}}$')
#sort and interpolate
values = np.asarray(values)
frames = np.asarray(frames)
srt = np.argsort(frames)
x = np.arange(frames.min(), frames.max())
f = interpolate.interp1d(frames[srt], values[srt], kind='cubic')
vals = f(x)
ax.plot(x, vals, ':', c='0.2', zorder=10)
try:
msk = vals < reqr2
minn = np.min(x[msk])
plt.text(np.mean(frames), 2e-5, r'Flats Required $\raise-.5ex\hbox{$\buildrel>\over\sim$}$ %i' % np.ceil(minn),
fontsize=11, ha='center', va='center')
except:
pass
#show the requirement
ax.fill_between(np.arange(maxx+10), np.ones(maxx+10)*reqr2, 1.0, facecolor='red', alpha=0.08)
ax.axhline(y=reqr2, c='g', ls='--', label='Requirement')
plt.text(1, 0.9*reqr2, '%.1e' % reqr2, ha='left', va='top', fontsize=11)
ax.set_yscale('log')
ax.set_ylim(5e-6, 1e-3)
ax.set_xlim(0, maxx+1)
ax.set_xlabel('Number of Flat Fields Median Combined')
ax.set_ylabel(r'$\frac{\sigma (R^{2})}{R_{ref}^{2}}$')
if timeStamp:
plt.text(0.83, 1.12, txt, ha='left', va='top', fontsize=9, transform=ax.transAxes, alpha=0.2)
plt.legend(shadow=True, fancybox=True, numpoints=1, scatterpoints=1, markerscale=1.8 )
plt.savefig(outdir+'/FlatCalibrationSigmaR2.pdf')
plt.close()
print '\nDelta results:'
#loop over the number of frames combined
for key in res:
fig = plt.figure()
ax = fig.add_subplot(111)
plt.title(r'VIS Flat Field Calibration (%i exposures): $\delta e$' % key)
de1 = np.asarray(res[key][4])
de2 = np.asarray(res[key][5])
de = np.asarray(res[key][6])
avg1 = np.mean(de1)**2
avg2 = np.mean(de2)**2
avg = np.mean(de)**2
#write down the values
print key, avg, avg1, avg2
plt.text(0.08, 0.9, r'$\left< \delta e_{1} \right>^{2} = %e$' %avg1, fontsize=10, transform=ax.transAxes)
plt.text(0.08, 0.85, r'$\left< \delta e_{2}\right>^{2} = %e$' %avg2, fontsize=10, transform=ax.transAxes)
plt.text(0.08, 0.8, r'$\left< \delta | \bar{e} |\right>^{2} = %e$' %avg, fontsize=10, transform=ax.transAxes)
ax.hist(de, bins=15, color='y', alpha=0.2, label=r'$\delta | \bar{e} |$', normed=True, log=True)
ax.hist(de1, bins=15, color='b', alpha=0.5, label=r'$\delta e_{1}$', normed=True, log=True)
ax.hist(de2, bins=15, color='g', alpha=0.3, label=r'$\delta e_{2}$', normed=True, log=True)
ax.axvline(x=0, ls=':', c='k')
ax.set_ylabel('Probability Density')
ax.set_xlabel(r'$\delta e_{i}\ , \ \ \ i \in [1,2]$')
if timeStamp:
plt.text(0.83, 1.12, txt, ha='left', va='top', fontsize=9, transform=ax.transAxes, alpha=0.2)
plt.legend(shadow=True, fancybox=True, numpoints=1, scatterpoints=1, markerscale=2.0, ncol=2)
plt.savefig(outdir+'/FlatCalibrationEDelta%i.pdf' % key)
plt.close()
#same for R2s
for key in res:
fig = plt.figure()
plt.title(r'VIS Flat Field Calibration (%i exposures): $\frac{\delta R^{2}}{R_{ref}^{2}}$' % key)
ax = fig.add_subplot(111)
dR2 = np.asarray(res[key][7])
avg = np.mean(dR2/ref['R2'])**2
ax.hist(dR2, bins=15, color='y', label=r'$\frac{\delta R^{2}}{R_{ref}^{2}}$', normed=True, log=True)
print key, avg
plt.text(0.1, 0.9, r'$\left<\frac{\delta R^{2}}{R^{2}_{ref}}\right>^{2} = %e$' %avg,
fontsize=10, transform=ax.transAxes)
ax.axvline(x=0, ls=':', c='k')
ax.set_ylabel('Probability Density')
ax.set_xlabel(r'$\frac{\delta R^{2}}{R_{ref}^{2}}$')
if timeStamp:
plt.text(0.83, 1.12, txt, ha='left', va='top', fontsize=9, transform=ax.transAxes, alpha=0.2)
plt.legend(shadow=True, fancybox=True, numpoints=1, scatterpoints=1, markerscale=1.8)
plt.savefig(outdir+'/FlatCalibrationDeltaSize%i.pdf' % key)
plt.close()
def findTolerableError(log, file='data/psf4x.fits', oversample=4.0, psfs=10000, iterations=7, sigma=0.75):
"""
Calculate ellipticity and size for PSFs of different scaling when there is a residual
pixel-to-pixel variations.
"""
#read in PSF and renormalize it
data = pf.getdata(file)
data /= np.max(data)
#PSF scalings for the peak pixel, in electrons
scales = np.random.random_integers(1e2, 2e5, psfs)
#set the scale for shape measurement
settings = dict(sampling=1.0/oversample, itereations=iterations, sigma=sigma)
#residual from a perfect no pixel-to-pixel non-uniformity
residuals = np.logspace(-7, -1.6, 9)[::-1] #largest first
tot = residuals.size
res = {}
for i, residual in enumerate(residuals):
print'%i / %i' % (i+1, tot)
R2 = []
e1 = []
e2 = []
e = []
#loop over the PSFs
for scale in scales:
#random residual pixel-to-pixel variations
if oversample < 1.1:
residualSurface = np.random.normal(loc=1.0, scale=residual, size=data.shape)
elif oversample == 4.0:
tmp = np.random.normal(loc=1.0, scale=residual, size=(170, 170))
residualSurface = zoom(tmp, 4.013, order=0)
else:
sys.exit('ERROR when trying to generate a blocky pixel-to-pixel non-uniformity map...')
#make a copy of the PSF and scale it with the given scaling
#and then multiply with a residual pixel-to-pixel variation
tmp = data.copy() * scale * residualSurface
#measure e and R2 from the postage stamp image
sh = shape.shapeMeasurement(tmp.copy(), log, **settings)
results = sh.measureRefinedEllipticity()
#save values
e1.append(results['e1'])
e2.append(results['e2'])
e.append(results['ellipticity'])
R2.append(results['R2'])
out = dict(e1=np.asarray(e1), e2=np.asarray(e2), e=np.asarray(e), R2=np.asarray(R2))
res[residual] = out
return res
def plotTolerableErrorR2(res, output, req=1e-4):
    """
    Plot the relative scatter of R2 versus the flat-field error level and derive the
    largest error that still satisfies the requirement `req`.

    :param res: dict mapping residual error level -> dict containing an 'R2' array
                (as produced by findTolerableError)
    :param output: output file name for the figure
    :param req: requirement on sigma(R2)/mean(R2)
    """
    fig = plt.figure()
    plt.title(r'VIS Flat Fielding')
    ax = fig.add_subplot(111)

    #loop over the number of bias frames combined
    vals = []
    for key in res.keys():
        dR2 = res[key]['R2']
        #relative scatter of R2 at this error level
        normed = np.std(dR2) / np.mean(dR2)
        ax.scatter(key, normed, c='m', marker='*', s=35)
        vals.append(normed)
        print key, normed

    #for the legend (re-plots the last point, this time with a label)
    ax.scatter(key, normed, c='m', marker='*', label=r'$\frac{\sigma(R^{2})}{R_{ref}^{2}}$')

    #show the requirement
    #NOTE(review): relies on Python 2 dict.keys() returning a list.
    ks = np.asarray(res.keys())
    ran = np.linspace(ks.min() * 0.99, ks.max() * 1.01)
    ax.fill_between(ran, np.ones(ran.size) * req, 1.0, facecolor='red', alpha=0.08)
    ax.axhline(y=req, c='g', ls='--', label='Requirement')

    #find the crossing: interpolate the scatter and locate where it stays below req
    srt = np.argsort(ks)
    values = np.asarray(vals)
    f = interpolate.interp1d(ks[srt], values[srt], kind='cubic')
    x = np.logspace(np.log10(ks.min()), np.log10(ks.max()), 100)
    vals = f(x)
    ax.plot(x, vals, ':', c='0.2', zorder=10)
    msk = vals < req
    #largest error level still meeting the requirement
    maxn = np.max(x[msk])
    plt.text(1e-5, 2e-5, r'Error must be $\leq %.2e$ per cent' % (maxn*100),
             fontsize=11, ha='center', va='center')

    ax.set_yscale('log')
    ax.set_xscale('log')
    ax.set_ylim(1e-7, 1e-2)
    ax.set_xlim(ks.min() * 0.99, ks.max() * 1.01)
    ax.set_xlabel('Error in the Flat Field Map')
    ax.set_ylabel(r'$\frac{\sigma (R^{2})}{R_{ref}^{2}}$')
    plt.legend(shadow=True, fancybox=True, numpoints=1, scatterpoints=1, markerscale=1.8, loc='upper left')
    plt.savefig(output)
    plt.close()
def plotTolerableErrorE(res, output, req=3e-5):
fig = plt.figure()
plt.title(r'VIS Flat Fielding')
ax = fig.add_subplot(111)
#loop over the number of bias frames combined
vals = []
for key in res.keys():
e1 = np.std(res[key]['e1'])
e2 = np.std(res[key]['e'])
e = np.std(res[key]['e'])
vals.append(e)
ax.scatter(key, e1, c='m', marker='*', s=35)
ax.scatter(key, e2, c='y', marker='s', s=35)
ax.scatter(key, e, c='r', marker='o', s=35)
print key, e, e1, e2
#for the legend
ax.scatter(key, e1, c='m', marker='*', label=r'$\sigma(e_{1})$')
ax.scatter(key, e2, c='y', marker='s', label=r'$\sigma(e_{2})$')
ax.scatter(key, e, c='r', marker='o', label=r'$\sigma(e)$')
#show the requirement
ks = np.asarray(res.keys())
ran = np.linspace(ks.min() * 0.99, ks.max() * 1.01)
ax.fill_between(ran, np.ones(ran.size) * req, 1.0, facecolor='red', alpha=0.08)
ax.axhline(y=req, c='g', ls='--', label='Requirement')
#find the crossing
srt = np.argsort(ks)
values = np.asarray(vals)
f = interpolate.interp1d(ks[srt], values[srt], kind='cubic')
x = np.logspace(np.log10(ks.min()), np.log10(ks.max()), 100)
vals = f(x)
ax.plot(x, vals, ':', c='0.2', zorder=10)
msk = vals < req
maxn = np.max(x[msk])
plt.text(1e-5, 2e-5, r'Error for $e$ must be $\leq %.2e$ per cent' % (maxn*100),
fontsize=11, ha='center', va='center')
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_ylim(1e-7, 1e-2)
ax.set_xlim(ks.min() * 0.99, ks.max() * 1.01)
ax.set_xlabel('Error in the Flat Field Map')
ax.set_ylabel(r'$\sigma (e_{i})\ , \ \ \ i \in [1,2]$')
plt.legend(shadow=True, fancybox=True, numpoints=1, scatterpoints=1, markerscale=1.8, loc='upper left')
plt.savefig(output)
plt.close()
def testNoFlatfieldingEffects(log, file='data/psf1x.fits', oversample=1.0, psfs=500):
"""
Calculate ellipticity and size variance and error in case of no pixel-to-pixel flat field correction.
"""
#read in PSF and renormalize it
data = pf.getdata(file)
data /= np.max(data)
data *= 1e5
#derive reference values
settings = dict(sampling=1.0/oversample)
sh = shape.shapeMeasurement(data.copy(), log, **settings)
reference = sh.measureRefinedEllipticity()
print reference
#residual
residual = pf.getdata('data/VISFlatField2percent.fits') #'data/VISFlatField1percent.fits'
if oversample == 4.0:
residual = zoom(zoom(residual, 2, order=0), 2, order=0)
elif oversample == 1.0:
pass
else:
print 'ERROR--cannot do arbitrary oversampling...'
#random positions for the PSFs, these positions are the lower corners
xpositions = np.random.random_integers(0, residual.shape[1] - data.shape[1], psfs)
ypositions = np.random.random_integers(0, residual.shape[0] - data.shape[0], psfs)
#data storage
out = {}
de1 = []
de2 = []
de = []
R2 = []
dR2 = []
e1 = []
e2 = []
e = []
rnd = 1
tot = xpositions.size
#loop over the PSFs
for xpos, ypos in zip(xpositions, ypositions):
print'%i / %i' % (rnd, tot)
rnd += 1
#make a copy of the PSF
tmp = data.copy()
#get the underlying residual surface ond multiple the PSF with the | |
<filename>gen/apache/aurora/api/ttypes.py<gh_stars>1-10
#
# Autogenerated by Thrift Compiler (1.0.0-dev)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
    from thrift.protocol import fastbinary
except ImportError:
    # The accelerated C (de)serialiser is optional; fall back to the pure-Python
    # protocol path when it is unavailable. A bare `except:` previously masked
    # unrelated errors (e.g. KeyboardInterrupt) as well.
    fastbinary = None
class ResponseCode:
    """Status codes reported in scheduler RPC responses."""
    INVALID_REQUEST = 0
    OK = 1
    ERROR = 2
    WARNING = 3
    AUTH_FAILED = 4
    LOCK_ERROR = 5
    ERROR_TRANSIENT = 6

    # Value -> name lookup; the inverse table is derived so it cannot drift.
    _VALUES_TO_NAMES = {
        0: "INVALID_REQUEST", 1: "OK", 2: "ERROR", 3: "WARNING",
        4: "AUTH_FAILED", 5: "LOCK_ERROR", 6: "ERROR_TRANSIENT",
    }
    _NAMES_TO_VALUES = {name: value for value, name in _VALUES_TO_NAMES.items()}
class MaintenanceMode:
    """Host maintenance lifecycle states."""
    NONE = 1
    SCHEDULED = 2
    DRAINING = 3
    DRAINED = 4

    # Value -> name lookup; the inverse table is derived so it cannot drift.
    _VALUES_TO_NAMES = {1: "NONE", 2: "SCHEDULED", 3: "DRAINING", 4: "DRAINED"}
    _NAMES_TO_VALUES = {name: value for value, name in _VALUES_TO_NAMES.items()}
class LockValidation:
    """
    Defines the required lock validation level.
    """
    CHECKED = 0
    UNCHECKED = 1

    # Value -> name lookup; the inverse table is derived so it cannot drift.
    _VALUES_TO_NAMES = {0: "CHECKED", 1: "UNCHECKED"}
    _NAMES_TO_VALUES = {name: value for value, name in _VALUES_TO_NAMES.items()}
class Mode:
    """
    The mode for a volume mount.
    """
    RW = 1
    RO = 2

    # Value -> name lookup; the inverse table is derived so it cannot drift.
    _VALUES_TO_NAMES = {1: "RW", 2: "RO"}
    _NAMES_TO_VALUES = {name: value for value, name in _VALUES_TO_NAMES.items()}
class CronCollisionPolicy:
    """
    Defines the policy for launching a new cron job when one is already running.
    """
    KILL_EXISTING = 0
    CANCEL_NEW = 1
    RUN_OVERLAP = 2

    # Value -> name lookup; the inverse table is derived so it cannot drift.
    _VALUES_TO_NAMES = {0: "KILL_EXISTING", 1: "CANCEL_NEW", 2: "RUN_OVERLAP"}
    _NAMES_TO_VALUES = {name: value for value, name in _VALUES_TO_NAMES.items()}
class ScheduleStatus:
    """
    States that a task may be in.
    """
    INIT = 11
    THROTTLED = 16
    PENDING = 0
    ASSIGNED = 9
    STARTING = 1
    RUNNING = 2
    FINISHED = 3
    PREEMPTING = 13
    RESTARTING = 12
    DRAINING = 17
    FAILED = 4
    KILLED = 5
    KILLING = 6
    LOST = 7

    # Value -> name lookup; the inverse table is derived so it cannot drift.
    _VALUES_TO_NAMES = {
        11: "INIT", 16: "THROTTLED", 0: "PENDING", 9: "ASSIGNED",
        1: "STARTING", 2: "RUNNING", 3: "FINISHED", 13: "PREEMPTING",
        12: "RESTARTING", 17: "DRAINING", 4: "FAILED", 5: "KILLED",
        6: "KILLING", 7: "LOST",
    }
    _NAMES_TO_VALUES = {name: value for value, name in _VALUES_TO_NAMES.items()}
class JobUpdateStatus:
    """
    States that a job update may be in.
    """
    ROLLING_FORWARD = 0
    ROLLING_BACK = 1
    ROLL_FORWARD_PAUSED = 2
    ROLL_BACK_PAUSED = 3
    ROLLED_FORWARD = 4
    ROLLED_BACK = 5
    ABORTED = 6
    ERROR = 7
    FAILED = 8
    ROLL_FORWARD_AWAITING_PULSE = 9
    ROLL_BACK_AWAITING_PULSE = 10

    # Value -> name lookup; the inverse table is derived so it cannot drift.
    _VALUES_TO_NAMES = {
        0: "ROLLING_FORWARD", 1: "ROLLING_BACK", 2: "ROLL_FORWARD_PAUSED",
        3: "ROLL_BACK_PAUSED", 4: "ROLLED_FORWARD", 5: "ROLLED_BACK",
        6: "ABORTED", 7: "ERROR", 8: "FAILED",
        9: "ROLL_FORWARD_AWAITING_PULSE", 10: "ROLL_BACK_AWAITING_PULSE",
    }
    _NAMES_TO_VALUES = {name: value for value, name in _VALUES_TO_NAMES.items()}
class JobUpdateAction:
    """
    Job update actions that can be applied to job instances.
    """
    INSTANCE_UPDATED = 1
    INSTANCE_ROLLED_BACK = 2
    INSTANCE_UPDATING = 3
    INSTANCE_ROLLING_BACK = 4
    INSTANCE_UPDATE_FAILED = 5
    INSTANCE_ROLLBACK_FAILED = 6

    # Value -> name lookup; the inverse table is derived so it cannot drift.
    _VALUES_TO_NAMES = {
        1: "INSTANCE_UPDATED", 2: "INSTANCE_ROLLED_BACK", 3: "INSTANCE_UPDATING",
        4: "INSTANCE_ROLLING_BACK", 5: "INSTANCE_UPDATE_FAILED", 6: "INSTANCE_ROLLBACK_FAILED",
    }
    _NAMES_TO_VALUES = {name: value for value, name in _VALUES_TO_NAMES.items()}
class JobUpdatePulseStatus:
    """
    Status of the coordinated update. Intended as a response to pulseJobUpdate RPC.
    """
    OK = 1
    FINISHED = 2

    # Value -> name lookup; the inverse table is derived so it cannot drift.
    _VALUES_TO_NAMES = {1: "OK", 2: "FINISHED"}
    _NAMES_TO_VALUES = {name: value for value, name in _VALUES_TO_NAMES.items()}
class APIVersion:
    """
    Thrift struct carrying the scheduler API version.

    Attributes:
     - major: major version number (required; validate() raises when unset)
    """

    # Positional thrift field spec: index 0 unused, field 1 is 'major' (i32).
    thrift_spec = (
        None,  # 0
        (1, TType.I32, 'major', None, None, ),  # 1
    )

    def __init__(self, major=None,):
        self.major = major

    def read(self, iprot):
        # Fast path: use the accelerated C decoder when protocol/transport allow it.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: generic field-by-field decoding.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I32:
                    self.major = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: use the accelerated C encoder when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('APIVersion')
        if self.major is not None:
            oprot.writeFieldBegin('major', TType.I32, 1)
            oprot.writeI32(self.major)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # 'major' is a required field in the IDL.
        if self.major is None:
            raise TProtocol.TProtocolException(message='Required field major is unset!')
        return

    def __hash__(self):
        # NOTE(review): hash depends on the mutable 'major' attribute; instances
        # must not be mutated while used as dict/set keys.
        value = 17
        value = (value * 31) ^ hash(self.major)
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class Identity:
    """
    Thrift struct naming the role/user associated with an operation.

    Attributes:
     - role
     - user
    """

    # Positional thrift field spec: field 1 'role', field 2 'user' (both strings).
    thrift_spec = (
        None,  # 0
        (1, TType.STRING, 'role', None, None, ),  # 1
        (2, TType.STRING, 'user', None, None, ),  # 2
    )

    def __init__(self, role=None, user=None,):
        self.role = role
        self.user = user

    def read(self, iprot):
        # Fast path: use the accelerated C decoder when protocol/transport allow it.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: generic field-by-field decoding.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.role = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.user = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: use the accelerated C encoder when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('Identity')
        if self.role is not None:
            oprot.writeFieldBegin('role', TType.STRING, 1)
            oprot.writeString(self.role)
            oprot.writeFieldEnd()
        if self.user is not None:
            oprot.writeFieldBegin('user', TType.STRING, 2)
            oprot.writeString(self.user)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields.
        return

    def __hash__(self):
        # NOTE(review): hash derived from mutable attributes; instances must not
        # be mutated while used as dict/set keys.
        value = 17
        value = (value * 31) ^ hash(self.role)
        value = (value * 31) ^ hash(self.user)
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class SessionKey:
    """
    Thrift struct carrying authentication material for a session.

    Attributes:
     - mechanism: The name of the authentication mechanism, which instructs the server how to interpret the data
    field.
     - data: A blob of data that the server may use for authentication.
    """

    # Positional thrift field spec: ids 1-3 unused, field 4 'mechanism', field 5 'data'.
    thrift_spec = (
        None,  # 0
        None,  # 1
        None,  # 2
        None,  # 3
        (4, TType.STRING, 'mechanism', None, None, ),  # 4
        (5, TType.STRING, 'data', None, None, ),  # 5
    )

    def __init__(self, mechanism=None, data=None,):
        self.mechanism = mechanism
        self.data = data

    def read(self, iprot):
        # Fast path: use the accelerated C decoder when protocol/transport allow it.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: generic field-by-field decoding.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 4:
                if ftype == TType.STRING:
                    self.mechanism = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.STRING:
                    self.data = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: use the accelerated C encoder when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('SessionKey')
        if self.mechanism is not None:
            oprot.writeFieldBegin('mechanism', TType.STRING, 4)
            oprot.writeString(self.mechanism)
            oprot.writeFieldEnd()
        if self.data is not None:
            oprot.writeFieldBegin('data', TType.STRING, 5)
            oprot.writeString(self.data)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields.
        return

    def __hash__(self):
        # NOTE(review): hash derived from mutable attributes; instances must not
        # be mutated while used as dict/set keys.
        value = 17
        value = (value * 31) ^ hash(self.mechanism)
        value = (value * 31) ^ hash(self.data)
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class ResourceAggregate:
"""
Attributes:
- numCpus: Number of CPU cores allotted.
- ramMb: Megabytes of RAM allotted.
- diskMb: Megabytes of disk space allotted.
"""
thrift_spec = (
None, # 0
(1, TType.DOUBLE, 'numCpus', None, None, ), # 1
(2, TType.I64, 'ramMb', None, None, ), # 2
(3, TType.I64, 'diskMb', None, None, ), # 3
)
def __init__(self, numCpus=None, ramMb=None, diskMb=None,):
self.numCpus = numCpus
self.ramMb = ramMb
self.diskMb = diskMb
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, | |
<reponame>yuejiaxiang/semEvel2020_task8
# -*- coding: UTF-8 -*-
# @Time : 2021/1/8
# @Author : <EMAIL>
# Apache License
# Copyright©2020-2021 <EMAIL> All Rights Reserved
import os
import json
import pickle
import random
import shutil
import re
import pandas as pd
from data_process.rule_base_unit import RuleBaseUnit
# Shared rule-based post-processing unit used throughout this module.
rbu = RuleBaseUnit()
# Fix the RNG seed so random sampling/splits are reproducible across runs.
random.seed(10)
def get_mod(ori_text, ori_input=''):
    """
    Heuristically derive quantity modifier labels for a quantity span.

    Labels produced: HasTolerance, IsApproximate, IsList, IsMean, IsMedian, IsRange.

    :param ori_text: text of the quantity span
    :param ori_input: the full input sentence (only consulted for 'median')
    :return: list of modifier labels (derived from a set, so order is unspecified)
    """
    # IsMean: only ~10% handled, the spans containing 'average'.
    # IsMeanHasSD, IsMeanHasTolerance, IsMeanIsRange, IsRangeHasTolerance: not handled.
    # Cases not yet covered for IsList:
    #   20 x 20 degrees
    #   6 kg to 13 kg
    #   85/15%
    mods = set()
    text = ori_text.lower()
    # Renamed from 'input'/'range' below to avoid shadowing the builtins.
    input_text = ori_input.lower()
    approximate_markers = ['approximately', '∼', '≈', '≳', '≲', 'nominally', 'about', 'around', 'close to', 'circa', 'the order of', 'near', 'roughly']
    range_markers = [' – ', '<', '>', '≤', '≥', '⩽', '⩾', '≳', '≲', 'above', 'at least', 'greater than', 'up to', 'to', 'after', 'as low as', 'as much as', 'at least', 'before', 'below', 'between', 'down to', 'last', 'less than', 'more than', 'over', 'range', 'ranging', 'since', 'top', 'up to', 'upto', 'upper', 'within', 'to']
    if '±' in text:
        mods.add('HasTolerance')
    for marker in approximate_markers:
        if marker in text:
            mods.add('IsApproximate')
            break
    # NOTE(review): 'from ... to ...' is tagged IsApproximate here; this looks
    # as if IsRange may have been intended -- confirm against the annotation guide.
    if 'from' in text and 'to' in text:
        mods.add('IsApproximate')
    if 'and' in text or 'or' in text:
        mods.add('IsList')
    if 'average' in text:
        mods.add('IsMean')
    if 'median' in input_text or 'median' in text:
        mods.add('IsMedian')
    for marker in range_markers:
        if marker in text:
            mods.add('IsRange')
            break
    if re.search(r'\d-\d', text):  # raw string: digit-dash-digit, e.g. "3-4"
        mods.add('IsRange')
    # Disabled heuristic kept for reference:
    # if len(mods) == 0:
    #     if '.' not in text:
    #         mods.add('IsCount')
    return list(mods)
class Mod:
    """Lookup table of gold modifier labels with a heuristic fallback."""

    def __init__(self, text_path, mod_path):
        self.read_data(text_path, mod_path)

    def read_data(self, text_path, mod_path):
        """Load quantity texts and their labels; the text file's header row is skipped."""
        with open(text_path, 'r', encoding='utf8') as handle:
            texts = [line.strip().split('\t')[0] for line in handle.readlines()]
        with open(mod_path, 'r', encoding='utf8') as handle:
            labels = [line.strip() for line in handle.readlines()]
        # text -> '&'-joined label string ('Empty' means no modifiers)
        self.mod = dict(zip(texts[1:], labels))

    def get_mod(self, ori_text, ori_input=''):
        """Return modifier labels for ori_text, falling back to the module-level heuristic."""
        if ori_text not in self.mod:
            return get_mod(ori_text, ori_input=ori_input)
        label = self.mod[ori_text]
        return [] if label == 'Empty' else label.split('&')
# Splitting on '.' is tricky; skip that for now.
def text2list(text):
    """
    Tokenise `text` on spaces while recording, for every character position,
    the index of the token it falls in (a separating space maps to the index
    of the token it closes).

    Example: 'ab cd' -> (['ab', 'cd'], {0: 0, 1: 0, 2: 0, 3: 1, 4: 1})

    :return: (token_list, {char_index: token_index})
    """
    tokens = []
    char2token = {}
    current = ''
    for pos, ch in enumerate(text):
        char2token[pos] = len(tokens)
        if ch != ' ':
            current += ch
        elif current:
            tokens.append(current)
            current = ''
    if current:
        tokens.append(current)
    return tokens, char2token
def choose_key(data, keyname):
    """Project each dict in `data` down to only the keys listed in `keyname`."""
    return [{key: record[key] for key in keyname} for record in data]
def get_excel_format(ann_all):
    """
    Group flat annotations into one record per Quantity (keyed by the Quantity's
    annotId), resolving links in four passes:
      1. create a record for each Quantity (text, offset and its 'other' fields),
      2. attach annotations pointing at a Quantity via HasQuantity,
      3. attach annotations pointing at those via HasProperty,
      4. attach Qualifies annotations under a 'Qualifier_<linked type>' key.

    Each element of `ann_all` is
    [startOffset, endOffset, annotType, annotType_append, text, annotId, other].
    """
    excel_list = {}
    annot_2_q = {}  # annotId -> annotId of the Quantity it resolves to
    annot_2_t = {}  # annotId -> annotType
    # add Quantity
    for i, ann in enumerate(ann_all):
        startOffset, endOffset, annotType, annotType_append, text, annotId, other = ann
        if annotType == 'Quantity':
            excel_list[annotId] = {'Quantity': [text, startOffset]}
            for k, v in other.items():
                excel_list[annotId][k] = v
            annot_2_q[annotId] = annotId
            annot_2_t[annotId] = annotType
    # add hasQuantity
    for i, ann in enumerate(ann_all):
        startOffset, endOffset, annotType, annotType_append, text, annotId, other = ann
        for k, v in other.items():
            if k == 'HasQuantity' and v in excel_list:
                excel_list[v][annotType] = [text, startOffset]
                annot_2_q[annotId] = v
                annot_2_t[annotId] = annotType
    # add hasProperty (resolved transitively through the property's Quantity)
    for i, ann in enumerate(ann_all):
        startOffset, endOffset, annotType, annotType_append, text, annotId, other = ann
        for k, v in other.items():
            if k == 'HasProperty' and v in annot_2_q:
                excel_list[annot_2_q[v]][annotType] = [text, startOffset]
                annot_2_q[annotId] = annot_2_q[v]
                annot_2_t[annotId] = annotType
    # add Qualifies
    # Unsure whether duplicates can occur here (original author note).
    for i, ann in enumerate(ann_all):
        startOffset, endOffset, annotType, annotType_append, text, annotId, other = ann
        for k, v in other.items():
            if k == 'Qualifies' and v in annot_2_q:
                excel_list[annot_2_q[v]]['Qualifier_' + annot_2_t[v]] = [text, startOffset]
    return excel_list
def get_ere_format(ann_all):
    """
    Convert flat annotations into (quantity, relation, target) triples.

    :return: (triples, excel_list) where each triple links a Quantity to its
             MeasuredEntity / MeasuredProperty / Qualifier via the relations
             toEntity / toProperty / toQualifier.
    """
    relation_of = {
        'MeasuredEntity': 'toEntity',
        'MeasuredProperty': 'toProperty',
        'Qualifier_Quantity': 'toQualifier',
    }
    excel_list = get_excel_format(ann_all)
    triples = []
    for record in excel_list.values():
        quantity = record['Quantity']
        for field, relation in relation_of.items():
            if field in record:
                triples.append([quantity, relation, record[field]])
    return triples, excel_list
def get_label_format(ann_all, additional_type='append'):
    """
    Build a nested label dict: {entity_type: {text: [[start, end_inclusive], ...]}}.

    When `additional_type` is 'append' and an annotation carries mods
    (annotType_append), the type key becomes '<annotType>-<annotType_append>'.

    Each element of `ann_all` is
    [startOffset, endOffset, annotType, annotType_append, text, annotId, other].

    Note: the original looped once per character of the span, re-running the
    same idempotent insertion endOffset-startOffset times; a single guarded
    insertion is equivalent (empty spans are skipped, exactly as before).
    """
    this_entity = dict()
    for ann in ann_all:
        startOffset, endOffset, annotType, annotType_append, text, _, _ = ann
        if endOffset <= startOffset:
            # Zero-length span: the original per-character loop ran zero times.
            continue
        type_this = annotType
        if annotType_append and additional_type == 'append':
            type_this = annotType + '-' + annotType_append
        spans = this_entity.setdefault(type_this, {}).setdefault(text, [])
        span = [startOffset, endOffset - 1]
        if span not in spans:
            spans.append(span)
    return this_entity
def correct_boundary(b, e, text):
    """
    Widen the span [b, e) so it does not cut through a word: move `b` left and
    `e` right while alphabetic characters continue across the boundary.
    Prints a short report whenever a correction was made.

    :return: (new_begin, new_end, text[new_begin:new_end])
    """
    begin, end = b, e
    limit = len(text)
    # extend left while the span starts mid-word
    while text[begin].isalpha() and begin > 0 and text[begin - 1].isalpha():
        begin -= 1
    # extend right while the span ends mid-word
    while text[end - 1].isalpha() and end <= limit - 1 and text[end].isalpha():
        end += 1
    if (begin, end) != (b, e):
        print('### correct_boundary ###')
        print('ori: {}'.format(text[b:e]))
        print('cor: {}'.format(text[begin:end]))
    return begin, end, text[begin:end]
def read_semeval(path_tsv, path_text, mode='train', additional_type='append', do_correct_boundary=True):
    """
    Read the MeasEval (SemEval-2020 task 8) corpus: one .txt document per file
    plus a matching .tsv annotation table.

    :param path_tsv: directory of .tsv annotation files
    :param path_text: directory of .txt document files
    :param mode: 'test' yields text-only records; otherwise annotations are parsed
    :param additional_type: passed through to get_label_format
    :param do_correct_boundary: widen offsets so they do not cut words
    :return: list of dicts with keys 'text', 'id' and, in train mode, 'anns' and 'label'
    """
    whole_ann = []
    files = os.listdir(path_text)
    for file in files:
        if '.txt' not in file:
            continue
        full_file = os.path.join(path_text, file)
        core, _ = os.path.splitext(file)
        tsv_p = os.path.join(path_tsv, core + '.tsv')
        with open(full_file, 'r', encoding='utf8') as fin:
            text_all = fin.readlines()
        if len(text_all) > 1:
            # Multi-line document: flatten into a single '. '-joined line.
            # NOTE(review): indentation reconstructed from a mangled source --
            # the flattening is assumed to apply only to multi-line files.
            print('warning: len(text) > 1: ', full_file)
            text_all = ''.join(text_all)
            text_all = text_all.replace('.\n', '\n')
            text_all = [text_all.replace('\n', '. ')]
        input_text = text_all[0].strip()
        if mode == 'test':
            # Test mode: no annotations available.
            whole_ann.append({'text': input_text, 'id': core})
            continue
        if not os.path.exists(tsv_p):
            print('tsv not exist for {}'.format(full_file))
            continue
        data = pd.read_csv(tsv_p, sep='\t', header=0)
        ann_all = []
        for index, row in data.iterrows():
            annotSet = int(row['annotSet'])
            annotType = row['annotType']
            startOffset = int(row['startOffset'])
            endOffset = int(row['endOffset'])
            annotId = row['annotId']
            text = row['text']
            # 'other' holds optional JSON metadata (mods, link targets, ...).
            if pd.isnull(row['other']):
                other = {}
            else:
                other = json.loads(row['other'])
            if do_correct_boundary:
                startOffset, endOffset, text = correct_boundary(startOffset, endOffset, text_all[0])
            if input_text[startOffset: endOffset] != text:
                print('error: text not match: {}'.format(text))
            annotType_append = None
            if 'mods' in other:
                if len(other['mods']) > 1:
                    # print('mods > 1: {}'.format(core))
                    pass
                # Canonical mods label: sorted and '&'-joined.
                annotType_append = '&'.join(sorted(other['mods']))
            ann_all.append([startOffset, endOffset, annotType, annotType_append, text, annotId, other])
        this_entity = get_label_format(ann_all, additional_type=additional_type)
        whole_ann.append({'text': input_text, 'anns': ann_all, 'label': this_entity, 'id': core})
    return whole_ann
def read_semeval_list(path_tsv_list, path_text_list, mode='train', additional_type='notpad'):
    """Read several (tsv_dir, text_dir) pairs and concatenate their annotation lists."""
    combined = []
    for tsv_dir, text_dir in zip(path_tsv_list, path_text_list):
        combined.extend(read_semeval(tsv_dir, text_dir, mode=mode, additional_type=additional_type))
    return combined
## Sentence-splitting helpers
def cut_sentence(text):
    """
    Split `text` into sentences on '. ' boundaries -- skipping splits after a
    capital letter (abbreviations) and before digits/percent signs -- and merge
    any fragment shorter than 100 characters into the previous sentence.

    :return: (sentences, [[start, end], ...]) with per-sentence character offsets
    """
    splitter = re.compile(r"(?<=[^A-Z]\.) (?![0-9%])")
    fragments = re.split(splitter, text)

    sentences = []
    for fragment in fragments:
        # The first fragment always starts a sentence; short later fragments
        # are glued back onto the previous one.
        if sentences and len(fragment) < 100:
            sentences[-1] = sentences[-1] + " " + fragment
        else:
            sentences.append(fragment)

    offsets = []
    cursor = 0
    for sentence in sentences:
        offsets.append([cursor, cursor + len(sentence)])
        cursor += len(sentence) + 1
    return (sentences, offsets)
def cut_sentence_old(text):
    """
    Legacy sentence splitter: break after every literal '. ' occurrence.
    Prints its intermediate results for debugging.

    :return: (sentences, [[start, end], ...])
    """
    sents = []
    sents_indx = []
    start = 0
    last = 0
    for pos in range(len(text)):
        last = pos
        if pos >= 1 and text[pos - 1:pos + 1] == '. ':
            sents.append(text[start:pos])
            sents_indx.append([start, pos])
            start = pos + 1
    # Trailing fragment after the last '. ' (if any text remains).
    if start < len(text):
        sents.append(text[start:last + 1])
        sents_indx.append([start, last + 1])
    print('text: ', text)
    print('sents: ', sents)
    print('sents_indx: ', sents_indx)
    return sents, sents_indx
def sliding_window(text, window=50, step=20):
    """
    Cut `text` into overlapping chunks of length `window`, one starting every
    `step` characters; the final chunks are truncated at the end of the text.

    :return: (chunks, [[start, end], ...])
    """
    chunks = []
    spans = []
    length = len(text)
    for start in range(0, length, step):
        stop = min(length, start + window)
        chunks.append(text[start:stop])
        spans.append([start, stop])
    return chunks, spans
def split_data(whole_ann, mode='train', method='cut_sentence', additional_type='notpad'):
    """
    Split whole-document annotation records into per-sentence (or per-window) samples.

    :param whole_ann: list of document dicts as produced by read_semeval
    :param mode: 'train' carries gold annotations over; 'test' emits text-only samples
    :param method: 'cut_sentence' or 'sliding_window'
    :param additional_type: passed through to get_label_format
    :raises ValueError: for an unknown `method` (previously a latent NameError,
        since `split_method` was only bound for the two known values)
    :return: list of per-segment sample dicts
    """
    split_methods = {'cut_sentence': cut_sentence, 'sliding_window': sliding_window}
    if method not in split_methods:
        raise ValueError('unknown split method: {}'.format(method))
    split_method = split_methods[method]

    new_whole_ann = []
    for wann in whole_ann:
        text = wann['text']
        if mode == 'train':
            anns = wann['anns']
        sents, sents_indx = split_method(text)
        for sent, sentx in zip(sents, sents_indx):
            if mode == 'test':
                new_whole_ann.append({
                    'text': sent,
                    'sentx': sentx,
                    'id': wann['id'],
                    'quantity': [],
                    'excel': [],
                })
                continue
            # Keep only annotations falling entirely inside this segment,
            # re-based to segment-local offsets.
            new_anns = []
            for ann in anns:
                # renamed unpack target (was 'text', shadowing the document text)
                startOffset, endOffset, annotType, annotType_append, ann_text, annotId, other = ann
                if startOffset >= sentx[0] and endOffset <= sentx[1]:
                    new_anns.append([startOffset - sentx[0], endOffset - sentx[0], annotType, annotType_append, ann_text, annotId, other])
            ann_dict_format = get_label_format(new_anns, additional_type=additional_type)
            ere_triple_format, excel_list = get_ere_format(new_anns)
            new_whole_ann.append({
                'text': sent,
                'anns': new_anns,
                'label': ann_dict_format,
                'quantity': ere_triple_format,
                'excel': excel_list,
                'sentx': sentx,
                'id': wann['id']
            })
    return new_whole_ann
def add_rel(data):
anno_set = {}
id = 0
for type, v in data.items():
for text in v:
for vi in v[text]:
anno_id = vi[2]
if anno_id not in anno_set:
anno_set[anno_id] = []
annotId = 'T' + str(id)
id += 1
vi.append(annotId)
anno_set[anno_id].append(vi + [type, text])
rel = {}
for anno_id, v in anno_set.items():
anno_set[anno_id].sort(key=lambda x: x[0])
q = []
q_rel = {}
for i, vi in enumerate(v):
if vi[4] == 'Quantity':
q.append(vi)
q_rel[vi[3]] = []
for i, vi in enumerate(v):
if vi[4] == 'Quantity':
continue
this_dis = []
for j, qi in enumerate(q):
dis = min(abs(qi[0]-vi[1]), abs(qi[1]-vi[0]))
this_dis.append([dis, j])
this_dis.sort(key=lambda x: x[0])
if len(this_dis) > 0:
q_rel[q[this_dis[0][1]][3]].append(vi)
for k, v in q_rel.items():
# p2q
p = []
for vi in v:
if vi[4] == 'MeasuredProperty':
p.append(vi[3])
rel[vi[3]] = ['HasQuantity', k]
if vi[4] == 'Qualifier':
rel[vi[3]] = ['Qualifies', k]
for | |
cache `event` information to ensure that we:
* Remember which event we're currently in across restarts
* Provide an on-demand informational embed without re-querying the branding repository
An event change should always be handled via this function, as it ensures that the cache is populated.
The #changelog notification is omitted when `event` is fallback, or already applied.
Return a 2-tuple indicating whether the banner, and the icon, were applied successfully.
"""
log.info(f"Entering event: '{event.path}'.")
banner_success = await self.apply_banner(event.banner) # Only one asset ~ apply directly.
await self.initiate_icon_rotation(event.icons) # Prepare a new rotation.
icon_success = await self.rotate_icons() # Apply an icon from the new rotation.
# This will only be False in the case of a manual same-event re-synchronisation.
event_changed = event.path != await self.cache_information.get("event_path")
# Cache event identity to avoid re-entry in case of restart.
await self.cache_information.set("event_path", event.path)
# Cache information shown in the 'about' embed.
await self.populate_cache_event_description(event)
# Notify guild of new event ~ this reads the information that we cached above.
if event_changed and not event.meta.is_fallback:
await self.send_info_embed(Channels.change_log, is_notification=True)
else:
log.trace("Omitting #changelog notification. Event has not changed, or new event is fallback.")
return banner_success, icon_success
async def synchronise(self) -> t.Tuple[bool, bool]:
    """
    Fetch the current event and delegate to `enter_event`.

    Convenience wrapper used to force synchronisation via a command, generally
    only in a recovery scenario; the daemon normally passes its `Event` to
    `enter_event` directly.

    Return a 2-tuple indicating whether the banner, and the icon, were applied
    successfully.
    """
    log.debug("Synchronise: fetching current event.")
    current_event, available_events = await self.repository.get_current_event()
    await self.populate_cache_events(available_events)

    if current_event is not None:
        return await self.enter_event(current_event)

    log.error("Failed to fetch event. Cannot synchronise!")
    return False, False
async def populate_cache_events(self, events: t.List[Event]) -> None:
    """
    Clear `cache_events` and refill it with each event's name and duration.

    This is the information presented in the calendar command; if the frontend
    format changes, adjust it here. The fallback event is never stored, as it
    does not appear in the calendar.
    """
    log.debug("Populating events cache.")
    await self.cache_events.clear()

    visible = sorted(
        (event for event in events if not event.meta.is_fallback),
        key=attrgetter("meta.start_date"),
    )
    log.trace(f"Writing {len(visible)} events (fallback omitted).")

    # The cache raises ValueError when updated with an empty mapping.
    with contextlib.suppress(ValueError):
        await self.cache_events.update({
            extract_event_name(event): extract_event_duration(event)
            for event in visible
        })
async def populate_cache_event_description(self, event: Event) -> None:
    """
    Cache the description and formatted duration of `event`.

    Called when entering a new event, and periodically afterwards so the cache
    stays fresh if a running event's description changes. The duration string
    is pre-formatted for the frontend and not intended for programmatic use.
    """
    log.debug("Caching event description & duration.")
    await self.cache_information.set("event_description", event.meta.description)
    await self.cache_information.set("event_duration", extract_event_duration(event))
# endregion
# region: Daemon
async def maybe_start_daemon(self) -> None:
    """
    Start the daemon if it has been explicitly enabled via a command.

    The flag lives in `cache_information` and is None when it was never set.
    """
    log.debug("Checking whether daemon should start.")
    enabled: t.Optional[bool] = await self.cache_information.get("daemon_active")

    if enabled:
        self.daemon_loop.start()
def cog_unload(self) -> None:
    """
    Cancel the daemon when the cog unloads.

    This is **not** done automatically -- without it the task keeps running in
    the background.
    """
    log.debug("Cog unload: cancelling daemon.")
    self.daemon_loop.cancel()
async def daemon_main(self) -> None:
    """
    Synchronise guild & caches with branding repository.

    Pull the currently active event from the branding repository and check whether it matches the currently
    active event in the cache. If not, apply the new event.

    However, it is also possible that an event's assets change as it's active. To account for such cases,
    we check the banner & icons hashes against the currently cached values. If there is a mismatch, each
    specific asset is re-applied.
    """
    log.info("Daemon main: checking current event.")
    new_event, available_events = await self.repository.get_current_event()
    await self.populate_cache_events(available_events)
    if new_event is None:
        log.warning("Daemon main: failed to get current event from branding repository, will do nothing.")
        return
    # A different event path means an event change: enter it and stop here
    # (enter_event applies both assets itself).
    if new_event.path != await self.cache_information.get("event_path"):
        log.debug("Daemon main: new event detected!")
        await self.enter_event(new_event)
        return
    await self.populate_cache_event_description(new_event)  # Cache fresh frontend info in case of change.
    log.trace("Daemon main: event has not changed, checking for change in assets.")
    if new_event.banner.sha != await self.cache_information.get("banner_hash"):
        log.debug("Daemon main: detected banner change.")
        await self.apply_banner(new_event.banner)
    if compound_hash(new_event.icons) != await self.cache_information.get("icons_hash"):
        # Icon set changed while the event stayed the same: restart the rotation.
        log.debug("Daemon main: detected icon change.")
        await self.initiate_icon_rotation(new_event.icons)
        await self.rotate_icons()
    else:
        # Same icons: rotate only if the rotation period has elapsed.
        await self.maybe_rotate_icons()
@tasks.loop(hours=24)
async def daemon_loop(self) -> None:
    """
    Call `daemon_main` every 24 hours.

    The scheduler maintains an exact 24-hour frequency even if this coroutine takes time to complete. If the
    coroutine is started at 00:01 and completes at 00:05, it will still be started at 00:01 the next day.
    """
    log.trace("Daemon loop: calling daemon main.")
    try:
        await self.daemon_main()
    except Exception:
        # Swallow after logging: an unhandled exception would stop the task loop.
        log.exception("Daemon loop: failed with an unhandled exception!")
@daemon_loop.before_loop
async def daemon_before(self) -> None:
    """
    Run one iteration immediately, then sleep until shortly after UTC midnight.

    The first synchronisation happens right at daemon start; the loop proper
    then begins at 00:01 UTC so that imprecise sleeping cannot undershoot the
    date boundary.
    """
    log.trace("Daemon before: performing start-up iteration.")
    await self.daemon_loop()

    log.trace("Daemon before: calculating time to sleep before loop begins.")
    # NOTE(review): naive UTC timestamps; consistent as long as both sides stay UTC.
    now = datetime.utcnow()

    # Aim at 00:01 rather than 00:00 to protect against imprecise sleep.
    next_day = now + timedelta(days=1)
    midnight = datetime.combine(next_day, time(minute=1))
    sleep_secs = (midnight - now).total_seconds()

    log.trace(f"Daemon before: sleeping {sleep_secs} seconds before next-up midnight: {midnight}.")
    await asyncio.sleep(sleep_secs)
# endregion
# region: Command interface (branding)
@commands.group(name="branding")
async def branding_group(self, ctx: commands.Context) -> None:
    """Control the branding cog."""
    # When the group is invoked bare (no subcommand), fall back to help text.
    if ctx.invoked_subcommand is None:
        await ctx.send_help(ctx.command)
@branding_group.command(name="about", aliases=("current", "event"))
async def branding_about_cmd(self, ctx: commands.Context) -> None:
    """Show the current event's description and duration."""
    # Reuse the info-embed helper, flagged as a direct command response
    # rather than an automated notification.
    await self.send_info_embed(ctx.channel.id, is_notification=False)
@commands.has_any_role(*MODERATION_ROLES)
@branding_group.command(name="sync")
async def branding_sync_cmd(self, ctx: commands.Context) -> None:
    """
    Force branding synchronisation.

    Show which assets have failed to synchronise, if any.
    """
    async with ctx.typing():
        banner_ok, icon_ok = await self.synchronise()
        # Collect the names of assets whose synchronisation reported failure.
        failures = [
            asset_name
            for asset_name, ok in (("banner", banner_ok), ("icon", icon_ok))
            if ok is False
        ]
        if failures:
            failed_assets = ", ".join(failures)
            resp = make_embed("Synchronisation unsuccessful", f"Failed to apply: {failed_assets}.", success=False)
            resp.set_footer(text="Check log for details.")
        else:
            resp = make_embed("Synchronisation successful", "Assets have been applied.", success=True)
        await ctx.send(embed=resp)
# endregion
# region: Command interface (branding calendar)
@branding_group.group(name="calendar", aliases=("schedule", "events"))
async def branding_calendar_group(self, ctx: commands.Context) -> None:
"""
Show the current event calendar.
We draw event information from `cache_events` and use each key-value pair to create a field in the response
embed. As such, we do not need to query the API to get event information. The cache is automatically
re-populated by the daemon whenever it makes a request. A moderator+ can also explicitly request a cache
refresh using the 'refresh' subcommand.
Due to Discord limitations, we only show up to 25 events. This is entirely sufficient at the time of writing.
In the case that we find ourselves with more than 25 events, a warning log will alert core devs.
In the future, we may be interested in a field-paginating solution.
"""
if ctx.invoked_subcommand:
# If you're wondering why this works: when the 'refresh' subcommand eventually re-invokes
# this group, the attribute will be automatically set to None by the framework.
return
available_events = await self.cache_events.to_dict()
log.trace(f"Found {len(available_events)} cached events available for calendar view.")
if not available_events:
resp = make_embed("No events found!", "Cache may be empty, try `branding calendar refresh`.", success=False)
await ctx.send(embed=resp)
return
embed = discord.Embed(title="Current event calendar", colour=discord.Colour.blurple())
# Because Discord embeds can only contain up to 25 fields, we only show the first 25.
first_25 = list(available_events.items())[:25]
if len(first_25) != len(available_events): # Alert core devs that a paginating solution is now necessary.
log.warning(f"There are {len(available_events)} events, but the calendar view can only display 25.")
for name, duration in first_25:
embed.add_field(name=name[:256], value=duration[:1024])
embed.set_footer(text="Otherwise, | |
# Copyright (c) 2021 <NAME>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
"""Reporter classes.
Classes implementing presentation, storage and retrieval of major geometric
characteristics of the spatial systems. Provides a unified set of
class methods intended to be used in a similar way.
Two operational modes are possible:
- initial analysis, visualization and export of the results:
R.create(tp, s).pipeline(sp)
- import and visualization of stored analysis results:
R.create(tp, s).restore(p)
where:
'R': the report class name,
'tp': type of the spatial system,
's': flag of interactive visualization,
'sp': list of spatial system instances to be analysed,
'p': path to the stored analysis results.
"""
from __future__ import annotations
import json
from pathlib import Path
from typing import Final, Optional
import numpy as np
import cytoskeleton_analyser.fitting as fit
from ..histograms import Experimental
from ..histograms import Histogram
from ..histograms import Simulated
from ..report import Report
from .spatial_systems import FullDepth
from .spatial_systems import ListOfSpatialSystems
class Features:
    """Classification of reported features.

    Groups reporter class names by the spatial-system representation
    ('full' or 'slice') they apply to, and provides membership
    predicates plus name-to-class resolution.
    """

    #: Features applicable to both full and sliced cell representations.
    common: Final[list[str]] = [
        'Lengths3d',
        'Lengths2d',
        'Curvature3d',
        'RadialMass',
        'RadialEnds',
        'AnglesToRad',
        'SegmentNumbers',
    ]

    #: Features applicable to full cell representation alone.
    only_full: Final[list[str]] = [
        'AgesByNode',
        'AgesByFilament',
    ]

    #: Features applicable to sliced cell representation alone.
    only_slice: Final[list[str]] = [
        'Curvature2dConv',
        'Curvature2dMboc17',
    ]

    #: Complete set of implemented features.
    all: Final[list[str]] = common + only_full + only_slice

    __all__ = all

    @staticmethod
    def is_common(f: str) -> bool:
        """True if ``f`` belongs to set common to both representations.

        :param f: Feature class name.
        """
        # Plain membership test replaces the original any()-scan.
        return f in Features.common

    @staticmethod
    def is_full(f: str) -> bool:
        """True if feature ``f`` is applicable to full representation.

        :param f: Feature class name.
        """
        return f in Features.only_full

    @staticmethod
    def is_slice(f: str) -> bool:
        """True if feature ``f`` is applicable to slice representation.

        :param f: Feature class name.
        """
        return f in Features.only_slice

    @staticmethod
    def is_any(f: str) -> bool:
        """True if feature ``f`` is applicable to any representation.

        :param f: Feature class name.
        """
        return f in Features.all

    @staticmethod
    def is_applicable(
            f: str,
            tp: type[FullDepth],
    ) -> bool:
        """True if feature ``f`` is applicable to representation ``tp``.

        :param f: Feature class name.
        :param tp: Spatial-system class; its ``type`` attribute is
            expected to be 'full' or 'slice'.
        """
        return \
            Features.is_common(f) or \
            tp.type == 'slice' and Features.is_slice(f) or \
            tp.type == 'full' and Features.is_full(f)

    @staticmethod
    def reporter(f: str):
        """Convert feature name ``f`` to corresponding reporter type.

        :param f: Feature name.
        :raises ValueError: If ``f`` is not a known feature name.
        """
        if not Features.is_any(f):
            raise ValueError(f"{f} is not a valid position_feature.")
        # Reporter classes are defined at module level under the feature name.
        return globals()[f]
class _Report(Report):
    """Adaptation of report.Report class for spatial systems.

    For subclassing specific to reported cytoskeleton attributes.
    """

    tp: type[FullDepth]  #: Type of the spatial system.

    @classmethod
    def _create(
            cls,
            tp: type[FullDepth],
            name: str,
            show: bool = True,
    ) -> None:
        # Remember the spatial-system type for later use by the subclasses'
        # report()/plot() methods.
        cls.tp = tp
        # Name mangling turns `cls.__create` into `cls._Report__create`.
        # Presumably the base class `Report` defines a private `__create`
        # initialiser: names mangled inside `Report` also become
        # `_Report__create` (leading underscores of the class name are
        # stripped when mangling), so the lookup resolves to the base-class
        # method — TODO confirm against report.Report.
        cls.__create(
            tp.logger,
            tp.paths.data_out,
            name + '_' + tp.type,  # e.g. 'Lengths3d_full' / 'Lengths3d_slice'
            show
        )
class Lengths3d(_Report):
    """Reports of 3d microtubule lengths."""

    #: Part of figure and report titles.
    LABEL: Final = '3d filament length in'

    @classmethod
    def create(
            cls,
            tp: type[FullDepth],
            show: bool = True,
    ) -> type[Lengths3d]:
        """Prepare the reporter for spatial-system type ``tp``."""
        # `__class__.__name__` is the lexically enclosing class name,
        # regardless of which subclass `cls` happens to be.
        super()._create(tp, __class__.__name__, show)
        cls.units = tp.len_units
        return cls

    @classmethod
    def report(
            cls,
            sp: ListOfSpatialSystems,
    ) -> tuple[Histogram, list, list]:
        """Histogram 3d filament lengths over all systems in ``sp``."""
        lengths = [s.len_total3d for s in sp]
        avg, std = cls.tp.print_avgstd(cls.LABEL, lengths, cls.units)
        hist = Histogram(
            cls.name,
            Simulated().initialise(lengths, cls.fits_sim, dx=0.4, density=True)
        )
        hist.to_csv(cls.path_out)
        cls.plot(hist)
        cls.logger.info('')
        return hist, [avg], [std]

    @classmethod
    def plot(
            cls,
            h: Histogram
    ) -> None:
        """Render histogram ``h`` with length-axis settings."""
        h.plot(
            cls.LABEL + ' ' + cls.tp.type,
            xlabel=f'length ({cls.units})',
            xlim=[0., 60.],
            save_path=cls.path_out,
            show=cls.show
        )

    @classmethod
    def pipeline(
            cls,
            sp: ListOfSpatialSystems,
    ) -> None:
        """Analyse, visualise and export results for ``sp``."""
        # Candidate fit specifications, all disabled by default:
        # (fit.Gamma.loc0, [2., 0.1, 0.])
        # (fit.Weibull.full, [2., 1.])
        # (fit.Rayleigh.f, [1.])
        cls.fits_sim = []
        cls.summarize(cls.report(sp), [0])
class Lengths2d(_Report):
    """Reports lengths of xy projections of the microtubules.

    Examines apparent lengths of simulated microtubules and compares
    them with experimental data obtained using optical microscopy.
    For this purpose, implements experimental sets from microtubule
    length measurements with superresolution methods
    by Zhang et al. (MBoC 2017).
    """

    #: Part of figure and report titles.
    LABEL: Final = 'length of filament 2d projections in'

    @classmethod
    def create(
            cls,
            tp: type[FullDepth],
            show: bool = True,
    ) -> type[Lengths2d]:
        """Prepare the reporter for spatial-system type ``tp``.

        :param tp: Type of the spatial system.
        :param show: If True, display figures interactively.
        """
        super()._create(tp, __class__.__name__, show)
        cls.units = tp.len_units
        return cls

    @classmethod
    def _experimental(
            cls,
            cell_type: str,
    ):
        """Return the empirical 2d length histogram for ``cell_type``.

        :param cell_type: Cell type name: 'RW_Protr' or 'SpreRou'.
        :return: Tuple (bin centers, histogram values, average).
        :raises ValueError: If ``cell_type`` has no empirical data set.
        """
        import cytoskeleton_analyser.position.empirical_data.mboc17 as mboc17

        bc, (contr, ca_ras) = mboc17.length(density=True)
        if cell_type == 'RW_Protr':
            h = ca_ras
        elif cell_type == 'SpreRou':
            h = contr
        else:
            # Previously an unknown cell type fell through to an
            # UnboundLocalError on `h`; fail explicitly instead.
            raise ValueError(f"No empirical data for cell type '{cell_type}'.")
        avg = mboc17.avg(bc, h)
        cls.logger.info('\nEmpirical length of filament 2d projections in ' +
                        f'{cls.tp.type}: {avg} {cls.units}')
        return bc, h, avg

    @classmethod
    def report(
            cls,
            sp: ListOfSpatialSystems,
    ) -> tuple[Histogram, list, list]:
        """Histogram 2d projected lengths, with empirical comparison.

        When reporting a slice of a cell type with available empirical
        data, the histogram also carries the experimental distribution
        and the returned avg/std lists gain a second (empirical) entry.
        """
        data = [s.len_total2d for s in sp]
        avg, std = cls.tp.print_avgstd(cls.LABEL, data, cls.units)
        ct = cls.tp.params['cell'].typename
        if cls.tp.type == 'slice' and ct in ('RW_Protr', 'SpreRou'):
            bc, hl, hl_avg = cls._experimental(ct)
            e = Experimental().initialise((bc, hl), cls.fits_exp)
            h = Histogram(
                cls.name,
                Simulated().initialise(
                    data, cls.fits_sim, dx=0.4, exper_bc=e.bc, density=True),
                experimental=e
            )
            # Empirical std is unknown; mark it as NaN.
            avg, std = [avg, hl_avg], [std, np.nan]
        else:
            h = Histogram(
                cls.name,
                Simulated().initialise(
                    data, cls.fits_sim, dx=0.4, density=True),
            )
            avg, std = [avg], [std]
        h.to_csv(cls.path_out)
        cls.plot(h)
        cls.logger.info('')
        return h, avg, std

    @classmethod
    def plot(
            cls,
            h: Histogram
    ) -> None:
        """Render histogram ``h`` with length-axis settings."""
        h.plot(
            cls.LABEL + ' ' + cls.tp.type,
            xlabel=f'length ({cls.units})',
            xlim=[0., 60.],
            save_path=cls.path_out,
            show=cls.show
        )

    @classmethod
    def pipeline(
            cls,
            sp: ListOfSpatialSystems,
    ) -> None:
        """Analyse, visualise and export results for ``sp``."""
        best = []
        if cls.tp.type == 'full':
            # Candidate fit specifications (disabled by default):
            cls.fits_sim = [
                # (fit.Gamma.loc0, [1., 1, 0.]),
                # (fit.Weibull.full, [2., 3.]),
            ]
            best = [0]
        if cls.tp.type == 'slice':
            # e = fit.Exponential.create()
            # p = fit.Exponential.Pars
            # tu = cls.units
            cls.fits_exp = [
                # (e.d_h, p(a=1., tau1=2.), tu),
                # (fit.Gamma.loc0, [1., 1, 0.]),
                # (fit.Weibull.full, [2., 3.]),
            ]
            cls.fits_sim = [
                # (e.d_h, p(a=1., tau1=2.), tu),
                # (fit.Gamma.loc0, [1., 1, 0.]),
                # (fit.Weibull.full, [2., 3.]),
            ]
            best = [1, 1]
        rep = cls.report(sp)
        cls.summarize(rep, best)
class RadialMass(_Report):
    """Reports distribution of microtubule mass.

    Microtubule mass is analysed as a function of distance to cell
    center in xy plane.
    """

    #: Part of figure and report titles.
    LABEL: Final = 'mass vs distance to center '

    @classmethod
    def create(
            cls,
            tp: type[FullDepth],
            show: bool = True,
    ) -> type[RadialMass]:
        """Prepare the reporter for spatial-system type ``tp``."""
        super()._create(tp, __class__.__name__, show)
        cls.units = tp.len_units
        return cls

    @classmethod
    def report(
            cls,
            sp: ListOfSpatialSystems
    ) -> tuple[Histogram, list, list]:
        """Histogram xy-plane center distances over all systems in ``sp``."""
        distances = [np.concatenate(s.center_dist_2d) for s in sp]
        avg, std = cls.tp.print_avgstd(cls.LABEL, distances, cls.units)
        # NOTE(review): relies on `fits_sim` being provided by the base
        # class — pipeline() does not set it here; confirm.
        hist = Histogram(
            cls.name,
            Simulated().initialise(
                distances, fits=cls.fits_sim, dx=0.25, density=True
            ),
        )
        hist.to_csv(cls.path_out)
        cls.plot(hist)
        cls.logger.info('')
        return hist, [avg], [std]

    @classmethod
    def plot(
            cls,
            h: Histogram
    ):
        """Render histogram ``h`` with radial-distance axis settings."""
        h.plot(
            cls.LABEL + cls.tp.type,
            xlabel=f'length ({cls.units})',
            xlim=[0., 30.],
            save_path=cls.path_out,
            show=cls.show,
        )

    @classmethod
    def pipeline(
            cls,
            sp: ListOfSpatialSystems,
    ) -> None:
        """Analyse, visualise and export results for ``sp``."""
        cls.report(sp)
class RadialEnds(_Report):
"""Reports positions of microtubule plus ends.
Analyse the distribution of microtubule plus ends as a function
of distance to cell | |
self.gds_validate_integer(ival_, node, 'tpInsc')
self.tpInsc = ival_
elif nodeName_ == 'nrInsc':
nrInsc_ = child_.text
nrInsc_ = self.gds_validate_string(nrInsc_, node, 'nrInsc')
self.nrInsc = nrInsc_
# end class TEmpregador
class TIdeVinculoNisObrig(GeneratedsSuper):
    """Employment-link information ("Informações do Vínculo")."""
    # Generated XML binding (generateDS style); field names (cpfTrab,
    # nisTrab, matricula) suggest a Brazilian eSocial schema — TODO confirm.
    subclass = None
    superclass = None

    def __init__(self, cpfTrab=None, nisTrab=None, matricula=None):
        self.original_tagname_ = None
        # Optional child-element values (strings).
        self.cpfTrab = cpfTrab
        self.nisTrab = nisTrab
        self.matricula = matricula

    def factory(*args_, **kwargs_):
        # Prefer a dynamically registered subclass, then the static hook,
        # then this class itself.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, TIdeVinculoNisObrig)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if TIdeVinculoNisObrig.subclass:
            return TIdeVinculoNisObrig.subclass(*args_, **kwargs_)
        else:
            return TIdeVinculoNisObrig(*args_, **kwargs_)
    factory = staticmethod(factory)

    # Generated accessor pairs.
    def get_cpfTrab(self): return self.cpfTrab
    def set_cpfTrab(self, cpfTrab): self.cpfTrab = cpfTrab
    def get_nisTrab(self): return self.nisTrab
    def set_nisTrab(self, nisTrab): self.nisTrab = nisTrab
    def get_matricula(self): return self.matricula
    def set_matricula(self, matricula): self.matricula = matricula

    def hasContent_(self):
        # True when at least one child element is set.
        if (
            self.cpfTrab is not None or
            self.nisTrab is not None or
            self.matricula is not None
        ):
            return True
        else:
            return False

    def export(self, outfile, level, namespace_='', name_='TIdeVinculoNisObrig', namespacedef_='', pretty_print=True):
        # Serialise this object as an XML element to `outfile`.
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('TIdeVinculoNisObrig')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='TIdeVinculoNisObrig')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='TIdeVinculoNisObrig', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No content: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TIdeVinculoNisObrig'):
        # No XML attributes are defined for this type.
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='TIdeVinculoNisObrig', fromsubclass_=False, pretty_print=True):
        # Write each non-None child as its own escaped element.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.cpfTrab is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%scpfTrab>%s</%scpfTrab>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.cpfTrab), input_name='cpfTrab')), namespace_, eol_))
        if self.nisTrab is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%snisTrab>%s</%snisTrab>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.nisTrab), input_name='nisTrab')), namespace_, eol_))
        if self.matricula is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%smatricula>%s</%smatricula>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.matricula), input_name='matricula')), namespace_, eol_))

    def build(self, node):
        # Populate this object from an XML tree node.
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self

    def buildAttributes(self, node, attrs, already_processed):
        # No XML attributes are defined for this type.
        pass

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Assign each child element's validated text to the matching attribute.
        if nodeName_ == 'cpfTrab':
            cpfTrab_ = child_.text
            cpfTrab_ = self.gds_validate_string(cpfTrab_, node, 'cpfTrab')
            self.cpfTrab = cpfTrab_
        elif nodeName_ == 'nisTrab':
            nisTrab_ = child_.text
            nisTrab_ = self.gds_validate_string(nisTrab_, node, 'nisTrab')
            self.nisTrab = nisTrab_
        elif nodeName_ == 'matricula':
            matricula_ = child_.text
            matricula_ = self.gds_validate_string(matricula_, node, 'matricula')
            self.matricula = matricula_
# end class TIdeVinculoNisObrig
class cpfTrab(GeneratedsSuper):
    """Generated element type with no attributes or children."""
    subclass = None
    superclass = None

    def __init__(self):
        self.original_tagname_ = None

    def factory(*args_, **kwargs_):
        # Prefer a dynamically registered subclass, then the static hook.
        if CurrentSubclassModule_ is not None:
            registered = getSubclassFromModule_(
                CurrentSubclassModule_, cpfTrab)
            if registered is not None:
                return registered(*args_, **kwargs_)
        if cpfTrab.subclass:
            return cpfTrab.subclass(*args_, **kwargs_)
        return cpfTrab(*args_, **kwargs_)
    factory = staticmethod(factory)

    def hasContent_(self):
        # This element type defines no children or text content.
        return False

    def export(self, outfile, level, namespace_='', name_='cpfTrab', namespacedef_='', pretty_print=True):
        """Serialise this element to ``outfile``."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('cpfTrab')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        eol_ = '\n' if pretty_print else ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='cpfTrab')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='cpfTrab', pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # Always taken: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='cpfTrab'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='cpfTrab', fromsubclass_=False, pretty_print=True):
        pass

    def build(self, node):
        """Populate this element from XML ``node``."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self

    def buildAttributes(self, node, attrs, already_processed):
        pass

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class cpfTrab
class nisTrab(GeneratedsSuper):
    """Generated element type with no attributes or children."""
    subclass = None
    superclass = None

    def __init__(self):
        self.original_tagname_ = None

    def factory(*args_, **kwargs_):
        # Prefer a dynamically registered subclass, then the static hook.
        if CurrentSubclassModule_ is not None:
            registered = getSubclassFromModule_(
                CurrentSubclassModule_, nisTrab)
            if registered is not None:
                return registered(*args_, **kwargs_)
        if nisTrab.subclass:
            return nisTrab.subclass(*args_, **kwargs_)
        return nisTrab(*args_, **kwargs_)
    factory = staticmethod(factory)

    def hasContent_(self):
        # This element type defines no children or text content.
        return False

    def export(self, outfile, level, namespace_='', name_='nisTrab', namespacedef_='', pretty_print=True):
        """Serialise this element to ``outfile``."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('nisTrab')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        eol_ = '\n' if pretty_print else ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='nisTrab')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='nisTrab', pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # Always taken: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='nisTrab'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='nisTrab', fromsubclass_=False, pretty_print=True):
        pass

    def build(self, node):
        """Populate this element from XML ``node``."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self

    def buildAttributes(self, node, attrs, already_processed):
        pass

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class nisTrab
class matricula(GeneratedsSuper):
    """Generated element type with no attributes or children."""
    subclass = None
    superclass = None

    def __init__(self):
        self.original_tagname_ = None

    def factory(*args_, **kwargs_):
        # Prefer a dynamically registered subclass, then the static hook.
        if CurrentSubclassModule_ is not None:
            registered = getSubclassFromModule_(
                CurrentSubclassModule_, matricula)
            if registered is not None:
                return registered(*args_, **kwargs_)
        if matricula.subclass:
            return matricula.subclass(*args_, **kwargs_)
        return matricula(*args_, **kwargs_)
    factory = staticmethod(factory)

    def hasContent_(self):
        # This element type defines no children or text content.
        return False

    def export(self, outfile, level, namespace_='', name_='matricula', namespacedef_='', pretty_print=True):
        """Serialise this element to ``outfile``."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('matricula')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        eol_ = '\n' if pretty_print else ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='matricula')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='matricula', pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # Always taken: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='matricula'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='matricula', fromsubclass_=False, pretty_print=True):
        pass

    def build(self, node):
        """Populate this element from XML ``node``."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self

    def buildAttributes(self, node, attrs, already_processed):
        pass

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class matricula
class TRemun(GeneratedsSuper):
"""Remuneração e periodicidade de pagamento"""
subclass = None
superclass = None
def __init__(self, vrSalFx=None, undSalFixo=None, dscSalVar=None):
self.original_tagname_ = None
self.vrSalFx = vrSalFx
self.undSalFixo = undSalFixo
self.dscSalVar = dscSalVar
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, TRemun)
if subclass is not None:
return subclass(*args_, **kwargs_)
if TRemun.subclass:
return TRemun.subclass(*args_, **kwargs_)
else:
return TRemun(*args_, **kwargs_)
factory = staticmethod(factory)
def get_vrSalFx(self): return self.vrSalFx
def set_vrSalFx(self, vrSalFx): self.vrSalFx = vrSalFx
def get_undSalFixo(self): return self.undSalFixo
def set_undSalFixo(self, undSalFixo): self.undSalFixo = undSalFixo
def get_dscSalVar(self): return self.dscSalVar
def set_dscSalVar(self, dscSalVar): self.dscSalVar = dscSalVar
def hasContent_(self):
if (
self.vrSalFx is not None or
self.undSalFixo is not None or
self.dscSalVar is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='TRemun', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('TRemun')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='TRemun')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='TRemun', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TRemun'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='TRemun', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.vrSalFx is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%svrSalFx>%s</%svrSalFx>%s' % (namespace_, self.gds_format_float(self.vrSalFx, input_name='vrSalFx'), namespace_, eol_))
if self.undSalFixo is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sundSalFixo>%s</%sundSalFixo>%s' % (namespace_, self.gds_format_integer(self.undSalFixo, input_name='undSalFixo'), namespace_, eol_))
if self.dscSalVar is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sdscSalVar>%s</%sdscSalVar>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.dscSalVar), input_name='dscSalVar')), namespace_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'vrSalFx':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
| |
parameters used for building the model
model_kwargs=dict(n_estimators=100,
max_depth=100,
max_features="auto"),
# parameters used for building the validation set
split_kwargs=dict(val_fraction=0.5,
subset=None,
shard_size=50000,
to_disk=False)))
# --- ChEMBL task registrations --------------------------------------------
# One task per (standard_type, assay_chembl_id) pair and per approximate
# oracle (RandomForest / FullyConnected / LSTM / ResNet / Transformer),
# each in a raw-SMILES and a Morgan-fingerprint variant.
# NOTE(review): standard_type and assay_chembl_id are free variables here;
# presumably bound by an enclosing loop above this chunk -- confirm.
register(f'ChEMBL_{standard_type}_{assay_chembl_id}-RandomForest-v0',
         'design_bench.datasets.discrete.chembl_dataset:ChEMBLDataset',
         'design_bench.oracles.sklearn:RandomForestOracle',
         # keyword arguments for building the dataset
         dataset_kwargs=dict(
             max_samples=None,
             distribution=None,
             max_percentile=40,
             min_percentile=0,
             assay_chembl_id=assay_chembl_id,
             standard_type=standard_type),
         # keyword arguments for building RandomForest oracle
         oracle_kwargs=dict(
             noise_std=0.0,
             max_samples=2000,
             distribution=None,
             max_percentile=100,
             min_percentile=0,
             # parameters used for building the model
             model_kwargs=dict(n_estimators=100,
                               max_depth=100,
                               # NOTE(review): max_features="auto" was
                               # removed in scikit-learn 1.3 -- confirm the
                               # pinned sklearn version still accepts it.
                               max_features="auto"),
             # parameters used for building the validation set
             split_kwargs=dict(val_fraction=0.5,
                               subset=None,
                               shard_size=50000,
                               to_disk=False)))
register(f'ChEMBL_{standard_type}_{assay_chembl_id}_MorganFingerprint-FullyConnected-v0',
         'design_bench.datasets.discrete.chembl_dataset:ChEMBLDataset',
         'design_bench.oracles.tensorflow:FullyConnectedOracle',
         # keyword arguments for building the dataset
         dataset_kwargs=dict(
             max_samples=None,
             distribution=None,
             max_percentile=40,
             min_percentile=0,
             assay_chembl_id=assay_chembl_id,
             standard_type=standard_type),
         # keyword arguments for training FullyConnected oracle
         oracle_kwargs=dict(
             noise_std=0.0,
             max_samples=None,
             distribution=None,
             max_percentile=100,
             min_percentile=0,
             # process the data into morgan fingerprints
             feature_extractor=MorganFingerprintFeatures(dtype=np.float32),
             # parameters used for building the model
             model_kwargs=dict(embedding_size=32,
                               hidden_size=512,
                               activation='relu',
                               num_layers=2,
                               epochs=5,
                               shuffle_buffer=5000,
                               learning_rate=0.0001),
             # parameters used for building the validation set
             split_kwargs=dict(val_fraction=0.1,
                               subset=None,
                               shard_size=50000,
                               to_disk=False)))
register(f'ChEMBL_{standard_type}_{assay_chembl_id}-FullyConnected-v0',
         'design_bench.datasets.discrete.chembl_dataset:ChEMBLDataset',
         'design_bench.oracles.tensorflow:FullyConnectedOracle',
         # keyword arguments for building the dataset
         dataset_kwargs=dict(
             max_samples=None,
             distribution=None,
             max_percentile=40,
             min_percentile=0,
             assay_chembl_id=assay_chembl_id,
             standard_type=standard_type),
         # keyword arguments for training FullyConnected oracle
         oracle_kwargs=dict(
             noise_std=0.0,
             max_samples=None,
             distribution=None,
             max_percentile=100,
             min_percentile=0,
             # parameters used for building the model
             model_kwargs=dict(embedding_size=32,
                               hidden_size=512,
                               activation='relu',
                               num_layers=2,
                               epochs=20,
                               shuffle_buffer=5000,
                               learning_rate=0.0001),
             # parameters used for building the validation set
             split_kwargs=dict(val_fraction=0.1,
                               subset=None,
                               shard_size=50000,
                               to_disk=False)))
register(f'ChEMBL_{standard_type}_{assay_chembl_id}_MorganFingerprint-LSTM-v0',
         'design_bench.datasets.discrete.chembl_dataset:ChEMBLDataset',
         'design_bench.oracles.tensorflow:LSTMOracle',
         # keyword arguments for building the dataset
         dataset_kwargs=dict(
             max_samples=None,
             distribution=None,
             max_percentile=40,
             min_percentile=0,
             assay_chembl_id=assay_chembl_id,
             standard_type=standard_type),
         # keyword arguments for training LSTM oracle
         oracle_kwargs=dict(
             noise_std=0.0,
             max_samples=None,
             distribution=None,
             max_percentile=100,
             min_percentile=0,
             # process the data into morgan fingerprints
             feature_extractor=MorganFingerprintFeatures(dtype=np.int32),
             # parameters used for building the model
             model_kwargs=dict(hidden_size=64,
                               num_layers=2,
                               epochs=20,
                               shuffle_buffer=5000,
                               learning_rate=0.001),
             # parameters used for building the validation set
             split_kwargs=dict(val_fraction=0.1,
                               subset=None,
                               shard_size=50000,
                               to_disk=False)))
register(f'ChEMBL_{standard_type}_{assay_chembl_id}-LSTM-v0',
         'design_bench.datasets.discrete.chembl_dataset:ChEMBLDataset',
         'design_bench.oracles.tensorflow:LSTMOracle',
         # keyword arguments for building the dataset
         dataset_kwargs=dict(
             max_samples=None,
             distribution=None,
             max_percentile=40,
             min_percentile=0,
             assay_chembl_id=assay_chembl_id,
             standard_type=standard_type),
         # keyword arguments for training LSTM oracle
         oracle_kwargs=dict(
             noise_std=0.0,
             max_samples=None,
             distribution=None,
             max_percentile=100,
             min_percentile=0,
             # parameters used for building the model
             model_kwargs=dict(hidden_size=64,
                               num_layers=2,
                               epochs=20,
                               shuffle_buffer=5000,
                               learning_rate=0.001),
             # parameters used for building the validation set
             split_kwargs=dict(val_fraction=0.1,
                               subset=None,
                               shard_size=50000,
                               to_disk=False)))
register(f'ChEMBL_{standard_type}_{assay_chembl_id}_MorganFingerprint-ResNet-v0',
         'design_bench.datasets.discrete.chembl_dataset:ChEMBLDataset',
         'design_bench.oracles.tensorflow:ResNetOracle',
         # keyword arguments for building the dataset
         dataset_kwargs=dict(
             max_samples=None,
             distribution=None,
             max_percentile=40,
             min_percentile=0,
             assay_chembl_id=assay_chembl_id,
             standard_type=standard_type),
         # keyword arguments for training ResNet oracle
         oracle_kwargs=dict(
             noise_std=0.0,
             max_samples=None,
             distribution=None,
             max_percentile=100,
             min_percentile=0,
             # process the data into morgan fingerprints
             feature_extractor=MorganFingerprintFeatures(dtype=np.int32),
             # parameters used for building the model
             model_kwargs=dict(hidden_size=64,
                               activation='relu',
                               kernel_size=3,
                               num_blocks=4,
                               epochs=20,
                               shuffle_buffer=5000,
                               learning_rate=0.001),
             # parameters used for building the validation set
             split_kwargs=dict(val_fraction=0.1,
                               subset=None,
                               shard_size=50000,
                               to_disk=False)))
register(f'ChEMBL_{standard_type}_{assay_chembl_id}-ResNet-v0',
         'design_bench.datasets.discrete.chembl_dataset:ChEMBLDataset',
         'design_bench.oracles.tensorflow:ResNetOracle',
         # keyword arguments for building the dataset
         dataset_kwargs=dict(
             max_samples=None,
             distribution=None,
             max_percentile=40,
             min_percentile=0,
             assay_chembl_id=assay_chembl_id,
             standard_type=standard_type),
         # keyword arguments for training ResNet oracle
         oracle_kwargs=dict(
             noise_std=0.0,
             max_samples=None,
             distribution=None,
             max_percentile=100,
             min_percentile=0,
             # parameters used for building the model
             model_kwargs=dict(hidden_size=64,
                               activation='relu',
                               kernel_size=3,
                               num_blocks=4,
                               epochs=20,
                               shuffle_buffer=5000,
                               learning_rate=0.001),
             # parameters used for building the validation set
             split_kwargs=dict(val_fraction=0.1,
                               subset=None,
                               shard_size=50000,
                               to_disk=False)))
register(f'ChEMBL_{standard_type}_{assay_chembl_id}_MorganFingerprint-Transformer-v0',
         'design_bench.datasets.discrete.chembl_dataset:ChEMBLDataset',
         'design_bench.oracles.tensorflow:TransformerOracle',
         # keyword arguments for building the dataset
         dataset_kwargs=dict(
             max_samples=None,
             distribution=None,
             max_percentile=40,
             min_percentile=0,
             assay_chembl_id=assay_chembl_id,
             standard_type=standard_type),
         # keyword arguments for training Transformer oracle
         oracle_kwargs=dict(
             noise_std=0.0,
             internal_batch_size=32,
             max_samples=None,
             distribution=None,
             max_percentile=100,
             min_percentile=0,
             # process the data into morgan fingerprints
             feature_extractor=MorganFingerprintFeatures(dtype=np.int32),
             # parameters used for building the model
             model_kwargs=dict(hidden_size=128,
                               feed_forward_size=512,
                               activation='relu',
                               num_heads=4,
                               num_blocks=4,
                               epochs=20,
                               shuffle_buffer=20000,
                               learning_rate=0.0001,
                               dropout_rate=0.2),
             # parameters used for building the validation set
             split_kwargs=dict(val_fraction=0.1,
                               subset=None,
                               shard_size=50000,
                               to_disk=False)))
register(f'ChEMBL_{standard_type}_{assay_chembl_id}-Transformer-v0',
         'design_bench.datasets.discrete.chembl_dataset:ChEMBLDataset',
         'design_bench.oracles.tensorflow:TransformerOracle',
         # keyword arguments for building the dataset
         dataset_kwargs=dict(
             max_samples=None,
             distribution=None,
             max_percentile=40,
             min_percentile=0,
             assay_chembl_id=assay_chembl_id,
             standard_type=standard_type),
         # keyword arguments for training Transformer oracle
         oracle_kwargs=dict(
             noise_std=0.0,
             internal_batch_size=32,
             max_samples=None,
             distribution=None,
             max_percentile=100,
             min_percentile=0,
             # parameters used for building the model
             model_kwargs=dict(hidden_size=128,
                               feed_forward_size=512,
                               activation='relu',
                               num_heads=4,
                               num_blocks=4,
                               epochs=20,
                               shuffle_buffer=20000,
                               learning_rate=0.0001,
                               dropout_rate=0.2),
             # parameters used for building the validation set
             split_kwargs=dict(val_fraction=0.1,
                               subset=None,
                               shard_size=50000,
                               to_disk=False)))
# --- Exact-oracle continuous tasks ----------------------------------------
# Exact oracles evaluate the ground-truth objective directly, so only
# noise_std is configurable.
register('ToyContinuous-Exact-v0',
         'design_bench.datasets.continuous.toy_continuous_dataset:ToyContinuousDataset',
         'design_bench.oracles.exact:ToyContinuousOracle',
         # keyword arguments for building the dataset
         dataset_kwargs=dict(
             max_samples=None,
             distribution=None,
             max_percentile=40,
             min_percentile=0),
         # keyword arguments for building the exact oracle
         oracle_kwargs=dict(
             noise_std=0.0))
register('HopperController-Exact-v0',
         'design_bench.datasets.continuous.hopper_controller_dataset:HopperControllerDataset',
         'design_bench.oracles.exact:HopperControllerOracle',
         # keyword arguments for building the dataset
         dataset_kwargs=dict(
             max_samples=None,
             # NOTE(review): unlike most tasks, this keeps the full range
             # (max_percentile=100) -- presumably intentional; confirm.
             distribution=None,
             max_percentile=100,
             min_percentile=0),
         # keyword arguments for building the exact oracle
         oracle_kwargs=dict(
             noise_std=0.0))
# --- HopperController with learned approximate oracles --------------------
register('HopperController-GP-v0',
         'design_bench.datasets.continuous.hopper_controller_dataset:HopperControllerDataset',
         'design_bench.oracles.sklearn:GaussianProcessOracle',
         # keyword arguments for building the dataset
         dataset_kwargs=dict(
             max_samples=None,
             distribution=None,
             max_percentile=100,
             min_percentile=0),
         # keyword arguments for building GP oracle
         oracle_kwargs=dict(
             noise_std=0.0,
             max_samples=2000,
             distribution=None,
             max_percentile=100,
             min_percentile=0,
             # parameters used for building the model
             # NOTE(review): a lower bound of 0.0 for these kernel
             # hyperparameter bounds looks suspect (sklearn log-transforms
             # bounds); the Superconductor registration uses 1e-9 -- confirm.
             model_kwargs=dict(kernel=ConstantKernel(
                 constant_value=1.0, constant_value_bounds=(0.0, 10.0)) *
                 RBF(length_scale=0.5, length_scale_bounds=(0.0, 10.0)) +
                 RBF(length_scale=2.0, length_scale_bounds=(0.0, 10.0))),
             # parameters used for building the validation set
             split_kwargs=dict(val_fraction=0.5,
                               subset=None,
                               shard_size=5000,
                               to_disk=True,
                               disk_target="hopper_controller/split",
                               is_absolute=False)))
register('HopperController-RandomForest-v0',
         'design_bench.datasets.continuous.hopper_controller_dataset:HopperControllerDataset',
         'design_bench.oracles.sklearn:RandomForestOracle',
         # keyword arguments for building the dataset
         dataset_kwargs=dict(
             max_samples=None,
             distribution=None,
             max_percentile=100,
             min_percentile=0),
         # keyword arguments for building RandomForest oracle
         oracle_kwargs=dict(
             noise_std=0.0,
             max_samples=2000,
             distribution=None,
             max_percentile=100,
             min_percentile=0,
             # parameters used for building the model
             model_kwargs=dict(n_estimators=100,
                               max_depth=100,
                               # NOTE(review): max_features="auto" was
                               # removed in scikit-learn 1.3 -- confirm the
                               # pinned sklearn version still accepts it.
                               max_features="auto"),
             # parameters used for building the validation set
             split_kwargs=dict(val_fraction=0.5,
                               subset=None,
                               shard_size=5000,
                               to_disk=True,
                               disk_target="hopper_controller/split",
                               is_absolute=False)))
register('HopperController-FullyConnected-v0',
         'design_bench.datasets.continuous.hopper_controller_dataset:HopperControllerDataset',
         'design_bench.oracles.tensorflow:FullyConnectedOracle',
         # keyword arguments for building the dataset
         dataset_kwargs=dict(
             max_samples=None,
             distribution=None,
             max_percentile=100,
             min_percentile=0),
         # keyword arguments for training FullyConnected oracle
         oracle_kwargs=dict(
             noise_std=0.0,
             max_samples=None,
             distribution=None,
             max_percentile=100,
             min_percentile=0,
             # parameters used for building the model
             model_kwargs=dict(hidden_size=512,
                               activation='relu',
                               num_layers=2,
                               epochs=20,
                               shuffle_buffer=5000,
                               learning_rate=0.001),
             # parameters used for building the validation set
             split_kwargs=dict(val_fraction=0.1,
                               subset=None,
                               shard_size=5000,
                               to_disk=True,
                               disk_target="hopper_controller/split",
                               is_absolute=False)))
# --- Superconductor with learned approximate oracles ----------------------
register('Superconductor-GP-v0',
         'design_bench.datasets.continuous.superconductor_dataset:SuperconductorDataset',
         'design_bench.oracles.sklearn:GaussianProcessOracle',
         # keyword arguments for building the dataset
         dataset_kwargs=dict(
             max_samples=None,
             distribution=None,
             max_percentile=80,
             min_percentile=0),
         # keyword arguments for building GP oracle
         oracle_kwargs=dict(
             noise_std=0.0,
             max_samples=2000,
             distribution=None,
             max_percentile=100,
             min_percentile=0,
             # parameters used for building the model
             # (bounds use 1e-9 here, unlike the 0.0 used elsewhere)
             model_kwargs=dict(kernel=ConstantKernel(
                 constant_value=1.0, constant_value_bounds=(1e-9, 10.0)) *
                 RBF(length_scale=0.5, length_scale_bounds=(1e-9, 10.0)) +
                 RBF(length_scale=2.0, length_scale_bounds=(1e-9, 10.0))),
             # parameters used for building the validation set
             split_kwargs=dict(val_fraction=0.5,
                               subset=None,
                               shard_size=2000,
                               to_disk=False,
                               disk_target=None,
                               is_absolute=None)))
register('Superconductor-RandomForest-v0',
         'design_bench.datasets.continuous.superconductor_dataset:SuperconductorDataset',
         'design_bench.oracles.sklearn:RandomForestOracle',
         # keyword arguments for building the dataset
         dataset_kwargs=dict(
             max_samples=None,
             distribution=None,
             max_percentile=80,
             min_percentile=0),
         # keyword arguments for building RandomForest oracle
         oracle_kwargs=dict(
             noise_std=0.0,
             max_samples=2000,
             distribution=None,
             max_percentile=100,
             min_percentile=0,
             # parameters used for building the model
             model_kwargs=dict(n_estimators=100,
                               max_depth=100,
                               # NOTE(review): max_features="auto" was
                               # removed in scikit-learn 1.3 -- confirm the
                               # pinned sklearn version still accepts it.
                               max_features="auto"),
             # parameters used for building the validation set
             split_kwargs=dict(val_fraction=0.5,
                               subset=None,
                               shard_size=5000,
                               to_disk=True,
                               disk_target="superconductor/split",
                               is_absolute=False)))
register('Superconductor-FullyConnected-v0',
         'design_bench.datasets.continuous.superconductor_dataset:SuperconductorDataset',
         'design_bench.oracles.tensorflow:FullyConnectedOracle',
         # keyword arguments for building the dataset
         dataset_kwargs=dict(
             max_samples=None,
             distribution=None,
             max_percentile=80,
             min_percentile=0),
         # keyword arguments for training FullyConnected oracle
         oracle_kwargs=dict(
             noise_std=0.0,
             max_samples=None,
             distribution=None,
             max_percentile=100,
             min_percentile=0,
             # parameters used for building the model
             model_kwargs=dict(hidden_size=512,
                               activation='relu',
                               num_layers=2,
                               epochs=5,
                               shuffle_buffer=5000,
                               learning_rate=0.001),
             # parameters used for building the validation set
             split_kwargs=dict(val_fraction=0.1,
                               subset=None,
                               shard_size=5000,
                               to_disk=True,
                               disk_target="superconductor/split",
                               is_absolute=False)))
# --- AntMorphology tasks (exact + learned oracles) ------------------------
register('AntMorphology-Exact-v0',
         'design_bench.datasets.continuous.ant_morphology_dataset:AntMorphologyDataset',
         'design_bench.oracles.exact:AntMorphologyOracle',
         # keyword arguments for building the dataset
         dataset_kwargs=dict(
             max_samples=None,
             distribution=None,
             max_percentile=40,
             min_percentile=0),
         # keyword arguments for building the exact oracle
         oracle_kwargs=dict(
             noise_std=0.0))
register('AntMorphology-GP-v0',
         'design_bench.datasets.continuous.ant_morphology_dataset:AntMorphologyDataset',
         'design_bench.oracles.sklearn:GaussianProcessOracle',
         # keyword arguments for building the dataset
         dataset_kwargs=dict(
             max_samples=None,
             distribution=None,
             max_percentile=40,
             min_percentile=0),
         # keyword arguments for building GP oracle
         oracle_kwargs=dict(
             noise_std=0.0,
             max_samples=2000,
             distribution=None,
             max_percentile=100,
             min_percentile=0,
             # parameters used for building the model
             # NOTE(review): 0.0 lower bounds -- see HopperController-GP note.
             model_kwargs=dict(kernel=ConstantKernel(
                 constant_value=1.0, constant_value_bounds=(0.0, 10.0)) *
                 RBF(length_scale=0.5, length_scale_bounds=(0.0, 10.0)) +
                 RBF(length_scale=2.0, length_scale_bounds=(0.0, 10.0))),
             # parameters used for building the validation set
             split_kwargs=dict(val_fraction=0.5,
                               subset=None,
                               shard_size=25000,
                               to_disk=False,
                               disk_target=None,
                               is_absolute=None)))
register('AntMorphology-RandomForest-v0',
         'design_bench.datasets.continuous.ant_morphology_dataset:AntMorphologyDataset',
         'design_bench.oracles.sklearn:RandomForestOracle',
         # keyword arguments for building the dataset
         dataset_kwargs=dict(
             max_samples=None,
             distribution=None,
             max_percentile=40,
             min_percentile=0),
         # keyword arguments for building RandomForest oracle
         oracle_kwargs=dict(
             noise_std=0.0,
             max_samples=2000,
             distribution=None,
             max_percentile=100,
             min_percentile=0,
             # parameters used for building the model
             model_kwargs=dict(n_estimators=100,
                               max_depth=100,
                               # NOTE(review): max_features="auto" was
                               # removed in scikit-learn 1.3 -- confirm the
                               # pinned sklearn version still accepts it.
                               max_features="auto"),
             # parameters used for building the validation set
             split_kwargs=dict(val_fraction=0.5,
                               subset=None,
                               shard_size=25000,
                               to_disk=False,
                               disk_target=None,
                               is_absolute=None)))
register('AntMorphology-FullyConnected-v0',
         'design_bench.datasets.continuous.ant_morphology_dataset:AntMorphologyDataset',
         'design_bench.oracles.tensorflow:FullyConnectedOracle',
         # keyword arguments for building the dataset
         dataset_kwargs=dict(
             max_samples=None,
             distribution=None,
             max_percentile=40,
             min_percentile=0),
         # keyword arguments for training FullyConnected oracle
         oracle_kwargs=dict(
             noise_std=0.0,
             max_samples=None,
             distribution=None,
             max_percentile=100,
             min_percentile=0,
             # parameters used for building the model
             model_kwargs=dict(hidden_size=512,
                               activation='relu',
                               num_layers=2,
                               epochs=5,
                               shuffle_buffer=5000,
                               learning_rate=0.001),
             # parameters used for building the validation set
             split_kwargs=dict(val_fraction=0.1,
                               subset=None,
                               shard_size=25000,
                               to_disk=False,
                               disk_target=None,
                               is_absolute=None)))
# --- DKittyMorphology tasks (exact + learned oracles) ---------------------
register('DKittyMorphology-Exact-v0',
         'design_bench.datasets.continuous.dkitty_morphology_dataset:DKittyMorphologyDataset',
         'design_bench.oracles.exact:DKittyMorphologyOracle',
         # keyword arguments for building the dataset
         dataset_kwargs=dict(
             max_samples=None,
             distribution=None,
             max_percentile=40,
             min_percentile=0),
         # keyword arguments for building the exact oracle
         oracle_kwargs=dict(
             noise_std=0.0))
register('DKittyMorphology-GP-v0',
         'design_bench.datasets.continuous.dkitty_morphology_dataset:DKittyMorphologyDataset',
         'design_bench.oracles.sklearn:GaussianProcessOracle',
         # keyword arguments for building the dataset
         dataset_kwargs=dict(
             max_samples=None,
             distribution=None,
             max_percentile=40,
             min_percentile=0),
         # keyword arguments for building GP oracle
         oracle_kwargs=dict(
             noise_std=0.0,
             max_samples=2000,
             distribution=None,
             max_percentile=100,
             min_percentile=0,
             # parameters used for building the model
             # NOTE(review): 0.0 lower bounds -- see HopperController-GP note.
             model_kwargs=dict(kernel=ConstantKernel(
                 constant_value=1.0, constant_value_bounds=(0.0, 10.0)) *
                 RBF(length_scale=0.5, length_scale_bounds=(0.0, 10.0)) +
                 RBF(length_scale=2.0, length_scale_bounds=(0.0, 10.0))),
             # parameters used for building the validation set
             split_kwargs=dict(val_fraction=0.5,
                               subset=None,
                               shard_size=25000,
                               to_disk=False,
                               disk_target=None,
                               is_absolute=None)))
register('DKittyMorphology-RandomForest-v0',
         'design_bench.datasets.continuous.dkitty_morphology_dataset:DKittyMorphologyDataset',
         'design_bench.oracles.sklearn:RandomForestOracle',
         # keyword arguments for building the dataset
         dataset_kwargs=dict(
             max_samples=None,
             distribution=None,
             max_percentile=40,
             min_percentile=0),
         # keyword arguments for building RandomForest oracle
         oracle_kwargs=dict(
             noise_std=0.0,
             max_samples=2000,
             distribution=None,
             max_percentile=100,
             min_percentile=0,
             # parameters used for building the model
             model_kwargs=dict(n_estimators=100,
                               max_depth=100,
                               # NOTE(review): max_features="auto" was
                               # removed in scikit-learn 1.3 -- confirm the
                               # pinned sklearn version still accepts it.
                               max_features="auto"),
             # parameters used for building the validation set
             split_kwargs=dict(val_fraction=0.5,
                               subset=None,
                               shard_size=25000,
                               to_disk=False,
                               disk_target=None,
                               is_absolute=None)))
register('DKittyMorphology-FullyConnected-v0',
'design_bench.datasets.continuous.dkitty_morphology_dataset:DKittyMorphologyDataset',
'design_bench.oracles.tensorflow:FullyConnectedOracle',
# keyword arguments for building the dataset
dataset_kwargs=dict(
max_samples=None,
distribution=None,
max_percentile=40,
min_percentile=0),
# keyword arguments for training FullyConnected oracle
oracle_kwargs=dict(
noise_std=0.0,
| |
# ckan/tests/legacy/functional/test_package.py
# encoding: utf-8
import pytest
from six import string_types
from ckan.common import config
from difflib import unified_diff
from ckan.tests.legacy import url_for
import ckan.tests.legacy as tests
from ckan.tests.legacy.html_check import HtmlCheckMethods
import ckan.model as model
from ckan.lib.create_test_data import CreateTestData
from ckan.logic.action import get, update
from ckan import plugins
from ckan.lib.search.common import SolrSettings
from ckan.tests.helpers import body_contains
# Expected HTML fragments (label + input) for a pre-existing package extra;
# interpolated with package_id / key / capitalized_key / value by the tests.
existing_extra_html = (
    '<label class="field_opt" for="Package-%(package_id)s-extras-%(key)s">%(capitalized_key)s</label>',
    '<input id="Package-%(package_id)s-extras-%(key)s" name="Package-%(package_id)s-extras-%(key)s" size="20" type="text" value="%(value)s">',
)
class TestPackageBase(object):
    """Shared constants and diff helpers for the legacy package form tests."""

    # Deliberately contain '<' and an umlaut to exercise HTML escaping.
    # Note: Can't put a quotation mark in key1 or value1 because
    # paste.fixture doesn't unescape the value in an input field
    # on form submission. (But it works in real life.)
    key1 = u"key1 Less-than: < Umlaut: \xfc"
    value1 = u"value1 Less-than: < Umlaut: \xfc"

    def _assert_form_errors(self, res):
        # The form element must carry the error class and at least one
        # field-level error marker.
        self.check_tag(res, "<form", "has-errors")
        assert "field_error" in res, res

    def diff_responses(self, res1, res2):
        """Unified diff of two response bodies."""
        return self.diff_html(res1.body, res2.body)

    def diff_html(self, html1, html2):
        """Unified diff of two HTML strings, joined back into one string."""
        lines1 = html1.split("\n")
        lines2 = html2.split("\n")
        return "\n".join(unified_diff(lines1, lines2))
class TestPackageForm(TestPackageBase):
    """Mixin of assertion helpers for dataset create/edit form tests.

    Inherit this in test classes that submit or inspect the package form.
    """

    def _check_package_read(self, res, **params):
        """Assert the dataset read page ``res`` displays every field given
        in ``params`` (name, title, version, url, resources, notes,
        license_id, tags, optional state, and extras)."""
        assert "Error" not in res, res
        assert u"%s - Datasets" % params["title"] in res, res
        main_res = self.main_div(res)
        main_div = main_res
        main_div_str = main_div.encode("utf8")
        assert params["name"] in main_div, main_div_str
        assert params["title"] in main_div, main_div_str
        assert params["version"] in main_div, main_div_str
        self.check_named_element(main_div, "a", 'href="%s"' % params["url"])
        # (removed an unused "Dataset-<id>-" prefix local that was never read)
        for res_index, values in self._get_resource_values(
            params["resources"], by_resource=True
        ):
            self.check_named_element(main_div, "tr", *values)
        assert params["notes"] in main_div, main_div_str
        license = model.Package.get_license_register()[params["license_id"]]
        assert license.title in main_div, (license.title, main_div_str)
        tag_names = list(params["tags"])
        self.check_named_element(main_div, "ul", *tag_names)
        if "state" in params:
            assert "State: %s" % params["state"] in main_div.replace(
                "</strong>", ""
            ), main_div_str
        # Normalize extras to a list of (key, value, deleted) triples.
        if isinstance(params["extras"], dict):
            extras = [(key, value, False)
                      for key, value in params["extras"].items()]
        elif isinstance(params["extras"], (list, tuple)):
            extras = params["extras"]
        else:
            raise NotImplementedError
        for key, value, deleted in extras:
            if not deleted:
                key_in_html_body = self.escape_for_html_body(key)
                value_in_html_body = self.escape_for_html_body(value)
                self.check_named_element(
                    main_div, "tr", key_in_html_body, value_in_html_body
                )
            else:
                # Deleted extras must NOT appear ("!" negates the match).
                self.check_named_element(main_div, "tr", "!" + key)
                self.check_named_element(main_div, "tr", "!" + value)

    def _get_resource_values(self, resources, by_resource=False):
        """Yield expected form values for each resource.

        Resources may be given as plain URL strings, objects with resource
        attributes, sequences ordered like ``model.Resource`` columns, or
        dicts keyed by column name.  Yields ``(index, [values...])`` per
        resource when ``by_resource`` is true, else one
        ``(index, field, value)`` triple per field.
        """
        assert isinstance(resources, (list, tuple))
        for res_index, resource in enumerate(resources):
            if by_resource:
                values = []
            for i, res_field in enumerate(
                model.Resource.get_columns(extra_columns=False)
            ):
                if isinstance(resource, string_types):
                    expected_value = resource if res_field == "url" else ""
                elif hasattr(resource, res_field):
                    expected_value = getattr(resource, res_field)
                elif isinstance(resource, (list, tuple)):
                    expected_value = resource[i]
                elif isinstance(resource, dict):
                    expected_value = resource.get(res_field, u"")
                else:
                    # BUG FIX: was ``raise NotImplemented`` -- raising the
                    # NotImplemented singleton is itself a TypeError; raise
                    # the exception class instead.
                    raise NotImplementedError
                if not by_resource:
                    yield (res_index, res_field, expected_value)
                else:
                    values.append(expected_value)
            if by_resource:
                yield (res_index, values)

    def escape_for_html_body(self, unescaped_str):
        """HTML-escape ``unescaped_str`` for comparison against page bodies.

        Only ``<`` is escaped because that is the only special character the
        tests exercise.
        """
        # BUG FIX: the replacement had been mangled to a literal "<" (a
        # no-op) by HTML-entity un-escaping in the source; restore "&lt;".
        return unescaped_str.replace("<", "&lt;")

    def check_form_filled_correctly(self, res, **params):
        """Assert the edit form in ``res`` is pre-filled with ``params``.

        ``params`` may include a ``pkg`` model object whose fields provide
        the expected values (its ``license`` key maps to ``license_id``).
        """
        if "pkg" in params:
            for key, value in params["pkg"].as_dict().items():
                if key == "license":
                    key = "license_id"
                params[key] = value
        prefix = ""
        main_res = self.main_div(res)
        self.check_tag(main_res, prefix + "name", params["name"])
        self.check_tag(main_res, prefix + "title", params["title"])
        self.check_tag(main_res, prefix + "version", params["version"])
        self.check_tag(main_res, prefix + "url", params["url"])
        self.check_tag_and_data(main_res, prefix + "notes", params["notes"])
        self.check_tag_and_data(main_res, "selected", params["license_id"])
        if isinstance(params["tags"], string_types):
            tags = [s.strip() for s in params["tags"].split(",")]
        else:
            tags = params["tags"]
        for tag in tags:
            self.check_tag(main_res, prefix + "tag_string", tag)
        if "state" in params:
            self.check_tag_and_data(main_res, "selected", str(params["state"]))
        if isinstance(params["extras"], dict):
            extras = [(key, value, False)
                      for key, value in params["extras"].items()]
        else:
            extras = params["extras"]
        for num, (key, value, deleted) in enumerate(sorted(extras)):
            key_in_html_body = self.escape_for_html_body(key)
            self.check_tag(main_res, "extras__%s__key" % num, key_in_html_body)
            # The value field is checked unescaped (form input value).
            self.check_tag(main_res, "extras__%s__value" % num, value)
            if deleted:
                self.check_tag(
                    main_res, "extras__%s__deleted" % num, "checked"
                )
        assert params["log_message"] in main_res, main_res

    def _check_redirect(
        self,
        app,
        return_url_param,
        expected_redirect,
        pkg_name_to_edit="",
        extra_environ=None,
    ):
        """
        Submit the dataset new/edit form and assert the redirect target.

        @param return_url_param - encoded url to be given as param - if None
                          then assume redirect is specified in pylons config
        @param expected_redirect - url we expect to redirect to (but <NAME>
                          not yet substituted)
        @param pkg_name_to_edit - '' means create a new dataset
        """
        try:
            new_name = u"new-name"
            offset_params = {}
            if pkg_name_to_edit:
                pkg_name = pkg_name_to_edit
                pkg = model.Package.by_name(pkg_name)
                assert pkg
                pkg_id = pkg.id
                named_route = "dataset.edit"
                offset_params["id"] = pkg_name_to_edit
            else:
                named_route = "dataset.new"
                pkg_id = ""
            if return_url_param:
                offset_params["return_to"] = return_url_param
            offset = url_for(named_route, **offset_params)
            res = app.post(offset, extra_environ=extra_environ, data={
                "name": new_name,
                "save": ""
            }, follow_redirects=False)
            assert "Error" not in res, res
            redirected_to = res.headers['location']
            expected_redirect_url = expected_redirect.replace(
                "<NAME>", new_name
            )
            assert redirected_to == expected_redirect_url, (
                "Redirected to %s but should have been %s"
                % (redirected_to, expected_redirect_url)
            )
        finally:
            # revert name change or pkg creation
            pkg = model.Package.by_name(new_name)
            if pkg:
                if pkg_name_to_edit:
                    pkg.name = pkg_name_to_edit
                else:
                    pkg.purge()
                model.repo.commit_and_remove()
class TestReadOnly(TestPackageForm, HtmlCheckMethods):
    """Tests that read dataset pages without modifying them."""

    @pytest.fixture(autouse=True)
    def initial_data(self, clean_db, clean_index):
        # Standard CKAN test fixtures, rebuilt for every test in this class.
        CreateTestData.create()

    def test_read_nonexistentpackage(self, app):
        """Reading an unknown dataset id returns 404."""
        name = "anonexistentpackage"
        offset = url_for("dataset.read", id=name)
        res = app.get(offset, status=404)

    def test_read_internal_links(self, app):
        """dataset:/tag:/group: references in notes render as links; decoy
        prefixes do not."""
        # NOTE(review): the trailing comma makes pkg_name a 1-tuple, which is
        # then used as both the dataset name and the read id -- confirm this
        # is intended and not a typo for a plain string.
        pkg_name = (u"link-test",)
        CreateTestData.create_arbitrary(
            [
                {
                    "name": pkg_name,
                    "notes": "Decoy link here: decoy:decoy, real links here: dataset:pkg-1, "
                    'tag:tag_1 group:test-group-1 and a multi-word tag: tag:"multi word with punctuation."',
                }
            ]
        )
        offset = url_for("dataset.read", id=pkg_name)
        res = app.get(offset)

        def check_link(res, controller, id):
            id_in_uri = id.strip('"').replace(
                " ", "+"
            )  # remove quotes and percent-encode spaces
            self.check_tag_and_data(
                res,
                "a ",
                "%s/%s" % (controller, id_in_uri),
                # BUG FIX: the entity '&quot;' had been un-escaped to a bare
                # quote in the source, leaving invalid syntax; restore it.
                "%s:%s" % (controller, id.replace('"', '&quot;')),
            )

        check_link(res.body, "dataset", "pkg-1")
        check_link(res.body, "group", "test-group-1")
        assert "decoy</a>" not in res, res
        assert 'decoy"' not in res, res

    @pytest.mark.ckan_config("ckan.plugins", "test_package_controller_plugin")
    @pytest.mark.usefixtures("with_plugins")
    def test_read_plugin_hook(self, app):
        """IPackageController read/after_show hooks fire exactly once per
        dataset read."""
        plugin = plugins.get_plugin("test_package_controller_plugin")
        name = u"annakarenina"
        offset = url_for("dataset.read", id=name)
        res = app.get(offset)
        assert plugin.calls["read"] == 1, plugin.calls
        assert plugin.calls["after_show"] == 1, plugin.calls

    @pytest.mark.usefixtures("with_request_context")
    def test_resource_list(self, app):
        # TODO restore this test. It doesn't make much sense with the
        # present resource list design.
        name = "annakarenina"
        cache_url = "http://thedatahub.org/test_cache_url.csv"
        # add a cache_url to the first resource in the package
        context = {
            "model": model,
            "session": model.Session,
            "user": "testsysadmin",
        }
        data = {"id": "annakarenina"}
        pkg = get.package_show(context, data)
        pkg["resources"][0]["cache_url"] = cache_url
        # FIXME need to pretend to be called by the api
        context["api_version"] = 3
        update.package_update(context, pkg)
        # check that the cache url is included on the dataset view page
        offset = url_for("dataset.read", id=name)
        res = app.get(offset)
        # assert '[cached]'in res
        # assert cache_url in res
class TestEdit(TestPackageForm):
    # Tests for the dataset edit form and its redirect behaviour.
    # Name of the dataset created specifically for the edit tests.
    editpkg_name = u"editpkgtest"
    @pytest.fixture(autouse=True)
    def initial_data(self, clean_db):
        # Standard fixtures plus one editable dataset whose resource fields
        # contain characters that require HTML escaping.
        CreateTestData.create()
        CreateTestData.create_arbitrary(
            {
                "name": self.editpkg_name,
                "url": u"editpkgurl.com",
                "tags": [u"mytesttag"],
                "resources": [
                    {
                        "url": u'url escape: & umlaut: \xfc quote: "',
                        "description": u'description escape: & umlaut: \xfc quote "',
                    }
                ],
            }
        )
        self.editpkg = model.Package.by_name(self.editpkg_name)
        self.pkgid = self.editpkg.id
        self.offset = url_for("dataset.edit", id=self.editpkg_name)
        # NOTE(review): self.editpkg is looked up twice; presumably a
        # refresh after the session changes above -- confirm the first
        # lookup is still needed.
        self.editpkg = model.Package.by_name(self.editpkg_name)
        self.admin = model.User.by_name(u"testsysadmin")
        self.extra_environ_admin = {
            "REMOTE_USER": self.admin.name.encode("utf8")
        }
        self.extra_environ_russianfan = {"REMOTE_USER": "russianfan"}
    def test_redirect_after_edit_using_param(self, app):
        # A return_to query parameter overrides the configured redirect.
        return_url = "http://random.site.com/dataset/<NAME>?test=param"
        # It's useful to know that this url encodes to:
        # 'http%3A%2F%2Frandom.site.com%2Fdataset%2F%3CNAME%3E%3Ftest%3Dparam'
        expected_redirect = return_url
        self._check_redirect(
            app,
            return_url,
            expected_redirect,
            pkg_name_to_edit=self.editpkg_name,
            extra_environ=self.extra_environ_admin,
        )
    def test_redirect_after_edit_using_config(self, app):
        # With no return_to parameter the redirect target comes from the
        # package_edit_return_url config option.
        return_url = ""  # redirect comes from test.ini setting
        expected_redirect = config["package_edit_return_url"]
        self._check_redirect(
            app,
            return_url,
            expected_redirect,
            pkg_name_to_edit=self.editpkg_name,
            extra_environ=self.extra_environ_admin,
        )
    def test_edit_404(self, app):
        # Editing a nonexistent dataset returns 404.
        self.offset = url_for("dataset.edit", id="random_name")
        app.get(self.offset, status=404)
    def test_edit_pkg_with_relationships(self, app):
        # Editing a dataset must not drop its package relationships.
        # add a relationship to a package
        pkg = model.Package.by_name(self.editpkg_name)
        anna = model.Package.by_name(u"annakarenina")
        pkg.add_relationship(u"depends_on", anna)
        model.repo.commit_and_remove()
        # check relationship before the test
        rels = model.Package.by_name(self.editpkg_name).get_relationships()
        assert (
            str(rels)
            == "[<*PackageRelationship editpkgtest depends_on annakarenina>]"
        )
        # edit the package
        self.offset = url_for("dataset.edit", id=self.editpkg_name)
        res = app.post(self.offset, extra_environ=self.extra_environ_admin, data={
            "save": "",
            "title": "New Title"
        }, follow_redirects=False)
        # check relationship still exists
        rels = model.Package.by_name(self.editpkg_name).get_relationships()
        assert (
            str(rels)
            == "[<*PackageRelationship editpkgtest depends_on annakarenina>]"
        )
class TestDelete(object):
    # Tests for dataset deletion and the IPackageController delete hooks.
    @pytest.fixture
    def initial_data(self, clean_db):
        # Standard fixtures plus the 'tester' user.
        CreateTestData.create()
        CreateTestData.create_test_user()
    @pytest.fixture
    def users(self, initial_data):
        # Extra-environ dicts for an admin and a regular test user.
        admin = model.User.by_name(u"testsysadmin")
        return {
            "admin": {"REMOTE_USER": admin.name.encode("utf8")},
            "tester": {"REMOTE_USER": "tester"},
        }
    @pytest.mark.ckan_config("ckan.plugins", "test_package_controller_plugin")
    @pytest.mark.usefixtures("with_plugins")
    def test_delete(self, app, users):
        # Both users delete the same dataset; the plugin hooks should fire
        # once per delete request.
        plugin = plugins.get_plugin("test_package_controller_plugin")
        offset = url_for("dataset.delete", id="warandpeace")
        # Since organizations, any owned dataset can be edited/deleted by any
        # user
        app.post(offset, extra_environ=users["tester"])
        app.post(offset, extra_environ=users["admin"])
        assert model.Package.get("warandpeace").state == u"deleted"
        assert plugin.calls["delete"] == 2
        assert plugin.calls["after_delete"] == 2
class TestNew:
@pytest.fixture
def env_user(self, clean_db):
CreateTestData.create_test_user()
return {"REMOTE_USER": "tester"}
@pytest.mark.ckan_config("ckan.plugins", "test_package_controller_plugin")
@pytest.mark.usefixtures("with_plugins")
def test_new_plugin_hook(self, env_user, app):
plugin = plugins.get_plugin("test_package_controller_plugin")
offset = url_for("dataset.new")
new_name = u"plugged"
res = app.post(offset, extra_environ=env_user, data={
"name": new_name,
"save": ""
}, follow_redirects=False)
assert plugin.calls["edit"] == 0, plugin.calls
assert plugin.calls["create"] == 1, plugin.calls
@pytest.mark.ckan_config("ckan.plugins", "test_package_controller_plugin")
| |
<gh_stars>0
import re
from datetime import datetime, date, timedelta
import json
from decimal import Decimal
from django.db.models import Sum, Q
from nt_s_common.decorator import cache_required
from .models import *
def show_notary_info_list():
    """Return the names of all active notaries (flag=1), skipping blanks."""
    rows = Notaries.objects.filter(flag=1).values_list('name', flat=True)
    return [name for name in rows if name]
def get_notary_region_rate():
    """Sum granted allowance per region over active notaries.

    Returns a dict mapping region -> total ``granted_allowance``.  A NULL
    allowance counts as 0, matching how ``granted_allowance`` is guarded
    elsewhere in this module (see ``distribution_data``).
    """
    rows = Notaries.objects.filter(flag=1).values('region', 'granted_allowance')
    region_totals = {}
    for row in rows:
        region = row.get('region')
        region_totals.setdefault(region, 0)
        # 'or 0' guards NULL allowances so '+=' cannot raise TypeError
        region_totals[region] += row.get('granted_allowance') or 0
    return region_totals
def get_notary_req(name):
    """List allowance-request records for a notary given its name or address.

    Returns ``(13000, '')`` when no notary matches, otherwise ``(0, rows)``
    where each row is a dict of request details with a formatted timestamp
    and derived fields (comment URL, block height, issue id).
    """
    if not Notaries.objects.filter(name=name).count() and not Notaries.objects.filter(address=name).count():
        return 13000, ''
    # 'name' may actually be an address; try a name match first
    nt = (Notaries.objects.filter(name=name) or Notaries.objects.filter(address=name)).values(
        'address', 'github_accounts_dict', 'granted_allowance', 'region')[0]
    # the notary's GitHub accounts are the assignees of its requests
    git_list = list(nt.get('github_accounts_dict').keys())
    rats = RequestAllowanceRecord.objects.filter(assignee__in=git_list, flag=1).values(
        'assignor', 'apply_address', 'created_at', 'region', 'request_datacap',
        'status', 'allocated_datacap', 'msg_cid', 'comments_url', 'name', 'media')
    data_list = []
    for rat in rats:
        create_at = rat.get('created_at')
        data_list.append({
            'assignor': rat.get('assignor'),
            'request_datacap': rat.get('request_datacap'),
            'created_at': create_at.strftime('%Y-%m-%d %H:%M:%S') if create_at else create_at,
            'region': rat.get('region'),
            'apply_address': rat.get('apply_address'),
            'status': rat.get('status'),
            'allocated_datacap': rat.get('allocated_datacap'),
            'msg_cid': rat.get('msg_cid'),
            'url': get_req_url(rat.get('comments_url')),
            'height': get_height(msg_cid=rat.get('msg_cid')),
            'name': rat.get('name'),
            'issue_id': get_api_issue_id(rat.get('comments_url')),
            'media': rat.get('media'),
        })
    return 0, data_list
def get_notary_info(name):
    """Build the info panel for a notary identified by name or address.

    Returns ``(13000, '')`` when no notary matches, otherwise ``(0, data)``
    where ``data['notary_info']`` holds summary figures and
    ``data['rate_dict']`` maps each client name to the datacap allocated to
    it, plus an ``'unallow'`` entry for the unallocated remainder.
    """
    if not Notaries.objects.filter(name=name).count() and not Notaries.objects.filter(address=name).count():
        return 13000, ''
    # NOTE: 'github_accounts_dict' was previously listed twice in .values()
    nt = (Notaries.objects.filter(name=name) or Notaries.objects.filter(address=name)).values(
        'name', 'address', 'github_accounts_dict', 'granted_allowance', 'region')[0]
    address = nt.get('address')
    github_accounts_dict = nt.get('github_accounts_dict')
    git_list = list(github_accounts_dict.keys())
    # total datacap of refused requests (status=0)
    refu = RequestAllowanceRecord.objects.filter(assignee__in=git_list, flag=1, status=0).aggregate(
        tt=Sum('request_datacap')).get('tt')
    # datacap actually allocated on-chain via AddVerifiedClient messages
    allo = 0
    allo_data = MessageDetails.objects.filter(msg_from=address, msg_method_name='AddVerifiedClient').values(
        'msg_params')
    for data in allo_data:
        allo += Decimal(json.loads(data.get('msg_params')).get('Allowance'))
    tot = nt.get('granted_allowance')
    req_time = RequestAllowanceRecord.objects.filter(assignee__in=git_list, flag=1).count()
    data_dict = {}
    github_account = git_list[0]
    notary_info = {
        'address': address,
        'name': nt.get('name'),
        'refused': refu if refu else 0,
        'allow': allo if allo else 0,
        'total': tot,
        'req_time': req_time,
        'region': nt.get('region'),
        'github_name': github_account,
        'github_url': f'https://github.com/{github_account}'
    }
    data_dict['notary_info'] = notary_info
    # per-client allocation totals, keyed by on-chain address for now
    rats = MessageDetails.objects.filter(msg_from=address, msg_method_name='AddVerifiedClient').values(
        'msg_params')
    usage_dict = {}
    for rat in rats:
        ori_dict = json.loads(rat.get('msg_params'))
        key = ori_dict.get('Address')
        usage_dict.setdefault(key, 0)
        usage_dict[key] += Decimal(ori_dict.get('Allowance'))
    # translate each address to the client's request-record name; one query
    # per address instead of the previous three (two counts + one fetch)
    rate_dict = {}
    for key, val in usage_dict.items():
        record = RequestAllowanceRecord.objects.filter(
            Q(apply_address=key) | Q(allocated_address=key)).first()
        if record is not None:
            key = record.name.strip('\r')
        rate_dict[key] = val
    rate_dict['unallow'] = tot - allo
    data_dict['rate_dict'] = rate_dict
    return 0, data_dict
def query_msg(msg_cid):
    """Look up a message by CID and return its decoded detail dict.

    Returns ``(13000, '')`` when the CID is unknown, otherwise ``(0, detail)``
    with ``msg_params``/``msg_return`` JSON-decoded and the exit code mapped
    to its symbolic name under ``'exit'``.
    """
    exit_code_names = {
        0: 'OK', 1: 'SysErrSenderInvalid', 2: 'SysErrSenderStateInvalid',
        3: 'SysErrInvalidMethod', 4: 'SysErrReserved1',
        5: 'SysErrInvalidReceiver', 6: 'SysErrInsufficientFunds',
        7: 'SysErrOutOfGas', 8: 'SysErrForbidden',
        9: 'SysErrorlllegalActor', 10: 'SysErrorlllegalArgument',
        11: 'SysErrReserved2', 12: 'SysErrorReserved3',
        13: 'SysErrorReserved4', 14: 'SysErrorReserved5',
        15: 'SysErrorReserved6', 16: 'ErrIllegalArgument',
        17: 'ErrNotFound', 18: 'ErrForbidden', 19: 'ErrInsufficientFunds',
        20: 'ErrIllegalState', 21: 'ErrSerialization',
        32: 'ErrTooManyProveCommits',
    }
    matches = MessageDetails.objects.filter(msg_cid=msg_cid)
    if not matches.count():
        return 13000, ''
    detail = matches.values(
        'msg_cid', 'height', 'height_time', 'msg_from', 'msg_to',
        'msg_method_name', 'msg_value', 'msgrct_exit_code', 'msg_nonce',
        'msg_gas_fee_cap', 'msg_gas_premium', 'msg_gas_limit',
        'gascost_gas_used', 'base_fee', 'msg_return', 'msg_params')[0]
    detail['msg_params'] = json.loads(detail.get('msg_params'))
    detail['msg_return'] = json.loads(detail.get('msg_return'))
    detail['exit'] = exit_code_names.get(detail.get('msgrct_exit_code'))
    return 0, detail
def get_provider_info_basic_info(provider_id):
    """Summarise a provider's verified deals and per-day traffic volume.

    Returns ``(13000, '')`` when the provider has no verified deals,
    otherwise ``(0, {'basic_info': ..., 'order_stat': ...})`` where
    ``order_stat`` maps every date from the first order through today to
    the total file size ordered that day (0 for empty days).
    """
    deals = TransactionRecord.objects.filter(provider_id=provider_id, is_verified=1)
    if not deals.count():
        return 13000, ''
    total_size = deals.aggregate(ts=Sum('file_size')).get('ts')
    basic_info = {'provider_id': provider_id, 'num': deals.count(), 'total_size': total_size}
    query_data = deals.filter(order_date__isnull=False).order_by('height_time').values(
        'order_date', 'file_size')
    first_date = query_data[0].get('order_date')
    # pre-seed every day from the first order through today with 0 so that
    # empty days appear in the chart
    t_delta = (date.today() - first_date).days + 1
    order_stat = {}
    for _ in range(t_delta):
        order_stat.setdefault(first_date.strftime('%Y-%m-%d'), 0)
        first_date += timedelta(days=1)
    for data in query_data:
        order_stat[data.get('order_date').strftime('%Y-%m-%d')] += data.get('file_size')
    # NOTE: the old per-deal order_details_list was built but never returned
    return 0, {'basic_info': basic_info, 'order_stat': order_stat}
def get_provider_info_order_stat(provider_id):
    """Per-day verified deal counts for a provider.

    Returns ``(13000, '')`` when the provider has no verified deals,
    otherwise ``(0, dates, counts)``: two parallel lists covering every
    day from the first order through today (days without orders are 0).
    """
    deals = TransactionRecord.objects.filter(provider_id=provider_id, is_verified=1)
    if not deals.count():
        return 13000, ''
    query_data = deals.filter(order_date__isnull=False).order_by('height_time').values('order_date')
    first_date = query_data[0].get('order_date')
    # pre-seed every day with 0 so the series has no gaps
    t_delta = (date.today() - first_date).days + 1
    order_stat = {}
    for _ in range(t_delta):
        order_stat.setdefault(first_date.strftime('%Y-%m-%d'), 0)
        first_date += timedelta(days=1)
    for data in query_data:
        order_stat[data.get('order_date').strftime('%Y-%m-%d')] += 1
    # NOTE: basic_info / total_size / order_details_list were previously
    # computed here but never returned; that dead work is removed
    time_list = list(order_stat.keys())
    data_list = list(order_stat.values())
    return 0, time_list, data_list
def get_provider_info_order_details_list(provider_id):
    """List a provider's verified deals with formatted timestamps.

    Returns ``(13000, '')`` when the provider has no verified deals,
    otherwise ``(0, rows)`` where each row carries deal id, height date
    (``YYYY-MM-DD`` or None), file size and client address.
    """
    deals = TransactionRecord.objects.filter(provider_id=provider_id, is_verified=1)
    if not deals.count():
        return 13000, ''
    order_details_qset = deals.values('deal_id', 'height_time', 'file_size', 'client_address')
    # NOTE: the old per-day order_stat / basic_info computation was dead
    # code here (only order_details_list was ever returned) and is removed
    order_details_list = []
    for order_details in order_details_qset:
        height_time = order_details['height_time']
        order_details['height_time'] = height_time.strftime('%Y-%m-%d') if height_time else None
        order_details_list.append(order_details)
    return 0, order_details_list
def get_provider_info_client_list(provider_id):
    """Aggregate a provider's verified deals per client address.

    Returns ``(13000, '')`` when the provider has no verified deals,
    otherwise ``(0, rows)`` where each row holds the client address, deal
    count, total size, and the client's notary/datacap info from the
    cached notary-client map.
    """
    deals = TransactionRecord.objects.filter(provider_id=provider_id, is_verified=1)
    if not deals.count():
        return 13000, ''
    order_details_qset = deals.values('file_size', 'client_address')
    data_dict = {}
    for order_details in order_details_qset:
        address = order_details.get('client_address')
        data_dict.setdefault(address, {'total_size': 0, 'num': 0})
        data_dict[address]['total_size'] += order_details.get('file_size')
        data_dict[address]['num'] += 1
    # fetch the notary/client cache once, not once per client address
    client_details_map = get_notary_client_details(must_update_cache=False)
    if not client_details_map:
        client_details_map = get_notary_client_details(must_update_cache=True)
    ret_list = []
    for client_address, val in data_dict.items():
        notary_client_details = client_details_map.get(client_address)
        if not notary_client_details:
            notary_client_details = {'notaries': None, 'datacap': None}
        val['datacap'] = notary_client_details.get('datacap')
        notaries = notary_client_details.get('notaries')
        val['notaries'] = ','.join(notaries) if notaries else None
        val.update({'address': client_address})
        ret_list.append(val)
    return 0, ret_list
def get_name_list():
    """Return the distinct notary names and (stripped) client names."""
    notary_rows = Notaries.objects.filter(flag=1).order_by('name').values_list('name').distinct()
    notary_list = [row[0] for row in notary_rows]
    client_rows = RequestAllowanceRecord.objects.filter(flag=1).order_by('name').values_list('name').distinct()
    client_list = list({row[0].strip() for row in client_rows if row[0]})
    return {'notary_list': notary_list, 'client_list': client_list}
def get_new_seven_data():
    """Daily AddVerifiedClient totals for the last seven days.

    Returns ``(0, rows)`` with one row per day (oldest first), each holding
    the date string, the summed allowance and the message count.
    """
    today_ = date.today()
    query_data = MessageDetails.objects.filter(
        msg_method_name='AddVerifiedClient',
        msg_date__gte=(today_ - timedelta(days=7))).order_by('msg_date').values('msg_date', 'msg_params')
    # seed the seven preceding days so days without messages show as 0
    data_dict = {}
    for i in range(7, 0, -1):
        data_dict.setdefault((today_ - timedelta(days=i)).strftime('%Y-%m-%d'), {'allowance': 0, 'count': 0})
    for data in query_data:
        j_data = json.loads(data.get('msg_params'))
        day = data.get('msg_date').strftime('%Y-%m-%d')
        # messages dated today fall outside the seeded window and are skipped
        if day in data_dict:
            data_dict[day]['allowance'] += Decimal(j_data.get('Allowance'))
            data_dict[day]['count'] += 1
    data_list = [
        {'time': key, 'allowance': val.get('allowance'), 'count': val.get('count')}
        for key, val in data_dict.items()
    ]
    return 0, data_list
@cache_required(cache_key='distribution_data', cache_key_type=1, expire=24 * 60 * 60)
def distribution_data(must_update_cache=False):
    """Overall datacap distributed by all active notaries, cached for 24h.

    Returns ``(0, data)`` with the distributed amount, the grand total of
    granted allowance, and the distributed/undistributed ratios (2 d.p.).

    NOTE: the cache key was previously 'notary_distribution_data', which
    collided with ``notary_distribution_data`` below so the two functions
    overwrote each other's cached results; the key is now distinct.
    """
    nts = Notaries.objects.filter(flag=1).values('address', 'granted_allowance', 'github_accounts_dict')
    distributed = 0
    for nt in nts:
        address = nt.get('address')
        # sum every on-chain AddVerifiedClient allowance sent by this notary
        query_data = MessageDetails.objects.filter(
            msg_from=address, msg_method_name='AddVerifiedClient').values('msg_params')
        for origin_data in query_data:
            distributed += Decimal(json.loads(origin_data.get('msg_params')).get('Allowance'))
    # the grand total has always come from this aggregate over *all* notaries;
    # the old in-loop accumulation was dead code overwritten right here
    total_datacap = Notaries.objects.all().aggregate(tt=Sum('granted_allowance')).get('tt')
    return 0, {'distribution_data': distributed, 'total_data': total_datacap,
               'distribution_data_rate': round(distributed / total_datacap, 2),
               'undistribution_data': 1 - round(distributed / total_datacap, 2)}
# def notary_distribution_data(notary_list):
# print(notary_list)
# notary_list = json.loads(notary_list) if notary_list else None
# if notary_list:
# if not Notaries.objects.filter(name__in=notary_list).count():
# return 13000, ''
# nts = Notaries.objects.filter(name__in=notary_list).values('address',
# 'granted_allowance') if notary_list else Notaries.objects.filter(
# flag=1).values('address', 'granted_allowance')
# distribution_data = 0
# total_datacap = 0
# for nt in nts:
# address = nt.get('address')
# total_datacap += nt.get('granted_allowance') if nt.get('granted_allowance') else 0
# query_data = MessageDetails.objects.filter(msg_from=address).values('msg_params')
# # RequestAllowanceRecord.objects.filter()
# for data in query_data:
# distribution_data += Decimal(json.loads(data.get('msg_params')).get('Allowance'))
#
# return 0, {'distribution_data': distribution_data, 'total_data': total_datacap,
# 'distribution_data_rate': round(distribution_data / total_datacap, 2),
# 'undistribution_data': 1 - round(distribution_data / total_datacap, 2)}
# def notary_distribution_data(notary_list):
# notary_list = json.loads(notary_list) if notary_list else None
# if notary_list:
# if not Notaries.objects.filter(name__in=notary_list).count():
# return 13000, ''
# nts = Notaries.objects.filter(name__in=notary_list, flag=1).values('address',
# 'granted_allowance',
# 'github_accounts_dict') if notary_list else Notaries.objects.filter(
# flag=1).values('address', 'granted_allowance', 'github_accounts_dict')
# distribution_data = 0
# total_datacap = 0
# for nt in nts:
# address = nt.get('address')
# print(address)
# ghd = nt.get('github_accounts_dict')
# total_datacap += nt.get('granted_allowance') if nt.get('granted_allowance') else 0
# # query_data = RequestAllowanceRecord.objects.filter(assignee__in=ghd, status=2, flag=1).aggregate(
# # tt=Sum('allocated_datacap')).get('tt') or 0
# # distribution_data += Decimal(query_data)
# query_data = MessageDetails.objects.filter(msg_from=address, msg_method_name='AddVerifiedClient').values(
# 'msg_params')
# for origin_data in query_data:
# distribution_data += Decimal(json.loads(origin_data.get('msg_params')).get('Allowance'))
#
# return 0, {'distribution_data': distribution_data, 'total_data': total_datacap,
# 'distribution_data_rate': round(distribution_data / total_datacap, 2),
# 'undistribution_data': 1 - round(distribution_data / total_datacap, 2)}
@cache_required(cache_key='notary_distribution_data', cache_key_type=1, expire=24 * 60 * 60)
def notary_distribution_data(notary_list=None, must_update_cache=False):
    """Datacap distribution stats for the given notaries (or all active ones).

    ``notary_list`` is a JSON-encoded list of notary names; falsy means all
    active notaries. Returns ``(13000, '')`` when none of the names exist,
    otherwise ``(0, stats)`` with lifetime / last-24h / previous-day
    distribution figures and region coverage.

    NOTE(review): the cache key is fixed, so cached results ignore
    ``notary_list`` — confirm callers only rely on the all-notaries case.
    """
    notary_list = json.loads(notary_list) if notary_list else None
    if notary_list:
        if not Notaries.objects.filter(name__in=notary_list).count():
            return 13000, ''
    # 'region' is needed for region_num below, so fetch it in BOTH branches
    # (it was previously missing from the filtered branch)
    nts = Notaries.objects.filter(name__in=notary_list, flag=1).values(
        'address', 'granted_allowance', 'github_accounts_dict', 'region') if notary_list else Notaries.objects.filter(
        flag=1).values('address', 'granted_allowance', 'github_accounts_dict', 'region')
    distribution_data = 0
    distribution_data_24h = 0
    distribution_data_1d = 0
    total_datacap = 0
    region_list = []
    today_ = datetime.today()
    for nt in nts:
        region_list.append(nt.get('region'))
        address = nt.get('address')
        total_datacap += nt.get('granted_allowance') if nt.get('granted_allowance') else 0
        # lifetime, rolling-24h and previous-calendar-day allowance messages
        query_data = MessageDetails.objects.filter(msg_from=address, msg_method_name='AddVerifiedClient').values(
            'msg_params')
        query_date = (date.today() - timedelta(days=1)).strftime('%Y-%m-%d')
        query_data_24h = MessageDetails.objects.filter(
            msg_from=address, msg_method_name='AddVerifiedClient',
            height_time__range=(today_ - timedelta(days=1), today_)).values('msg_params')
        query_data_1d = MessageDetails.objects.filter(
            msg_from=address, msg_method_name='AddVerifiedClient',
            msg_date=query_date).values('msg_params')
        for origin_data in query_data:
            distribution_data += Decimal(json.loads(origin_data.get('msg_params')).get('Allowance'))
        for origin_data in query_data_24h:
            distribution_data_24h += Decimal(json.loads(origin_data.get('msg_params')).get('Allowance'))
        for origin_data in query_data_1d:
            distribution_data_1d += Decimal(json.loads(origin_data.get('msg_params')).get('Allowance'))
    return 0, {'distribution_data': distribution_data, 'total_data': total_datacap,
               'distribution_data_rate': round(distribution_data / total_datacap, 2),
               'undistribution_data': 1 - round(distribution_data / total_datacap, 2),
               'undistribution': total_datacap - distribution_data,
               'distribution_data_1d': distribution_data_1d,
               # HACK: notary count is hard-coded; should likely be nts.count()
               'nums': 23, 'region_num': len(set(region_list)), 'distribution_data_24h': distribution_data_24h}
def get_handle_efficiency(notary_list):
notary_list = json.loads(notary_list) if notary_list else | |
0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0xe4, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0,
0x4f, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xff, 0xe4, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf0, 0x4f, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xff, 0xe4, 0x0f, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xf0, 0x4f, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xe4, 0x0f, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xf0, 0x4f, 0xfe, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xe4, 0x0f, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xf0, 0x4f, 0xfe, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xff, 0xe4, 0x0f, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xf0, 0x4f, 0xfe, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xe4, 0x0f,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x4f, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xe4,
0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x4f,
0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0xe4, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0,
0x4f, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xff, 0xe4, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf0, 0x4f, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xff, 0xe4, 0x0f, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xf0, 0x4f, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xe4, 0x0f, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xf0, 0x4f, 0xfe, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xe4, 0x0f, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xf0, 0x4f, 0xfe, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xff, 0xe4, 0x0f, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xf0, 0x4f, 0xfe, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xe4, 0x0f,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x4f, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xe4,
0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x4f,
0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0xe4, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0,
0x4f, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xff, 0xe4, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf0, 0x4f, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xff, 0xe4, 0x0f, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xf0, 0x4f, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xe4, 0x0f, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, | |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Eviloid, 22.08.2019
import os, sys
import urllib, urllib2, urlparse, cookielib
import re, json
import xbmc, xbmcgui, xbmcplugin, xbmcaddon
import CommonFunctions
import sqlite3 as sql
PLUGIN_NAME = 'FanSerials'
common = CommonFunctions
common.plugin = PLUGIN_NAME
# Kodi passes the plugin handle as argv[1]; tolerate its absence so the
# module can still be imported outside Kodi (narrowed from a bare except,
# which also swallowed SystemExit/KeyboardInterrupt)
try:
    handle = int(sys.argv[1])
except (IndexError, ValueError):
    pass
addon = xbmcaddon.Addon(id='plugin.video.evld.fanserials.tv')
Pdir = addon.getAddonInfo('path')
icon = xbmc.translatePath(os.path.join(Pdir, 'icon.png'))
fanart = xbmc.translatePath(os.path.join(Pdir, 'fanart.jpg'))
db = xbmc.translatePath(os.path.join(Pdir, 'serials.db'))
# site host is configurable; derived URL templates for poster/fanart images
BASE_URL = 'https://' + addon.getSetting('host')
IMG_URL_PATTERN = BASE_URL.strip('/') + '/storage/serials/%s/v2/%s.jpg'
ART_URL_PATTERN = BASE_URL.strip('/') + '/storage/serials/%s/h2/%s.jpg'
sound_mode = int(addon.getSetting('sound'))
auto_update_description = addon.getSetting('AutoUpdate') == 'true'
def main_menu():
    """Build the plugin root directory: category folders, latest episodes, search."""
    # top-level category folders; 't' is the site's section id
    add_item('[B]Сериалы[/B]', params={'mode':'abc', 't':'0'}, fanart=fanart, isFolder=True)
    add_item('[B]Аниме[/B]', params={'mode':'abc', 't':'2'}, fanart=fanart, isFolder=True)
    add_item('[B]Мультсериалы[/B]', params={'mode':'abc', 't':'1'}, fanart=fanart, isFolder=True)
    add_item('[B]Документальное[/B]', params={'mode':'abc', 't':'3'}, fanart=fanart, isFolder=True)
    add_item('[B]ТВ-шоу[/B]', params={'mode':'abc', 't':'6'}, fanart=fanart, isFolder=True)
    add_item('[B]Новые сериалы[/B]', params={'mode':'new_serials'}, fanart=fanart, isFolder=True)
    # scrape the site's "/new/" page for the latest episodes
    html = get_html(BASE_URL + '/new/')
    container = common.parseDOM(html, 'div', attrs={'id':'episode_list'})
    episodes = common.parseDOM(container, 'div', attrs={'class':'item-serial'})
    if len(episodes) > 0:
        for episode in episodes:
            # the poster URL is embedded in an inline style attribute;
            # the slice strips the surrounding CSS ("background-image:url('...')")
            img = common.parseDOM(episode, 'div', attrs={'class':'field-img'}, ret='style')[0]
            img = img[23:-3]
            desc = common.parseDOM(episode, 'div', attrs={'class':'field-description'})[0]
            desc = common.parseDOM(desc, 'a')[0]
            plot = common.replaceHTMLCodes(desc)
            desc = common.parseDOM(episode, 'div', attrs={'class':'field-title'})[0]
            desc = common.parseDOM(desc, 'a')[0]
            title = '[COLOR=yellow]%s[/COLOR] [COLOR=gray]%s[/COLOR]' % (common.replaceHTMLCodes(desc), plot)
            u = common.parseDOM(episode, 'a', ret='href')[0]
            # context-menu entry: jump from this episode to the whole serial
            menu = [('Все серии', 'Container.Update("%s?mode=jump&u=%s")' % (sys.argv[0], urllib.quote_plus(u)))]
            # sound_mode decides whether an episode opens as a folder (voice
            # track choice) or plays directly
            add_item(title, params={'mode':'episode', 'u':u}, plot=plot, thumb=img, fanart=fanart, isFolder=sound_mode==1, isPlayable=sound_mode==0, menu=menu)
    add_item('[B]Поиск[/B]', params={'mode':'search'}, fanart=fanart, icon='DefaultAddonsSearch.png', isFolder=True)
    xbmcplugin.setContent(handle, 'videos')
    xbmcplugin.endOfDirectory(handle)
def get_description(url, id, force=False):
    """Return the serial's plot from the local DB, scraping it on a miss.

    ``force`` re-scrapes even when a cached value exists. A scrape that
    yields nothing returns '' without caching.
    """
    plot = db_restore(id)
    if force or plot is None:
        html = get_html('%s/%s/' % (BASE_URL, url))
        desc = common.parseDOM(html, 'div', attrs={'class':'body', 'itemprop':'description'})
        if not desc:
            plot = ''
        else:
            plot = common.stripTags(desc[0])
            db_store(id, plot)
    return plot
def search(params):
    """Prompt for a search query and list matching serials as a directory."""
    kbd = xbmc.Keyboard('', 'Поиск:')
    kbd.doModal()
    keyword = kbd.getText() if kbd.isConfirmed() else ''
    if keyword:
        html = get_html('%s/search/' % BASE_URL, params={'query':keyword})
        for serial in common.parseDOM(html, 'div', attrs={'class':'item-search-serial'}):
            poster = common.parseDOM(serial, 'img', ret='src')[0].replace('/v1','/v2')
            name = common.parseDOM(serial, 'img', ret='alt')[0]
            href = common.parseDOM(serial, 'a', ret='href')[0].strip('/')
            summary = common.parseDOM(serial, 'p', attrs={'class':'textailor'})[0]
            add_item(name, params={'mode':'seasons', 'u':href}, poster=poster, fanart=fanart, plot=summary, isFolder=True)
    xbmcplugin.setContent(handle, 'tvshows')
    xbmcplugin.endOfDirectory(handle)
def jump_to_seasons(params):
    """Follow an episode page's breadcrumbs up to its serial's seasons list."""
    html = get_html(BASE_URL + params['u'])
    crumbs = common.parseDOM(html, 'ul', attrs={'class':'breadcrumbs'})
    hrefs = common.parseDOM(crumbs, 'a', attrs={'itemprop':'item'}, ret='href')
    # hrefs[1] is the serial itself (hrefs[0] is the site root)
    if len(hrefs) > 1:
        params['mode'] = 'seasons'
        params['u'] = hrefs[1].strip('/')
        show_seasons(params)
def new_serials(params):
    """List the site's "new serials" page as a directory of serial folders."""
    html = get_html('%s/new-serials/' % BASE_URL)
    container = common.parseDOM(html, 'div', attrs={'class':'block-new-serials[ a-z0-9-]*'})
    # posters and their links come from two parallel DOM queries, matched by index
    serials = common.parseDOM(container, 'div', attrs={'class':'new-serials-poster'})
    hrefs = common.parseDOM(container, 'a', attrs={'class':'field-poster'}, ret='href')
    if len(serials) > 0:
        for i, serial in enumerate(serials):
            img = common.parseDOM(serial, 'img', ret='src')[0].replace('/v1', '/v2')
            title = common.parseDOM(serial, 'img', ret='alt')[0]
            ids = common.parseDOM(serial, 'a', attrs={'class':'popover-btn'}, ret='data-serial-id')
            u = hrefs[i].strip('/')
            desc = get_description(u, ids[0])
            # Python 2 floor division: serial images are sharded by id/1000
            id = int(ids[0]) / 1000
            fan = ART_URL_PATTERN % (id, u)
            # context-menu entry to force-refresh the cached description
            menu = [('Обновить описание', 'Container.Update("%s?mode=description&u=%s&id=%s", False)' % (sys.argv[0], urllib.quote_plus(u), ids[0]))]
            add_item(title, params={'mode':'seasons', 'u':u, 'i':ids[0]}, poster=img, fanart=fan, plot=desc, isFolder=True, menu=menu)
    xbmcplugin.setContent(handle, 'tvshows')
    xbmcplugin.endOfDirectory(handle)
def ABClist(params):
    """Show the alphabet index for catalogue type params['t'] as folder items."""
    cat = params.get('t', '0')
    markup = json.loads(get_html('%s/alphabet/%s/' % (BASE_URL, cat)))['alphabet']
    letters_list = common.parseDOM(markup, 'ul', attrs={'id':'letters-list'})
    letters = common.parseDOM(letters_list, 'a')
    letter_links = common.parseDOM(letters_list, 'a', ret='href')
    # Each anchor's href is '#<letter-id>'; strip the leading '#'.
    for letter, link in zip(letters, letter_links):
        add_item(letter, params={'mode':'serials', 't':cat, 'letter':link[1:]}, fanart=fanart, isFolder=True)
    xbmcplugin.setContent(handle, 'videos')
    xbmcplugin.endOfDirectory(handle)
def show_serials(params):
    """List every serial filed under the alphabet letter params['letter']."""
    cat = params.get('t', '0')
    markup = json.loads(get_html('%s/alphabet/%s/' % (BASE_URL, cat)))['alphabet']
    letter_block = common.parseDOM(markup, 'div', attrs={'class':'literal', 'id':urllib.unquote_plus(params['letter'])})
    entries = common.parseDOM(letter_block, 'li', attrs={'class':'literal__item not-loaded'})
    entry_ids = common.parseDOM(letter_block, 'li', attrs={'class':'literal__item not-loaded'}, ret='data-id')
    for idx, entry in enumerate(entries):
        title = common.parseDOM(entry, 'a')[0]
        slug = common.parseDOM(entry, 'a', ret='href')[0].strip('/').encode('utf-8')
        # Artwork URLs are bucketed by serial id divided by 1000.
        bucket = int(entry_ids[idx]) / 1000
        img = IMG_URL_PATTERN % (bucket, slug)
        art = ART_URL_PATTERN % (bucket, slug)
        plot = get_description(slug, entry_ids[idx])
        menu = [('Обновить описание', 'Container.Update("%s?mode=description&u=%s&id=%s", False)' % (sys.argv[0], urllib.quote_plus(slug), entry_ids[idx]))]
        add_item(title, params={'mode':'seasons', 'u':slug, 'i':entry_ids[idx]}, poster=img, fanart=art, plot=plot, isFolder=True, menu=menu)
    xbmcplugin.setContent(handle, 'tvshows')
    xbmcplugin.endOfDirectory(handle)
def show_seasons(params):
    """List the seasons of the serial params['u'].

    Falls back to the embedded alloha player's season list when the page has
    no native season markup, and jumps straight to the episode list when no
    seasons are found at all.
    """
    url = '%s/%s/' % (BASE_URL, params['u'])
    html = get_html(url)
    # Serial id: prefer the one passed in params, else scrape it off the page.
    params['i'] = params.get('i', common.parseDOM(html, 'div', attrs={'class':'serial-item-rating clickonce'}, ret='data-id')[0])
    # Artwork URLs are bucketed by serial id divided by 1000.
    id = int(params['i']) / 1000
    img = IMG_URL_PATTERN % (id, params['u'])
    fan = ART_URL_PATTERN % (id, params['u'])
    plot = get_description(params['u'], params['i'])
    container = common.parseDOM(html, 'div', attrs={'itemprop':'containsSeason'})
    # NOTE(review): `container[0] if len(container) > 1 else container` passes the
    # whole list when exactly one match exists — presumably parseDOM accepts both;
    # confirm before simplifying.
    seasons = common.parseDOM(container[0] if len(container) > 1 else container, 'li')
    if len(seasons) > 0:
        for season in seasons:
            title = 'Сезон ' + common.parseDOM(season, 'span', attrs={'itemprop':'seasonNumber'})[0].encode('utf8')
            u = common.parseDOM(season, 'a', ret='href')[0].strip('/')
            add_item(title, params={'mode':'season', 'u':u}, plot=plot, poster=img, fanart=fan, isFolder=True)
    else:
        # alloha
        iframe = common.parseDOM(html, 'iframe', attrs={'id':'iframe-player'}, ret='src')
        iframe = iframe[0] if iframe else ''
        if 'alloha' in iframe:
            from alloha import AllohaBalancer
            alloha = AllohaBalancer(iframe)
            try:
                seasons = alloha.get_seasons()
            except urllib2.HTTPError:
                # Balancer refused the request — tell the user and stop.
                xbmcgui.Dialog().notification(PLUGIN_NAME, 'Видео не найдено', icon, 2000, True)
                return
            else:
                for season in seasons:
                    add_item(season['title'], params={'mode':'season', 'u':params['u'], 's':season['id']}, plot=plot, poster=img, fanart=fan, isFolder=True)
        # Neither page markup nor alloha produced seasons: show episodes directly.
        if len(seasons) == 0:
            show_season(params)
            return
    xbmcplugin.setContent(handle, 'seasons')
    xbmcplugin.endOfDirectory(handle)
def show_sounds(html, params):
    """List the available audio translations (voice-overs) for an episode.

    For alloha-hosted players the translations come from the balancer API;
    otherwise they are parsed out of the page's inline playerData JSON.
    ``params['o']`` is set to the chosen translation id/index on each item.
    """
    # alloha
    iframe = common.parseDOM(html, 'iframe', attrs={'id':'iframe-player'}, ret='src')
    iframe = iframe[0] if iframe else ''
    if 'alloha' in iframe:
        from alloha import AllohaBalancer
        alloha = AllohaBalancer(iframe)
        alloha.season = params.get('s')
        alloha.episode = params.get('e')
        translations = alloha.get_translations()
        for translation in translations:
            params['o'] = translation['id']
            add_item(translation['title'], params, icon=icon, fanart=fanart, isPlayable=True, isFolder=False)
    else:
        # BUG FIX: `re.I and re.S` evaluates to just re.S (boolean `and` returns
        # the second truthy operand); flags must be combined with bitwise OR.
        translations = re.search(r"window\.playerData = '(\[.*\])';<", html, re.I | re.S)
        if translations:
            translations = json.loads(translations.group(1))
            for i, player in enumerate(translations):
                params['o'] = i
                add_item(player['name'], params, icon=icon, fanart=fanart, isPlayable=True, isFolder=False)
    if translations:
        xbmcplugin.setContent(handle, 'videos')
        xbmcplugin.endOfDirectory(handle)
def show_season(params):
    """List one season's episodes (paginated), falling back to the alloha
    balancer's episode list when the page carries no native episode markup."""
    page = int(params.get('page', 1))
    url = '%s/%s/page/%s/' % (BASE_URL, params['u'], page) if page > 1 else '%s/%s/' % (BASE_URL, params['u'])
    html = get_html(url, {'order':'asc'})
    container = common.parseDOM(html, 'ul', attrs={'id':'episode_list'})
    episodes = common.parseDOM(container, 'div', attrs={'class':'item-serial'})
    if len(episodes) > 0:
        for episode in episodes:
            img = common.parseDOM(episode, 'div', attrs={'class':'field-img'}, ret='style')[0]
            # Strip the CSS wrapper, e.g. background-image:url('...'); — keeps the bare URL.
            img = img[23:-3]
            desc = common.parseDOM(episode, 'div', attrs={'class':'field-description'})[0]
            desc = common.parseDOM(desc, 'a')[0]
            title = common.replaceHTMLCodes(desc)
            u = common.parseDOM(episode, 'a', ret='href')[0]
            # sound_mode==1 => open translation list (folder); ==0 => play directly.
            add_item(title, params={'mode':'episode', 'u':u}, thumb=img, fanart=fanart, isFolder=sound_mode==1, isPlayable=sound_mode==0)
        # pagination
        p = common.parseDOM(html, 'span', attrs={'class':'icon-chevron-thin-right'})
        if len(p) > 0:
            params['page'] = page + 1
            add_item('Далее > %d' % params['page'], params=params, fanart=fanart, isFolder=True)
    else:
        # alloha
        iframe = common.parseDOM(html, 'iframe', attrs={'id':'iframe-player'}, ret='src')
        iframe = iframe[0] if iframe else ''
        if 'alloha' in iframe:
            from alloha import AllohaBalancer
            alloha = AllohaBalancer(iframe)
            alloha.season = params.get('s')
            episodes = alloha.get_episodes()
            for episode in episodes:
                add_item(episode['title'], params={'mode':'episode', 'u':'/%s' % params['u'], 's':alloha.season, 'e':episode['id']}, icon=icon, fanart=fanart, isFolder=sound_mode==1, isPlayable=sound_mode==0)
    xbmcplugin.setContent(handle, 'episodes')
    xbmcplugin.endOfDirectory(handle)
def play_episode(params):
    """Resolve the playable stream URL for the episode page params['u'] and
    hand it to Kodi via setResolvedUrl.

    Resolution order: alloha balancer (when the page embeds an alloha
    iframe), then the per-translation player from the inline playerData
    JSON (vio.to / stormo.tv / ok.ru / generic hls iframe). Subtitles, when
    advertised by the generic player, are attached to the list item.
    """
    url = BASE_URL + params['u']
    html = get_html(url)
    # Geo/licensing block page: retry through the proxy or notify and bail.
    block = common.parseDOM(html, 'div', attrs={'class':'limited-block-content'})
    if block:
        if addon.getSetting('UseProxy') == 'true':
            html = get_html(url, useProxy=True)
        else:
            content = common.parseDOM(block, 'div', attrs={'class':'heading'})[0]
            xbmcgui.Dialog().notification(PLUGIN_NAME, content, icon, 500, True)
            return
    # Translation index: with sound_mode 0 always use the first; otherwise a
    # missing 'o' means the user still has to pick one.
    o = 0 if sound_mode == 0 else int(params.get('o', -1))
    if o == -1:
        show_sounds(html, params)
        return
    purl = ''
    surls = []
    # alloha
    iframe = common.parseDOM(html, 'iframe', attrs={'id':'iframe-player'}, ret='src')
    iframe = iframe[0] if iframe else ''
    if 'alloha' in iframe:
        from alloha import AllohaBalancer
        alloha = AllohaBalancer(iframe)
        alloha.season = params.get('s')
        alloha.episode = params.get('e')
        alloha.translation = params.get('o')
        purl = alloha.get_video()
    if not purl:
        # BUG FIX: `re.I and re.S` evaluates to just re.S; combine flags with `|`.
        data = re.search(r"window\.playerData = '(\[.*\])';<", html, re.I | re.S)
        if data:
            data = json.loads(data.group(1))
            iframe = data[o]['player']
        else:
            iframe = ''
        if 'alloha' in iframe:
            from alloha import AllohaBalancer
            alloha = AllohaBalancer(iframe)
            purl = alloha.get_video()
        elif 'vio.to' in iframe:
            html = get_html(iframe)
            s = re.search(r"link:.?'(.*?)'", html)
            if s:
                html = get_html(s.group(1))
                # BUG FIX: flags combined with `|` instead of `and`.
                s = re.findall(r"{url:.?'(.*?)'", html, re.I | re.S)
                if s:
                    # Last entry is the highest quality; referer required by the CDN.
                    item = xbmcgui.ListItem(path='https:' + s[-1] + '|referer=https://vio.to/')
                    xbmcplugin.setResolvedUrl(handle, True, item)
        elif 'stormo.tv' in iframe:
            html = get_html(iframe)
            s = re.search(r'file:"(\[.*?\](.*?)\/[,\"\n\r]+){1,}', html)
            if s:
                item = xbmcgui.ListItem(path=s.group(2))
                xbmcplugin.setResolvedUrl(handle, True, item)
        elif 'ok.ru' in iframe:
            html = get_html(re.sub(r'^//', 'https://', iframe))
            s = re.search(r'data-module="OKVideo" data-options="(.*?)" data-player-container-id', html)
            if s:
                data = s.group(1)
                data = data.replace('\\\\', '\\')
                # BUG FIX: the attribute value HTML-escapes quotes; the original
                # replace was a no-op ('"' -> '"'), leaving &quot; in place and
                # breaking the hlsManifestUrl regex below.
                data = data.replace('&quot;', '"')
                data = data.replace('\\u0026', '&')
                data = data.replace('\\"', '"')
                url = re.search(r'"hlsManifestUrl":"(.*?)",', data).group(1)
                item = xbmcgui.ListItem(path=url)
                xbmcplugin.setResolvedUrl(handle, True, item)
        else:
            # Generic iframe player exposing an hls manifest.
            html = get_html(iframe)
            s = re.search(r'"hls":"(.*?\.m3u8)', html)
            if not s:
                block = re.search('<title>forbidden', html)
                if block:
                    if addon.getSetting('UseProxy') == 'true':
                        html = get_html(iframe, useProxy=True, referer=url)
                        s = re.search(r'"hls":"(.*?\.m3u8)', html)
                    else:
                        content = common.parseDOM(html, 'div')[0]
                        xbmcgui.Dialog().notification(PLUGIN_NAME, content, icon, 500, True)
                        return
            if s:
                purl = s.group(1).replace(r'\/', '/').replace(r'\r', '').replace(r'\n', '')
                # Subtitles: absolute-ize root-relative URLs against the iframe host.
                s = re.search(r'data-ru_subtitle="(.*?)"', html)
                iframe_parts = urlparse.urlsplit(iframe)
                if s:
                    surl = s.group(1)
                    if surl and surl[0] == '/':
                        surl = '%s://%s%s' % (iframe_parts.scheme, iframe_parts.netloc, surl)
                    surls.append(fix_sub(surl))
                s = re.search(r'data-en_subtitle="(.*?)"', html)
                if s:
                    surl = s.group(1)
                    if surl and surl[0] == '/':
                        surl = '%s://%s%s' % (iframe_parts.scheme, iframe_parts.netloc, surl)
                    surls.append(fix_sub(surl, 'en_'))
    if purl:
        item = xbmcgui.ListItem(path=purl)
        surls = [i for i in surls if i]
        if surls:
            item.setSubtitles(surls)
        else:
            # No subtitles: let inputstream.adaptive handle the hls manifest.
            item.setProperty('inputstreamaddon', 'inputstream.adaptive')
            item.setProperty('inputstream.adaptive.manifest_type', 'hls')
        xbmcplugin.setResolvedUrl(handle, True, item)
def fix_sub(surl, prefix='ru_'):
if surl:
vtt | |
or tcl_user_app.fpga['custom'] != 'GAScore':
# Connect the AXI input switch to the Galapagos router
tcl_user_app.makeConnection(
'intf',
{
'name':'applicationRegion/custom_switch_inst',
'type':'intf',
'port_name':'stream_out_switch_V'
},
{'name':'applicationRegion/input_switch',
'type':'intf',
'port_name':'S01_AXIS'
}
)
elif len(s_axis_array) == 1:
if (sim == 1):
tcl_user_app.makeConnection(
'intf',
{
'name':'applicationRegion/arbiter',
'type':'intf',
'port_name':'M00_AXIS'
},
{'name': s_axis_array[0]['kernel_inst']['inst'],
'type':'intf',
'port_name': s_axis_array[0]['name']
}
)
if tcl_user_app.fpga['comm'] not in ['raw', 'none']:
tcl_user_app.makeConnection(
'intf',
{
'name':'applicationRegion/custom_switch_inst',
'type':'intf',
'port_name':'stream_out_switch_V'
},
{'name':'applicationRegion/arbiter',
'type':'intf',
'port_name':'S01_AXIS'
}
)
else:
# there's no input switch in this case
if tcl_user_app.fpga['comm'] not in ['raw', 'none']:
if 'custom' not in tcl_user_app.fpga or tcl_user_app.fpga['custom'] != 'GAScore':
tcl_user_app.makeConnection(
'intf',
{
'name':'applicationRegion/input_switch',
'type':'intf',
'port_name':'M00_AXIS'
},
{'name': s_axis_array[0]['kernel_inst']['inst'],
'type':'intf',
'port_name': s_axis_array[0]['name']
}
)
tcl_user_app.makeConnection(
'intf',
{
'name':'applicationRegion/custom_switch_inst',
'type':'intf',
'port_name':'stream_out_switch_V'
},
{'name':'applicationRegion/input_switch',
'type':'intf',
'port_name':'S01_AXIS'
}
)
m_axis_array = getInterfaces(tcl_user_app.fpga, 'm_axis', 'scope', 'global')
# Now connect all m_axis interfaces through the output switch into the
# Galapagos router
#no output switch, direct connect if only one
if len(m_axis_array) == 1:
if tcl_user_app.fpga['comm'] not in ['raw', 'none']:
instName = m_axis_array[0]['kernel_inst']['inst']
if 'custom' not in tcl_user_app.fpga or tcl_user_app.fpga['custom'] != 'GAScore':
tcl_user_app.makeConnection(
'intf',
{
'name': instName,
'type':'intf',
'port_name': m_axis_array[0]['name']
},
{
'name':'applicationRegion/custom_switch_inst',
'type':'intf',
'port_name':'stream_in_V'
}
)
elif len(m_axis_array) > 1:
for idx, m_axis in enumerate(m_axis_array):
instName = m_axis['kernel_inst']['inst']
idx_str = "%02d"%idx
tcl_user_app.makeConnection(
'intf',
{
'name': instName ,
'type':'intf',
'port_name': m_axis['name']
},
{
'name':'applicationRegion/output_switch',
'type':'intf',
'port_name':'S'+ idx_str + '_AXIS'
}
)
if tcl_user_app.fpga['comm'] not in ['raw', 'none']:
if 'custom' not in tcl_user_app.fpga or tcl_user_app.fpga['custom'] != 'GAScore':
tcl_user_app.makeConnection(
'intf',
{
'name':'applicationRegion/output_switch',
'type':'intf',
'port_name':'M00_AXIS'
},
{
'name':'applicationRegion/custom_switch_inst',
'type':'intf',
'port_name':'stream_in_V'
}
)
# Now handle the control interfaces
s_axi_array = getInterfaces(tcl_user_app.fpga, 's_axi', 'scope', 'global')
for idx, s_axi in enumerate(s_axi_array):
instName = s_axi['kernel_inst']['inst']
idx_str = "%02d"%idx
tcl_user_app.makeConnection(
'intf',
{'name':'applicationRegion/axi_interconnect_ctrl',
'type':'intf',
'port_name':'M' + idx_str + '_AXI'
},
{'name': instName,
'type':'intf',
'port_name':s_axi['name']
}
)
# And finally the off-chip memory interface
enable_AXI_mem_interconnect = True
if 'custom' in tcl_user_app.fpga:
if tcl_user_app.fpga['custom'] == 'GAScore':
enable_AXI_mem_interconnect = False
m_axi_array = getInterfaces(tcl_user_app.fpga, 'm_axi', 'scope', 'global')
if enable_AXI_mem_interconnect:
for idx, m_axi in enumerate(m_axi_array):
instName = m_axi['kernel_inst']['inst']
idx_str = "%02d"%idx
tcl_user_app.makeConnection(
'intf',
{
'name': instName,
'type':'intf',
'port_name':m_axi['name']
},
{
'name':'applicationRegion/axi_interconnect_mem',
'type':'intf',
'port_name':'S' +idx_str + '_AXI'
}
)
else:
tcl_custom = tclMeFile( outDir + '/' + str(tcl_user_app.fpga['num']) + '_custom', tcl_user_app.fpga)
memory_lines = []
prev_instName = ""
curr_row = -1
curr_col = 0
for idx, m_axi in enumerate(m_axi_array):
instName = m_axi['kernel_inst']['inst']
idx_str = "%02d"%idx
if instName != prev_instName:
curr_row += 1
tcl_custom.tprint('set CUSTOM_arr(' + str(curr_row) + ',0) ' + instName)
prev_instName = instName
curr_col = 1
else:
curr_col += 1
tcl_custom.tprint('set CUSTOM_arr(' + str(curr_row) + ',' + str(curr_col) + ') ' + m_axi['name'])
def add_debug_interfaces(outDir, fpga):
    """Generate a <num>_debug TCL script wiring every interface marked for
    debug on this FPGA to a system ILA.

    Slot order on the ILA is: s_axi, m_axi, m_axis, s_axis, then native wire
    probes (masters first, then slaves).

    Args:
        outDir: directory the generated TCL file is written into
        fpga: the FPGA node object whose debug interfaces are instrumented
    """
    # BUG FIX: the original read tcl_debug_app.fpga before tcl_debug_app was
    # created (NameError); use the fpga argument directly.
    m_axi_interfaces = getInterfaces(fpga, 'm_axi', 'debug')
    s_axi_interfaces = getInterfaces(fpga, 's_axi', 'debug')
    s_axis_interfaces = getInterfaces(fpga, 's_axis', 'debug')
    m_axis_interfaces = getInterfaces(fpga, 'm_axis', 'debug')
    wire_master_interfaces = getInterfaces(fpga, 'wire_master', 'debug')
    wire_slave_interfaces = getInterfaces(fpga, 'wire_slave', 'debug')
    #instantiate ila
    # NOTE(review): `> 1` skips instrumentation when exactly one interface is
    # marked for debug — confirm that is intended (looks like it should be > 0).
    if (len(m_axi_interfaces) + len(s_axi_interfaces) + len(s_axis_interfaces) + len(m_axis_interfaces) + len(wire_master_interfaces) + len(wire_slave_interfaces)) > 1:
        # BUG FIX: pass fpga to tclMeFile, matching its use elsewhere in this file.
        tcl_debug_app = tclMeFile(outDir + '/' + str(fpga['num']) + '_debug', fpga)
        tcl_debug_app.instBlock(
            {
                'name':'system_ila',
                'inst':'system_ila_inst',
                'clks':['clk'],
                'resetns':['resetn']
            }
        )
        #set properties
        properties = []
        #by default interface is AXI, only need to set interface for axis and wires
        len_native = len(wire_slave_interfaces) + len(wire_master_interfaces)
        len_interface = len(s_axis_interfaces) + len(m_axis_interfaces) + len(s_axi_interfaces) + len(m_axi_interfaces)
        if len_native > 0 and len_interface > 0:
            properties.append('CONFIG.C_MON_TYPE {MIXED}')
        elif len_native > 0 and len_interface == 0:
            properties.append('CONFIG.C_MON_TYPE {NATIVE}')
        starting_idx = len(s_axi_interfaces) + len(m_axi_interfaces)
        for axis_idx in range(starting_idx, starting_idx + len(s_axis_interfaces) + len(m_axis_interfaces)):
            properties.append('CONFIG.C_SLOT_' + str(axis_idx) + '_INTF_TYPE {xilinx.com:interface:axis_rtl:1.0}')
        # NOTE(review): `properties` is assembled but never applied to the ILA
        # here — confirm whether a setProperties call went missing.
        for axi_idx, axi_interface in enumerate(s_axi_interfaces):
            tcl_debug_app.makeConnection(
                'intf',
                {
                    'name':'system_ila_inst',
                    'type':'intf',
                    'port_name':'SLOT_' + str(axi_idx) + '_AXI'
                },
                {
                    'name': axi_interface['kernel_inst']['inst'],
                    'type':'intf',
                    'port_name': axi_interface['name']
                }
            )
        slot_offset = len(s_axi_interfaces)
        for axi_idx, axi_interface in enumerate(m_axi_interfaces):
            tcl_debug_app.makeConnection(
                'intf',
                {
                    'name':'system_ila_inst',
                    'type':'intf',
                    'port_name':'SLOT_' + str(axi_idx + slot_offset) + '_AXI'
                },
                {
                    'name': axi_interface['kernel_inst']['inst'],
                    'type':'intf',
                    'port_name': axi_interface['name']
                }
            )
        slot_offset = slot_offset + len(m_axi_interfaces)
        for axis_idx, axis_interface in enumerate(m_axis_interfaces):
            tcl_debug_app.makeConnection(
                'intf',
                {
                    'name':'system_ila_inst',
                    'type':'intf',
                    'port_name':'SLOT_' + str(axis_idx + slot_offset) + '_AXIS'
                },
                {
                    'name': axis_interface['kernel_inst']['inst'],
                    'type':'intf',
                    'port_name': axis_interface['name']
                }
            )
        slot_offset = slot_offset + len(m_axis_interfaces)
        for axis_idx, axis_interface in enumerate(s_axis_interfaces):
            tcl_debug_app.makeConnection(
                'intf',
                {
                    'name':'system_ila_inst',
                    'type':'intf',
                    'port_name':'SLOT_' + str(axis_idx + slot_offset) + '_AXIS'
                },
                {
                    'name': axis_interface['kernel_inst']['inst'],
                    'type':'intf',
                    'port_name': axis_interface['name']
                }
            )
        # BUG FIX: these two loops referenced tcl_user_app, which does not
        # exist in this function; the connections belong to tcl_debug_app.
        for wire_idx, wire_interface in enumerate(wire_master_interfaces):
            tcl_debug_app.makeConnection(
                'net',
                {
                    'name':'system_ila_inst',
                    'type':'pin',
                    'port_name':'probe' + str(wire_idx)
                },
                {
                    'name': wire_interface['kernel_inst']['inst'],
                    'type':'pin',
                    'port_name': wire_interface['name']
                }
            )
        wire_offset = len(wire_master_interfaces)
        for wire_idx, wire_interface in enumerate(wire_slave_interfaces):
            tcl_debug_app.makeConnection(
                'net',
                {
                    'name':'system_ila_inst',
                    'type':'pin',
                    'port_name':'probe' + str(wire_idx + wire_offset)
                },
                {
                    'name': wire_interface['kernel_inst']['inst'],
                    'type':'pin',
                    'port_name': wire_interface['name']
                }
            )
        tcl_debug_app.close()
def getKernel(fpga, num):
    """Return the kernel entry in fpga['kernel'] whose 'num' equals num.

    Args:
        fpga: FPGA node object with a 'kernel' list of kernel dicts
        num: integer kernel number to look up
    Returns:
        The matching kernel dict, or None when no kernel has that number.
    """
    for kern in fpga['kernel']:
        # BUG FIX: the original compared `kernel['num']` — an undefined name —
        # instead of the loop variable `kern`, raising NameError on first use.
        if int(kern['num']) == num:
            return kern
    return None
def getSlaveAddressInfo(s_axi):
    """Derive the Vivado address-assignment tuple for one s_axi interface.

    Args:
        s_axi: interface dict with 'kernel_inst', 'name' and optional
               'base'/'offset'/'range' keys
    Returns:
        (slave_inst, slave_port, slave_base, properties) where properties
        carries only the 'offset'/'range' keys present on the interface.
    """
    # Instance paths look like 'region/inst'; addressing wants the bare name.
    inst = s_axi['kernel_inst']['inst'].split('/')[1]
    # HLS-generated kernels prefix their AXI data ports with 'Data_'.
    if s_axi['kernel_inst']['lib'] == 'hls':
        port = 'Data_' + s_axi['name']
    else:
        port = s_axi['name']
    base = s_axi.get('base', 'Reg')
    props = {key: s_axi[key] for key in ('offset', 'range') if key in s_axi}
    return inst, port, base, props
def userApplicationRegionAssignAddresses(tcl_user_app, shared):
    """
    Connect the memory interconnect and assign addresses (all kernels must be
    32-bit addressable), then do the same for the control interconnect.

    Args:
        tcl_user_app: a tclMe object (which contains references to the FPGA's
            node object and a handle to the output file)
        shared: truthy when a second memory port (S_AXI_MEM_1) exists and must
            mirror the address assignments of S_AXI_MEM_0
    """
    # GAScore variant: only control (s_axi) addressing is performed; the
    # memory interconnect is handled by the custom TCL elsewhere.
    if 'custom' in tcl_user_app.fpga and tcl_user_app.fpga['custom'] == 'GAScore':
        s_axi_array = getInterfaces(tcl_user_app.fpga, 's_axi', 'scope', 'global')
        master = 'S_AXI_CONTROL'
        for global_s_axi in s_axi_array:
            # NOTE(review): this assignment is immediately overwritten by
            # getSlaveAddressInfo below; it is redundant.
            slave_inst = global_s_axi['kernel_inst']['inst']
            slave_inst, slave_port, slave_base, properties = getSlaveAddressInfo(global_s_axi)
            tcl_user_app.assign_address(slave_inst, slave_port, slave_base)
            if 'offset' in properties:
                prop = {'offset': properties['offset']}
                tcl_user_app.set_address_properties(slave_inst, slave_port, slave_base, master, **prop)
            if 'range' in properties:
                prop = {'range': properties['range']}
                tcl_user_app.set_address_properties(slave_inst, slave_port, slave_base, master, **prop)
        return
    #global m_axi
    m_axi_array = getInterfaces(tcl_user_app.fpga, 'm_axi', 'scope', 'global')
    tcl_user_app.assign_address(None, 'S_AXI_MEM_0', 'Reg')
    if shared:
        tcl_user_app.assign_address(None, 'S_AXI_MEM_1', 'Reg')
    # First pass: pin every global m_axi master's view of memory at offset 0.
    for global_m_axi in m_axi_array:
        instName = global_m_axi['kernel_inst']['inst']
        # HLS kernels prefix AXI data ports with 'Data_'.
        if(global_m_axi['kernel_inst']['lib'] == 'hls'):
            master = instName + '/Data_' + global_m_axi['name']
        else:
            master = instName + '/' + global_m_axi['name']
        # NOTE(review): `properties` is unused in this pass (offset is passed
        # explicitly below); the second pass applies the range.
        properties = {'offset': '0x00000000', 'range': '4G'}
        tcl_user_app.set_address_properties(None, 'S_AXI_MEM_0', 'Reg', master, offset='0x00000000')
        if shared:
            tcl_user_app.set_address_properties(None, 'S_AXI_MEM_1', 'Reg', master, offset='0x00000000')
    # Second pass: widen each master's range to the full 4G after all offsets
    # are placed (offset before range avoids misalignment).
    for global_m_axi in m_axi_array:
        instName = global_m_axi['kernel_inst']['inst']
        if(global_m_axi['kernel_inst']['lib'] == 'hls'):
            master = instName + '/Data_' + global_m_axi['name']
        else:
            master = instName + '/' + global_m_axi['name']
        properties = {'range': '4G'}
        tcl_user_app.set_address_properties(None, 'S_AXI_MEM_0', 'Reg', master, **properties)
        if shared:
            tcl_user_app.set_address_properties(None, 'S_AXI_MEM_1', 'Reg', master, **properties)
    #global s_axi
    s_axi_array = getInterfaces(tcl_user_app.fpga, 's_axi', 'scope', 'global')
    master = 'S_AXI_CONTROL'
    # set up the address space for the memories that were added in raw mode
    if tcl_user_app.fpga['comm'] == 'raw':
        slave_inst = "applicationRegion/ctrl_blk_mem_switch_rom"
        slave_port = "S_AXI"
        slave_base = "Mem0"
        tcl_user_app.assign_address(slave_inst, slave_port, slave_base)
        slave_inst = "ctrl_blk_mem_switch_rom"
        # range is done first because if offset is done first, depending on the range, it can be misaligned
        prop = {'range': '4K'}
        tcl_user_app.set_address_properties(slave_inst, slave_port, slave_base, master, **prop)
        prop = {'offset': "0x0000"}
        tcl_user_app.set_address_properties(slave_inst, slave_port, slave_base, master, **prop)
        slave_inst = "applicationRegion/ctrl_blk_mem_switch_rom_mac"
        tcl_user_app.assign_address(slave_inst, slave_port, slave_base)
        slave_inst = "ctrl_blk_mem_switch_rom_mac"
        prop = {'range': '4K'}
        tcl_user_app.set_address_properties(slave_inst, slave_port, slave_base, master, **prop)
        prop = {'offset': "0x1000"}
        tcl_user_app.set_address_properties(slave_inst, slave_port, slave_base, master, **prop)
    # Control addressing for every globally-scoped s_axi slave.
    for global_s_axi in s_axi_array:
        # NOTE(review): redundant; overwritten by getSlaveAddressInfo below.
        slave_inst = global_s_axi['kernel_inst']['inst']
        slave_inst, slave_port, slave_base, properties = getSlaveAddressInfo(global_s_axi)
        tcl_user_app.assign_address(slave_inst, slave_port, slave_base)
        if 'offset' in properties:
            prop = {'offset': properties['offset']}
            tcl_user_app.set_address_properties(slave_inst, slave_port, slave_base, master, **prop)
        if 'range' in properties:
            prop = {'range': properties['range']}
            tcl_user_app.set_address_properties(slave_inst, slave_port, slave_base, master, **prop)
    #local m_axi and s_axi
    # Locally-scoped masters address only their own declared slaves.
    m_axi_array = getInterfaces(tcl_user_app.fpga, 'm_axi', 'scope', 'local')
    for local_m_axi in m_axi_array:
        if (local_m_axi['kernel_inst']['lib'] == 'hls'):
            master_port = 'Data_' + local_m_axi['name']
        else:
            master_port = local_m_axi['name']
        s_axi_array = getSlaveInterfaces(tcl_user_app.fpga, 's_axi', local_m_axi)
        for local_s_axi in s_axi_array:
            slave_inst, slave_port, slave_base, properties = getSlaveAddressInfo(local_s_axi)
            tcl_user_app.assign_address(slave_inst, slave_port, slave_base)
            if 'offset' in properties:
                prop = {'offset': properties['offset']}
                tcl_user_app.set_address_properties(slave_inst, slave_port, slave_base, local_m_axi['kernel_inst']['inst'] + '/' + master_port, **prop)
            if 'range' in properties:
                prop = {'range': properties['range']}
                tcl_user_app.set_address_properties(slave_inst, slave_port, slave_base, local_m_axi['kernel_inst']['inst'] + '/' + master_port, **prop)
def userApplicationLocalConnections(tcl_user_app):
"""
Takes care of generating the TCL commands for wiring up the <scope>local</scope>
connections between kernels, as defined in the logical file.
| |
# <gh_stars>1-10  (repository-scraper metadata tag, commented out so the module parses)
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 23 18:11:20 2021
@author: lukepinkel
"""
import numpy as np
import scipy as sp
import scipy.stats
import pandas as pd
from .rotation import rotate, oblique_constraint_derivs
from ..utilities.linalg_operations import vec, invec, vech, vecl, invecl, vecl_inds
from ..utilities.special_mats import nmat, dmat, lmat
from ..utilities.numerical_derivs import so_gc_cd
from ..utilities.data_utils import _check_type, cov, eighs, flip_signs
from .fit_measures import srmr, lr_test, gfi, agfi
class FactorAnalysis(object):
    def __init__(self, X=None, S=None, n_obs=None, n_factors=None,
                 rotation_method=None, **n_factor_kws):
        """
        Exploratory Factor Analysis

        Parameters
        ----------
        X : dataframe, optional
            A dataframe of size (n x p) containing the n observations and p
            variables to be analyzed
        S : arraylike
            Dataframe or ndarray containing (p x p) variables to be analyzed
        n_obs : int or float, optional
            If X is not provided, and the covariance matrix S is, then
            n_obs is required to compute test statistics
        n_factors : int float or str, optional
            The number of factors to model.  If a string, must be one of
            'proportion' or 'eigmin'.
        rotation_method : str, optional
            One of the rotation methods, e.g. quartimax, varimax, equamax

        Keyword Arguments:
            proportion : float
                Float in [0, 1] specifying the cutoff variance explained when
                calculating the number of factors to model
            eigmin : float
                The eigenvalue cutoff when determining the number of factors

        Notes
        -----
        In the docs, p will be used to denote the number of variables,
        and q the number of factors, t the number of parameters, k the
        number of covariance parameters to model, and m the number of
        parameters in the explicit 'augmented' model

        t = p * (q + 1)
        k = p * (p + 1) // 2
        m = p * (q + 1) + q * (q - 1) // 2 = t + q * (q - 1) // 2
        """
        # Order matters: data first (sets n_vars/u), then factor count, then
        # the initial parameter vector.
        self._process_data(X, S, n_obs)
        self._get_n_factors(n_factors, **n_factor_kws)
        self._rotation_method = rotation_method
        self._make_params()
        # Precomputed constant matrices reused by dsigma/hessian:
        self.E = np.eye(self.n_vars)          # scratch basis matrix E_ij
        self.Ik = np.eye(self.n_facs)
        self.Nk = nmat(self.n_facs).A         # symmetrizer N_q
        self.Lk = lmat(self.n_facs).A         # elimination matrix L_q
        self.Ip = np.eye(self.n_vars)
        self.Dp = dmat(self.n_vars).A         # duplication matrix D_p
        self.Lp = lmat(self.n_vars).A         # elimination matrix L_p
        self.Np = nmat(self.n_vars).A         # symmetrizer N_p
        self.LpNp = np.dot(self.Lp, self.Np)
        self.d_inds = vec(self.Ip)==1         # positions of diagonal entries in vec
        self.l_inds = vecl_inds(self.n_facs)
def _process_data(self, X, S, n_obs):
given_x = X is not None
given_s = S is not None
given_n = n_obs is not None
if given_x and not given_s:
X, cols, inds, _is_pd = _check_type(X)
S = cov(X)
n_obs, n_vars = X.shape
if not given_x and given_s and given_n:
S, cols, _, _is_pd = _check_type(S)
n_vars = S.shape[0]
inds = np.arange(n_obs)
u, V = eighs(S)
V = flip_signs(V)
self.X, self.cols, self.inds, self._is_pd = X, cols, inds, _is_pd
self.S, self.V, self.u = S, V, u
self.cols, self.inds, self._is_pd = cols, inds, _is_pd
self.n_obs, self.n_vars = n_obs, n_vars
def _get_n_factors(self, n_factors, proportion=0.6, eigmin=1.0):
if type(n_factors) is str:
if n_factors == 'proportion':
n_factors = np.sum((self.u.cumsum()/self.u.sum())<proportion)+1
elif n_factors == 'eigmin':
n_factors = np.sum(self.u>eigmin)
elif type(n_factors) in [float, int]:
n_factors = int(n_factors)
self.n_facs = self.n_factors = n_factors
def _make_params(self):
self.n_pars = self.n_vars*self.n_facs + self.n_vars
self.theta = np.zeros(self.n_pars)
self.lix=np.arange(self.n_vars*self.n_facs)
self.pix =np.arange(self.n_vars*self.n_facs, self.n_vars*self.n_facs+self.n_vars)
L = self.V[:, :self.n_facs]
psi = np.diag(self.S - np.dot(L, L.T))
self.theta[self.lix] = vec(L)
self.theta[self.pix] = np.log(psi)
def model_matrices(self, theta):
"""
Parameters
----------
theta : ndarray
ndarray of length t containing model parameters.
Returns
-------
L : ndarray
(p x q) matrix of loadings.
Psi : ndarray
(p x p) diagonal matrix of residual covariances.
"""
L = invec(theta[self.lix], self.n_vars, self.n_facs)
Psi = np.diag(np.exp(theta[self.pix]))
return L, Psi
def implied_cov(self, theta):
"""
Parameters
----------
theta : ndarray
ndarray of length t containing model parameters.
Returns
-------
Sigma : ndarray
(p x p) implied covariance matrix.
"""
L, Psi = self.model_matrices(theta)
Sigma = L.dot(L.T) + Psi
return Sigma
def loglike(self, theta):
"""
Parameters
----------
theta : ndarray
ndarray of length t containing model parameters.
Returns
-------
ll : float
Loglikelihood of the model.
"""
Sigma = self.implied_cov(theta)
_, lndS = np.linalg.slogdet(Sigma)
trSV = np.trace(np.linalg.solve(Sigma, self.S))
ll = lndS + trSV
return ll
def gradient(self, theta):
"""
Parameters
----------
theta : ndarray
ndarray of length t containing model parameters.
Returns
-------
g : ndarray
ndarray of length t containing the derivatives of the
loglikelihood with respect to theta.
"""
L, Psi = self.model_matrices(theta)
Sigma = L.dot(L.T) + Psi
V = np.linalg.pinv(Sigma)
VRV = V.dot(Sigma - self.S).dot(V)
g1 = 2 * vec(VRV.dot(L))
g2 = np.diag(VRV.dot(Psi))
g = np.zeros(self.n_pars)
g[self.lix] = g1
g[self.pix] = g2
return g
def hessian_approx(self, theta):
H = so_gc_cd(self.gradient, theta)
return H
def dsigma(self, theta):
"""
Parameters
----------
theta : ndarray
ndarray of length t containing model parameters.
Returns
-------
G : ndarray
(k x t) matrix of derivatives of the implied covariance with
respect to parameters
"""
L, Psi = self.model_matrices(theta)
DLambda = np.dot(self.LpNp, np.kron(L, self.Ip))
DPsi = np.dot(self.Lp, np.diag(vec(Psi)))[:, self.d_inds]
G = np.block([DLambda, DPsi])
return G
    def hessian(self, theta):
        """
        Analytic Hessian of the loglikelihood.

        Parameters
        ----------
        theta : ndarray
            ndarray of length t containing model parameters.

        Returns
        -------
        H : ndarray
            (t x t) matrix of second derivatives of the log likelihood with
            respect to the parameters
        """
        L, Psi = self.model_matrices(theta)
        Sigma = L.dot(L.T) + Psi
        Sigma_inv = np.linalg.inv(Sigma)
        Sdiff = self.S - Sigma
        d = vech(Sdiff)
        G = self.dsigma(theta)
        DGp = self.Dp.dot(G)
        # Weight matrices for the first- and second-order curvature terms.
        W1 = np.kron(Sigma_inv, Sigma_inv)
        W2 = np.kron(Sigma_inv, Sigma_inv.dot(Sdiff).dot(Sigma_inv))
        H1 = 0.5 * DGp.T.dot(W1).dot(DGp)
        H2 = 1.0 * DGp.T.dot(W2).dot(DGp)
        # Second derivatives of Sigma w.r.t. each covariance element (i, j).
        Hpp = []
        Dp, Ik, E = self.Dp, self.Ik, self.E
        Hij = np.zeros((self.n_pars, self.n_pars))
        for i in range(self.n_vars):
            for j in range(i, self.n_vars):
                eij = np.zeros(self.n_vars)
                if i==j:
                    eij[i] = 1.0
                E[i, j] = 1.0
                T = E + E.T
                H11 = np.kron(Ik, T)
                H22 = np.diag(Psi) * eij
                Hij[self.lix, self.lix[:, None]] = H11
                Hij[self.pix, self.pix] = H22
                # Reset the scratch basis matrix for the next (i, j).
                E[i, j] = 0.0
                # `Hij[:, :, None]` is a view; the rebinding `Hij = Hij*0.0`
                # below allocates a fresh array, so each appended view keeps
                # its own data rather than being zeroed.
                Hpp.append(Hij[:, :, None])
                Hij = Hij*0.0
        W = np.linalg.multi_dot([Dp.T, W1, Dp])
        dW = np.dot(d, W)
        Hp = np.concatenate(Hpp, axis=2)
        # Contract the stack of second-derivative slabs against the weights.
        H3 = np.einsum('k,ijk ->ij', dW, Hp)
        H = (H1 + H2 - H3 / 2.0)*2.0
        return H
def _make_augmented_params(self, L, Phi, Psi):
"""
Parameters
----------
L : ndarray
(p x q) array of loadings.
Phi : ndarray
(q x q) factor covariance matrix.
Psi : ndarray
(p x p) diagonal matrix of residual covariances.
Returns
-------
None.
Notes
-----
If rotation_method is not None, then a params vector is added to
account for the restricted parameters when computing the hessian.
Makes implicit assumptions about factor covariance explicit by
augmenting theta with (q - 1) * q // 2 factor covariance parameters.
"""
p, q = self.n_vars, self.n_facs
nl = p * q
nc = q * (q - 1) // 2 if self._rotation_method is not None else 0
nr = p
nt = nl + nc + nr
params = np.zeros(nt)
ixl = np.arange(nl)
ixc = np.arange(nl, nl+nc)
ixr = np.arange(nl+nc, nl+nc+nr)
params[ixl] = vec(L)
if self._rotation_method is not None:
params[ixc] = vecl(Phi)
params[ixr] = np.diag(Psi)
self.nl, self.nc, self.nr, self.nt = nl, nc, nr, nt
self.ixl, self.ixc, self.ixr = ixl, ixc, ixr
self.params = params
def model_matrices_augmented(self, params):
"""
Parameters
----------
params : ndarray
ndarray of length m containing model parameters..
Returns
-------
L : ndarray
(p x q) array of loadings.
Phi : ndarray
(q x q) factor covariance matrix..
Psi : ndarray
(p x p) diagonal matrix of residual covariances.
"""
L = invec(params[self.ixl], self.n_vars, self.n_facs)
if self._rotation_method is not None:
Phi = invecl(params[self.ixc])
else:
Phi = np.eye(self.n_facs)
Psi = np.diag(params[self.ixr])
return L, Phi, Psi
def dsigma_augmented(self, params):
"""
Parameters
----------
params : ndarray
ndarray of length m containing model parameters.
Returns
-------
G : ndarray
(k x m) matrix of derivatives of the implied covariance with
respect to parameters
"""
L, Phi, Psi = self.model_matrices_augmented(params)
DLambda = np.dot(self.LpNp, np.kron(L.dot(Phi), self.Ip))
DPhi | |
# <reponame>uruzahe/carla  (repository-scraper metadata tag, commented out so the module parses)
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2007-2020 German Aerospace Center (DLR) and others.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file routeChoices.py
# @author <NAME>
# @author <NAME>
# @author <NAME>
# @date 2007-02-27
"""
This script is to calculate the route choice probabilities based on different methods.
- Gawron
- step-size (TBD)
- ......
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import random
import math
from xml.sax import handler
from xml.sax import parse
class Vehicle:
    """Per-vehicle state: departure parameters, accumulated emissions and
    the list of known route alternatives."""

    def __init__(self, label, depart, departlane='first', departpos='base', departspeed=0):
        self.label = label
        # cumulative emission totals, filled in while parsing dump files
        for attr in ('CO_abs', 'CO2_abs', 'HC_abs', 'PMx_abs', 'NOx_abs', 'fuel_abs'):
            setattr(self, attr, 0.)
        # known route alternatives (Route objects)
        self.routesList = []
        # self.speed = 0.
        self.depart = float(depart)
        self.departlane = departlane
        self.departpos = departpos
        self.departspeed = departspeed
        # index/object of the route chosen in the current iteration
        self.selectedRoute = None
class Edge:
    """Static and measured per-edge data.

    Holds geometry read from the network file plus travel-time and
    emission figures accumulated from dump files.  The ``*_perVeh``
    values refer to a single vehicle on the edge; the
    ``*_perVeh_default`` values are free-flow references loaded from an
    additional-weights file.  (Fix: ``fuel_perVeh_default`` was
    previously initialised twice.)
    """

    def __init__(self, label):
        self.label = label
        self.length = 0.
        self.freespeed = 0.
        self.freetraveltime = 0.
        self.traveltime = 0.
        for name in ('CO', 'CO2', 'HC', 'PMx', 'NOx', 'fuel'):
            # absolute (aggregated) emissions on this edge
            setattr(self, name + '_abs', 0.)
            # per-vehicle emissions (only one veh on the edge)
            setattr(self, name + '_perVeh', 0.)
            # free-flow per-vehicle default values
            setattr(self, name + '_perVeh_default', 0.)
pathNum = 0
class Route:
    """A single route alternative; labels are drawn sequentially from the
    module-wide ``pathNum`` counter."""

    def __init__(self, edges):
        global pathNum
        # sequential, process-wide route id
        self.label = str(pathNum)
        pathNum += 1
        # space-separated edge id string
        self.edges = edges
        # self.ex_probability = None
        self.probability = 0.   # choice probability (Gawron)
        self.selected = False
        self.ex_cost = 0.       # expected (smoothed) cost
        self.act_cost = 0.      # actual measured cost
class netReader(handler.ContentHandler):
    """SAX handler collecting Edge objects (length, free speed) from a
    SUMO network file."""

    def __init__(self, edgesList, edgesMap):
        self._edgesList = edgesList
        self._edgesMap = edgesMap
        self._edgeObj = None

    def startElement(self, name, attrs):
        # internal edges carry a 'function' attribute and are skipped
        if name == 'edge' and 'function' not in attrs:
            edge_id = attrs['id']
            if edge_id not in self._edgesMap:
                self._edgeObj = Edge(edge_id)
                self._edgesList.append(self._edgeObj)
                self._edgesMap[edge_id] = self._edgeObj
        if self._edgeObj and name == 'lane':
            length = float(attrs['length'])
            speed = float(attrs['speed'])
            self._edgeObj.length = length
            self._edgeObj.freespeed = speed
            self._edgeObj.freetraveltime = length / speed

    def endElement(self, name):
        if name == 'edge':
            self._edgeObj = None
class addweightsReader(handler.ContentHandler):
    """SAX handler that stores default (free-flow) costs from an
    additional-weights file on edges already known from the network.

    Fixes: ``__init__`` previously initialised a misspelled
    ``self._edgObj`` attribute, and attribute values were applied even
    when the edge id was unknown (using a stale or undefined
    ``self._edgeObj``).
    """

    def __init__(self, edgesList, edgesMap):
        self._edgesList = edgesList
        self._edgesMap = edgesMap
        self._edgeObj = None  # fixed typo: was self._edgObj

    def startElement(self, name, attrs):
        if name != 'edge':
            return
        # Only apply weights to edges already known from the network file.
        self._edgeObj = self._edgesMap.get(attrs['id'])
        if self._edgeObj is None:
            return
        if 'traveltime' in attrs:
            self._edgeObj.freetraveltime = float(attrs['traveltime'])
        # Both the per-vehicle and the absolute emission attributes map
        # onto '<name>_default' members on the edge object.
        for key in ('CO_perVeh', 'CO2_perVeh', 'HC_perVeh', 'PMx_perVeh',
                    'NOx_perVeh', 'fuel_perVeh',
                    'fuel_abs', 'NOx_abs', 'PMx_abs', 'HC_abs',
                    'CO2_abs', 'CO_abs'):
            if key in attrs:
                setattr(self._edgeObj, key + '_default', float(attrs[key]))
class routeReader(handler.ContentHandler):
    """SAX handler that builds Vehicle objects and their route
    alternatives from a SUMO route file."""

    def __init__(self, vehList, vehMap):
        self._vehList = vehList
        self._vehMap = vehMap
        self._vehObj = None
        self._routObj = None

    def startElement(self, name, attrs):
        if name == 'vehicle':
            if 'departPos' in attrs:
                self._vehObj = Vehicle(attrs['id'], attrs['depart'],
                                       attrs['departLane'], attrs['departPos'],
                                       attrs['departSpeed'])
            else:
                self._vehObj = Vehicle(attrs['id'], attrs['depart'])
            self._vehMap[attrs['id']] = self._vehObj
            self._vehList.append(self._vehObj)
        if self._vehObj and name == 'route':
            # attrs['edges'] is already the space-separated edge string;
            # the former split(' ')/" ".join() round-trip was a no-op.
            self._routObj = Route(attrs['edges'])
            self._vehObj.routesList.append(self._routObj)

    def endElement(self, name):
        if name == 'vehicle':
            self._vehObj = None
            self._routObj = None
class vehrouteReader(handler.ContentHandler):
    """SAX handler that performs the Gawron route-choice update.

    Reads a SUMO rou.alt/vehroute file, updates each route's cost and
    choice probability per vehicle, draws a route, and writes both a
    *.rou.xml (chosen routes) and a *.rou.alt.xml (route distributions)
    output file.
    """

    def __init__(self, vehList, vehMap, edgesMap, fout, foutrout, ecoMeasure, alpha, beta):
        self._vehList = vehList          # Vehicle objects in parse order
        self._vehMap = vehMap            # vehicle id -> Vehicle
        self._edgesMap = edgesMap        # edge id -> Edge (per-edge costs)
        self._fout = fout                # open handle: *.rou.alt.xml output
        self._foutrout = foutrout        # open handle: *.rou.xml output
        self._ecoMeasure = ecoMeasure    # 'fuel' for fuel-based costs, else time-based
        self._newroutesList = []         # routes first seen in this file
        self._alpha = alpha              # Gawron alpha parameter
        self._beta = beta                # cost-smoothing factor for unused routes
        self._vehObj = None
        self._routObj = None
        self._selected = None            # index of the drawn route
        self._currentSelected = None     # index driven in the last iteration
        self._count = 0                  # running route index within a vehicle
        self._existed = False

    def startElement(self, name, attrs):
        if name == 'vehicle':
            self._vehObj = self._vehMap[attrs['id']]
        if self._vehObj and name == 'routeDistribution':
            # index of the route the vehicle actually drove last iteration
            self._currentSelected = attrs['last']
        if self._vehObj and name == 'route':
            if self._count == int(self._currentSelected):
                self._vehObj.selectedRouteEdges = attrs['edges']
            self._count += 1
            # match this <route> against the vehicle's known alternatives
            for r in self._vehObj.routesList:
                if r.edges == attrs['edges']:
                    self._existed = True
                    # NOTE(review): _existed appears never to be reset to
                    # False (neither here nor in endElement), so after the
                    # first match no new Route is ever appended — confirm
                    # this is intended.
                    self._routObj = r
                    break
            if not self._existed:
                self._routObj = Route(attrs['edges'])
                self._vehObj.routesList.append(self._routObj)
            if 'probability' in attrs:
                self._routObj.probability = float(attrs['probability'])
                if self._routObj.probability == 0.0:
                    # check with Micha if there is a better way to avoid the
                    # prob. = 0.
                    self._routObj.probability = 1.02208127529e-16
            if 'cost' in attrs:
                self._routObj.ex_cost = float(attrs['cost'])
            # accumulate the actual route cost from per-edge data, falling
            # back to free-flow/default values where no measurement exists
            for e in self._routObj.edges.split(' '):
                eObj = self._edgesMap[e]
                if self._ecoMeasure != 'fuel' and eObj.traveltime == 0.:
                    self._routObj.act_cost += eObj.freetraveltime
                elif self._ecoMeasure != 'fuel' and eObj.traveltime > 0.:
                    self._routObj.act_cost += eObj.traveltime
                elif self._ecoMeasure == 'fuel' and eObj.fuel_perVeh == 0.:
                    self._routObj.act_cost += eObj.fuel_perVeh_default
                elif self._ecoMeasure == 'fuel' and eObj.fuel_perVeh > 0.:
                    self._routObj.act_cost += eObj.fuel_perVeh
            if self._routObj.ex_cost == 0.:
                self._routObj.ex_cost = self._routObj.act_cost

    def endElement(self, name):
        if name == 'vehicle':
            # if len(self._vehObj.routesList) == 1:
            #     self._vehObj.routesList[0].probability = 1.
            # for the routes which are from the sumo's rou.alt.xml file
            for r in self._vehObj.routesList:
                if r.act_cost == 0.:
                    for e in r.edges.split(' '):
                        eObj = self._edgesMap[e]
                        if self._ecoMeasure != 'fuel' and eObj.traveltime == 0.:
                            r.act_cost += eObj.freetraveltime
                        elif self._ecoMeasure != 'fuel' and eObj.traveltime > 0.:
                            r.act_cost += eObj.traveltime
                        elif self._ecoMeasure == 'fuel' and eObj.fuel_perVeh == 0.:
                            r.act_cost += eObj.fuel_perVeh_default
                        elif self._ecoMeasure == 'fuel' and eObj.fuel_perVeh > 0.:
                            r.act_cost += eObj.fuel_perVeh
                if r.ex_cost == 0.:
                    r.ex_cost = r.act_cost
                # calcuate the probabilites for the new routes
                if not r.probability:
                    # new routes start with a uniform share
                    r.probability = 1. / float(len(self._vehObj.routesList))
                    print('new probability for route', r.label,
                          'for veh', self._vehObj.label)
                    self._newroutesList.append(r)
            # adjust the probabilites of the existing routes due to the new
            # routes (rescale the old ones so the total stays 1)
            if len(self._newroutesList) > 0:
                addProb = 0.
                origProbSum = 0.
                for r in self._vehObj.routesList:
                    if r in self._newroutesList:
                        addProb += r.probability
                    else:
                        origProbSum += r.probability
                for r in self._vehObj.routesList:
                    if r not in self._newroutesList:
                        r.probability = r.probability / \
                            origProbSum * (1. - addProb)
            # update the costs of routes not used by the driver
            # (exponential smoothing towards the expected cost)
            for r in self._vehObj.routesList:
                if r.edges != self._vehObj.selectedRouteEdges:
                    r.act_cost = self._beta * r.act_cost + \
                        (1. - self._beta) * r.ex_cost
            # calcuate the route choice probabilities based on Gawron
            # todo: add "one used route to all routes"
            for r1 in self._vehObj.routesList:
                for r2 in self._vehObj.routesList:
                    if r1.label != r2.label:
                        gawron(r1, r2, self._alpha)
            # decide which route will be selected (roulette-wheel draw)
            randProb = random.random()
            if len(self._vehObj.routesList) == 1:
                self._vehObj.routesList[0].probability = 1.
                self._selected = 0
            else:
                cumulatedProbs = 0.
                # NOTE(review): if the probabilities do not sum to at least
                # randProb, _selected stays None and the write below would
                # fail — confirm probabilities always sum to 1 here.
                for i, r in enumerate(self._vehObj.routesList):
                    cumulatedProbs += r.probability
                    if cumulatedProbs >= randProb:
                        self._selected = i
                        break
            # generate the *.rou.xml
            self._foutrout.write(' <vehicle id="%s" depart="%.2f" departLane="%s" departPos="%s" departSpeed="%s">\n'
                                 % (self._vehObj.label, self._vehObj.depart, self._vehObj.departlane,
                                    self._vehObj.departpos, self._vehObj.departspeed))
            self._foutrout.write(
                ' <route edges="%s"/>\n' % self._vehObj.routesList[self._selected].edges)
            self._foutrout.write(' </vehicle> \n')
            # generate the *.rou.alt.xml
            self._fout.write(' <vehicle id="%s" depart="%.2f" departLane="%s" departPos="%s" departSpeed="%s">\n'
                             % (self._vehObj.label, self._vehObj.depart, self._vehObj.departlane,
                                self._vehObj.departpos, self._vehObj.departspeed))
            self._fout.write(
                ' <routeDistribution last="%s">\n' % self._selected)
            for route in self._vehObj.routesList:
                self._fout.write(' <route cost="%.4f" probability="%s" edges="%s"/>\n' % (
                    route.act_cost, route.probability, route.edges))
            self._fout.write(' </routeDistribution>\n')
            self._fout.write(' </vehicle> \n')
            # reset per-vehicle state for the next <vehicle> element
            self._newroutesList = []
            self._vehObj = None
            self._selected = None
            self._currentSelected = None
            self._count = 0
        if name == 'route':
            self._routObj = None
        if (name == 'route-alternatives' or name == 'routes'):
            # close both output documents
            self._fout.write('</route-alternatives>\n')
            self._fout.close()
            self._foutrout.write('</routes>\n')
            self._foutrout.close()
class dumpsReader(handler.ContentHandler):
def __init__(self, edgesList, edgesMap):
self._edgesList = edgesList
self._edgeObj = None
self._edgesMap = edgesMap
def startElement(self, name, attrs):
if name == 'edge':
if attrs['id'] not in self._edgesMap:
self._edgeObj = Edge(attrs['id'])
self._edgesList.append(self._edgeObj)
self._edgesMap[attrs['id']] = self._edgeObj
else:
self._edgeObj = self._edgesMap[attrs['id']]
if 'traveltime' in attrs:
self._edgeObj.traveltime = float(attrs['traveltime'])
if 'CO_perVeh' in attrs:
self._edgeObj.CO_perVeh = float(attrs['CO_perVeh'])
if 'CO2_perVeh' in attrs:
self._edgeObj.CO2_perVeh = float(attrs['CO2_perVeh'])
if 'HC_perVeh' in attrs:
self._edgeObj.HC_perVeh = float(attrs['HC_perVeh'])
if 'PMx_perVeh' in attrs:
self._edgeObj.PMx_perVeh = float(attrs['PMx_perVeh'])
if 'NOx_perVeh' in attrs:
self._edgeObj.NOx_perVeh = float(attrs['NOx_perVeh'])
if 'fuel_perVeh' in attrs:
self._edgeObj.fuel_perVeh = float(attrs['fuel_perVeh'])
if 'fuel_abs' in attrs:
self._edgeObj.fuel_abs = float(attrs['fuel_abs'])
if 'NOx_abs' in attrs:
self._edgeObj.NOx_abs = float(attrs['NOx_abs'])
if 'PMx_abs' in attrs:
self._edgeObj.PMx_abs = float(attrs['PMx_abs'])
if 'HC_abs' in attrs:
self._edgeObj.HC_abs = float(attrs['HC_abs'])
if 'CO2_abs' in attrs:
self._edgeObj.CO2_abs = float(attrs['CO2_abs'])
if 'CO_abs' in attrs:
self._edgeObj.CO_abs = | |
# repository: uruzahe/carla — file: Co-Simulation/Sumo/sumo-1.7.0/tools/contributed/sumopy/coremodules/simulation/sumo.py
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2016-2020 German Aerospace Center (DLR) and others.
# SUMOPy module
# Copyright (C) 2012-2017 University of Bologna - DICAM
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file sumo.py
# @author <NAME>
# @date
import os
import sys
import string
from xml.sax import saxutils, parse, handler
# When run as a script, make the SUMOPy package importable by adding its
# root directory (two levels up) to the module search path.
if __name__ == '__main__':
    try:
        APPDIR = os.path.dirname(os.path.abspath(__file__))
    except NameError:
        # __file__ is undefined in some frozen/interactive contexts
        APPDIR = os.path.dirname(os.path.abspath(sys.argv[0]))
    SUMOPYDIR = os.path.join(APPDIR, '..', '..')
    sys.path.append(SUMOPYDIR)

# Make SUMO's TraCI python client importable; fall back to traci = None so
# the module can still be imported without a SUMO installation.
# (Fix: 'print' statement was Python-2-only syntax; excepts were bare.)
try:
    if 'SUMO_HOME' in os.environ:
        tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
        sys.path.append(tools)
    else:
        print("please declare environment variable 'SUMO_HOME'")
    import traci
    import traci.constants as tc
except ImportError:
    print('WARNING: No module traci in syspath. Please provide SUMO_HOME.')
    traci = None
from coremodules.modules_common import *
import numpy as np
import agilepy.lib_base.classman as cm
import agilepy.lib_base.arrayman as am
import agilepy.lib_base.xmlman as xm
#from agilepy.lib_base.misc import get_inversemap
#from agilepy.lib_base.geometry import find_area
from agilepy.lib_base.processes import Process, CmlMixin, ff, call, P
from coremodules.network.network import SumoIdsConf
def write_netconfig(filename_netconfig, filename_net,
                    filename_routes='',
                    filename_poly=None,
                    dirname_output='',
                    starttime=None, stoptime=None,
                    time_step=1.0,
                    time_to_teleport=-1,
                    pedestrian_model='None',
                    width_sublanes=-1.0,
                    filename_ptstops=None,
                    filepath_output_vehroute=None,
                    filepath_output_tripinfo=None,
                    filepath_output_edgedata=None,
                    filepath_output_lanedata=None,
                    filepath_output_edgeemissions=None,
                    filepath_output_laneemissions=None,
                    filepath_output_edgenoise=None,
                    filepath_output_lanenoise=None,
                    freq=60,
                    is_exclude_emptyedges=False,
                    is_exclude_emptylanes=False,
                    is_ignore_route_errors=True,
                    filepath_gui=None,
                    seed=1025,
                    is_openscenegraph=False,
                    width_pedestrian_striping=0.49,
                    slowdownfactor_pedestrian_striping=0.2,
                    jamtime_pedestrian_striping=20,
                    is_collission_check_junctions=True,
                    is_ignore_accidents=False,
                    collission_action='teleport',
                    ):
    """Write a SUMO simulation configuration (netconfig) file.

    filename_netconfig = output filename of network config file without path
    filename_net = input filename of network file without path
    filename_routes = input filename of routes file without path
    filename_poly = input filename of polygons file without path
    dirname_output = directory where config, network, route and poly file reside

    If any edge/lane output is requested, an additional file
    (<root>.outc.xml) containing the <edgeData>/<laneData> declarations
    is written alongside and referenced from the configuration.

    Fixes: Python-2-only ``string.join`` replaced with ``str.join``;
    bitwise ``|``/``&`` on booleans replaced with logical operators;
    files are handled with ``with`` so they are closed on error.
    """
    if dirname_output:
        filepath_netconfig = os.path.join(dirname_output, filename_netconfig)
    else:
        filepath_netconfig = filename_netconfig

    # an additional file is only needed if any edge/lane output is requested
    if any(fp is not None for fp in (filepath_output_edgedata,
                                     filepath_output_lanedata,
                                     filepath_output_edgeemissions,
                                     filepath_output_laneemissions,
                                     filepath_output_edgenoise,
                                     filepath_output_lanenoise)):
        # e.g. 'scen.netc.xml' -> 'scen.outc.xml'
        filename_add = '.'.join(filename_netconfig.split('.')[:-2] + ['outc.xml'])
        filepath_add = os.path.join(dirname_output, filename_add)
    else:
        filename_add = None

    with open(filepath_netconfig, 'w') as simfile:
        simfile.write(
            """<?xml version="1.0"?>
<configuration xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="https://sumo.sf.net/xsd/sumoConfiguration.xsd">
<input>\n""")
        simfile.write(' <net-file value="%s"/>\n' % filename_net)
        if filename_routes != "":
            simfile.write(' <route-files value="%s"/>\n' % filename_routes)
        # collect the distinct additional files (poly, outputs, PT stops)
        filenames_add = set([filename_poly, filename_add, filename_ptstops])
        filenames_add.discard(None)
        simfile.write(' <additional-files value="')
        simfile.write(','.join(list(filenames_add)))
        simfile.write('" />\n')
        simfile.write('</input>\n')
        if (starttime is not None) and (stoptime is not None):
            simfile.write(
                """
<time>
 <begin value="%s"/>
 <end value="%s"/>
</time>
""" % (starttime, stoptime))
        simfile.write('<time-to-teleport value="%s"/>\n' % time_to_teleport)
        simfile.write('<seed value="%s"/>\n' % seed)
        simfile.write('<step-length value="%s"/>\n' % time_step)
        simfile.write('<ignore-route-errors value="%s"/>\n' % is_ignore_route_errors)
        if width_sublanes > 0:
            simfile.write('<lateral-resolution value="%s"/>\n' % width_sublanes)
        # not (yet) recogniced...move to cml
        if pedestrian_model != 'None':
            simfile.write('<pedestrian.model value="%s"/>\n' % pedestrian_model)
            if pedestrian_model == 'striping':
                simfile.write('<pedestrian.striping.stripe-width value="%s"/>\n' % width_pedestrian_striping)
                simfile.write('<pedestrian.striping.dawdling value="%s"/>\n' % slowdownfactor_pedestrian_striping)
                simfile.write('<pedestrian.striping.jamtime value="%s"/>\n' % jamtime_pedestrian_striping)
        simfile.write('<collision.check-junctions value="%s"/>\n' % is_collission_check_junctions)
        simfile.write('<collision.action value="%s"/>\n' % collission_action)
        # simfile.write('<ignore-accidents value="%s"/>\n' % is_ignore_accidents)
        simfile.write('<output>\n')
        if filepath_output_vehroute is not None:
            simfile.write('<vehroute-output value="%s"/>\n' % filepath_output_vehroute)
        if filepath_output_tripinfo is not None:
            simfile.write('<tripinfo-output value="%s"/>\n' % filepath_output_tripinfo)
        simfile.write('</output>\n')
        if filepath_gui is not None:
            simfile.write('<gui-settings-file value="%s"/>\n' % filepath_gui)
        if is_openscenegraph:
            simfile.write('<osg-view value="true"/>\n')
        simfile.write('</configuration>\n')

    # write the additional file with the requested edge/lane outputs
    if filename_add is not None:
        with open(filepath_add, 'w') as addfile:
            addfile.write('<add>\n')
            if filepath_output_edgedata is not None:
                addfile.write(' <edgeData id="output_edgedata_%d" freq="%d" file="%s" excludeEmpty="%s"/>\n' %
                              (freq, freq, filepath_output_edgedata, str(is_exclude_emptyedges).lower()))
            if filepath_output_lanedata is not None:
                addfile.write(' <laneData id="output_lanedata_%d" freq="%d" file="%s" excludeEmpty="%s"/>\n' %
                              (freq, freq, filepath_output_lanedata, str(is_exclude_emptylanes).lower()))
            if filepath_output_edgeemissions is not None:
                addfile.write(' <edgeData id="output_edgeemissions_%d" type="emissions" freq="%d" file="%s" excludeEmpty="%s"/>\n' %
                              (freq, freq, filepath_output_edgeemissions, str(is_exclude_emptyedges).lower()))
            if filepath_output_laneemissions is not None:
                addfile.write(' <laneData id="output_laneemissions_%d" type="emissions" freq="%d" file="%s" excludeEmpty="%s"/>\n' %
                              (freq, freq, filepath_output_laneemissions, str(is_exclude_emptylanes).lower()))
            if filepath_output_edgenoise is not None:
                addfile.write(' <edgeData id="edgenoise_%d" type="harmonoise" freq="%d" file="%s" excludeEmpty="%s"/>\n' %
                              (freq, freq, filepath_output_edgenoise, str(is_exclude_emptyedges).lower()))
            if filepath_output_lanenoise is not None:
                addfile.write(' <laneData id="lanenoise_%d" type="harmonoise" freq="%d" file="%s" excludeEmpty="%s"/>\n' %
                              (freq, freq, filepath_output_lanenoise, str(is_exclude_emptylanes).lower()))
            addfile.write('</add>\n')
class Sumo(CmlMixin, Process):
def __init__(self, scenario,
results=None,
logger=None,
guimode='sumopy', # sumo,
is_runnow=False,
is_run_background=False, is_nohup=False,
workdirpath=None,
is_export_net=True,
is_export_poly=True,
is_export_rou=True,
is_prompt_filepaths=False,
routefilepaths=None,
netfilepath=None,
ptstopsfilepath=None,
polyfilepath=None,
logfilepath='',
**kwargs):
self._init_common('sumo', parent=scenario, name='SUMO',
logger=logger,
info='SUMO micro simulation of scenario.',
)
self._results = results
rootname = scenario.get_rootfilename()
rootdirpath = scenario.get_workdirpath()
self.configfilepath = os.path.join(rootdirpath, rootname+'.netc.xml')
# if simresults is None:
# self.simresults = Simresults(scenario=scenario)
self.init_cml('xxx', is_run_background=is_run_background, is_nohup=is_nohup) # pass main shell command
attrsman = self.get_attrsman()
# print '\nSumo.__init__',kwargs
#self.scenario = scenario
#self.settings = scenario.settings
self.guimode = attrsman.add(cm.AttrConf('guimode', guimode,
groupnames=['options', 'misc'],
choices=['sumopy', 'sumopy+map', 'native', 'openscene', 'nogui'],
name='GUI mode',
perm='rw',
info='Gui mode: sumopy = sumopy style, sumopy+map = sumopy theme with backround map, native = Native SUMO gui, openscene = Open street graph, nogui = run without gui window'
))
simtime_start_default = scenario.demand.get_time_depart_first()
# estimate end of simtime
simtime_end_default = scenario.demand.get_time_depart_last()
self.simtime_start = attrsman.add(cm.AttrConf('simtime_start', kwargs.get('simtime_start', simtime_start_default),
groupnames=['options', 'timing'],
name='Start time',
perm='rw',
info='Start time of simulation in seconds after midnight.',
unit='s',
))
self.simtime_end = attrsman.add(cm.AttrConf('simtime_end', kwargs.get('simtime_end', simtime_end_default),
groupnames=['options', 'timing'],
name='End time',
perm='rw',
info='End time of simulation in seconds after midnight.',
unit='s',
))
self.time_warmup = attrsman.add(cm.AttrConf('time_warmup', kwargs.get('time_warmup', 0.0),
groupnames=['options', 'timing'],
name='Warmup time',
perm='rw',
info='Start recording results after this time.',
metatype='time',
unit='s',
))
self.time_step = attrsman.add(cm.AttrConf('time_step', kwargs.get('time_step', 0.2),
groupnames=['options', 'timing'],
name='Time step',
perm='rw',
info='Basic simulation time step (1s by default).',
metatype='time',
unit='s',
))
self.time_to_teleport = attrsman.add(cm.AttrConf('time_to_teleport', kwargs.get('time_to_teleport', -1),
groupnames=['options', 'timing'],
name='teleport',
perm='rw',
info='Time to teleport in seconds, which is the time after'
+ 'dedlocks get resolved by teleporting\n'
+ '-1 means no teleporting takes place',
metatype='time',
unit='s',
))
self.time_sample = attrsman.add(cm.AttrConf('time_sample', kwargs.get('time_sample', 60),
groupnames=['options', 'timing'],
name='Output sample time',
perm='rw',
info='Common sampling time of output data.',
metatype='time',
unit='s',
))
self.is_dynaroute = attrsman.add(cm.AttrConf('is_dynaroute', kwargs.get('is_dynaroute', False),
groupnames=['options', 'timing'],
name='Dynamic routing',
perm='rw',
info='Routing is always performed during the simulation, based on current edge travel times. This option corrisponds to the so called one shot assignment.',
))
# print ' ',scenario.demand.vtypes.lanechangemodel.get_value()
if scenario.demand.vtypes.lanechangemodel.get_value() in ['SL2015', ]:
width_sublanes_default = 1.0
else:
width_sublanes_default = -1.0
self.width_sublanes = attrsman.add(cm.AttrConf('width_sublanes', kwargs.get('width_sublanes', width_sublanes_default),
groupnames=['options', 'edges'],
#cml = '--lateral-resolution',
perm='rw',
name='Sublane width',
unit='m',
info='Width of sublanes. Should be less than lane width. If negative the sublanes are disabeled.',
is_enabled=lambda self: self.width_sublanes > 0,
))
self.pedestrian_model = attrsman.add(cm.AttrConf('pedestrian_model', kwargs.get('pedestrian_model', 'striping'),
groupnames=['options', 'parameters'],
name='Pedestrian Model',
choices=['striping', 'nonInteracting', 'None'],
perm='rw',
info='Type of Pedestrian model.',
))
self.width_pedestrian_striping = attrsman.add(cm.AttrConf('width_pedestrian_striping', kwargs.get('width_pedestrian_striping', 0.35),
groupnames=['options', 'parameters'],
name='Ped. stripe width',
unit='m',
perm='rw',
info="Width of parallel stripes for segmenting a sidewalk (meters) for use with model 'striping'",
))
self.slowdownfactor_pedestrian_striping = attrsman.add(cm.AttrConf('slowdownfactor_pedestrian_striping', kwargs.get('slowdownfactor_pedestrian_striping', 0.2),
groupnames=['options', 'parameters'],
name='Ped. slowdown',
perm='rw',
info="Factor for random slow-downs [0,1] for use with model 'striping'",
))
self.jamtime_pedestrian_striping = attrsman.add(cm.AttrConf('jamtime_pedestrian_striping', kwargs.get('jamtime_pedestrian_striping', 10),
groupnames=['options', 'parameters'],
name='Ped. jamtime',
unit='s',
perm='rw',
info="Factor for random slow-downs [0,1] for use with model 'striping'",
))
self.is_edgedata = attrsman.add(cm.AttrConf('is_edgedata', kwargs.get('is_edgedata', False),
groupnames=['options', 'output'],
name='Output edge data',
perm='rw',
info='If set, generate detailed data for all edges.'
))
self.is_routedata = attrsman.add(cm.AttrConf('is_routedata', kwargs.get('is_routedata', False),
groupnames=['options', 'output'],
name='Output route data',
perm='rw',
info='If set, generate detailed data for all routes.'
))
self.is_tripdata = attrsman.add(cm.AttrConf('is_tripdata', kwargs.get('is_tripdata', False),
groupnames=['options', 'output'],
name='Output trip data',
perm='rw',
info='If set, generate detailed data for all trips.'
))
self.is_edgenoise = attrsman.add(cm.AttrConf('is_edgenoise', kwargs.get('is_edgenoise', False),
groupnames=['options', 'output'],
name='Output edge noise',
perm='rw',
info='If set, generate noise information for all edges.'
))
self.is_edgesemissions = attrsman.add(cm.AttrConf('is_edgesemissions', kwargs.get('is_edgesemissions', False),
groupnames=['options', 'output'],
name='Output edge emissions',
perm='rw',
info='If set, generate emission information for all edges.'
))
outfile_prefix = kwargs.get('outfile_prefix', 'out')
self.routesdatapath = attrsman.add(cm.AttrConf('routesdatapath', os.path.join(rootdirpath, rootname+'.'+outfile_prefix+'.roudata.xml'),
groupnames=['outputfiles', '_private'],
perm='r',
name='Route data file',
wildcards='Route data XML files (*.roudata.xml)|*.roudata.xml',
metatype='filepath',
info="""SUMO xml file with route output info.""",
#attrnames_data = ['depart','arrival'],
#element = 'vehicle',
#id_type = 'trip',
#reader = 'plain',
))
self.tripdatapath = attrsman.add(cm.AttrConf('tripdatapath', os.path.join(rootdirpath, rootname+'.'+outfile_prefix+'.tripdata.xml'),
groupnames=['outputfiles', '_private'],
perm='r',
name='Edge data file',
wildcards='Trip data XML files (*.tripdata.xml)|*.tripdata.xml',
metatype='filepath',
info="""SUMO xml file with trip output data.""",
attrnames_data=['depart', 'arrival', 'duration'],
#element = 'tripinfo',
#id_type = 'trip',
#reader = 'plain',
))
self.edgedatapath = attrsman.add(cm.AttrConf('edgedatapath', os.path.join(rootdirpath, rootname+'.'+outfile_prefix+'.edgedata.xml'),
groupnames=['outputfiles', '_private'],
| |
import itertools
import logging
import netCDF4
import numpy
from .. import core
from ..constants import masked as cfdm_masked
from ..decorators import (
_inplace_enabled,
_inplace_enabled_define_and_cleanup,
_manage_log_level_via_verbosity,
)
from ..functions import abspath
from ..mixin.container import Container
from ..mixin.netcdf import NetCDFHDF5
from . import NumpyArray, abstract
logger = logging.getLogger(__name__)
class Data(Container, NetCDFHDF5, core.Data):
"""An orthogonal multidimensional array with masking and units.
.. versionadded:: (cfdm) 1.7.0
"""
def __init__(
    self,
    array=None,
    units=None,
    calendar=None,
    fill_value=None,
    source=None,
    copy=True,
    dtype=None,
    mask=None,
    _use_array=True,
    **kwargs,
):
    """**Initialisation**

    :Parameters:

        array: data_like, optional
            The array of values. Ignored if the *source* parameter
            is set.

            *Parameter example:*
              ``array=[34.6]``

            *Parameter example:*
              ``array=[[1, 2], [3, 4]]``

            *Parameter example:*
              ``array=numpy.ma.arange(10).reshape(2, 1, 5)``

        units: `str`, optional
            The physical units of the data
            (e.g. ``'km hr-1'``, ``'days since 2018-12-01'``).
            Ignored if the *source* parameter is set. May also be
            set after initialisation with the `set_units` method.

        calendar: `str`, optional
            The calendar for reference time units
            (e.g. ``'360_day'``). Ignored if the *source* parameter
            is set. May also be set after initialisation with the
            `set_calendar` method.

        fill_value: optional
            The fill value of the data. By default, or if set to
            `None`, the `numpy` fill value appropriate to the
            array's data type will be used (see
            `numpy.ma.default_fill_value`). Ignored if the *source*
            parameter is set. May also be set after initialisation
            with the `set_fill_value` method.

        dtype: data-type, optional
            The desired data-type for the data
            (e.g. ``float``, ``'float32'``,
            ``numpy.dtype('i2')``). By default the data-type will
            be inferred from the *array* parameter. May also be set
            after initialisation with the `dtype` attribute.

        mask: data_like, optional
            Apply this mask to the data given by the *array*
            parameter. By default, or if *mask* is `None`, no mask
            is applied. May be any data_like object that broadcasts
            to *array*. Masking will be carried out where mask
            elements evaluate to `True`, in addition to any mask
            already defined by the *array* parameter.

        source: optional
            Initialise the array, units, calendar and fill value
            from those of *source*.

        copy: `bool`, optional
            If False then do not deep copy input parameters prior
            to initialisation. By default arguments are deep
            copied.

        kwargs: ignored
            Not used. Present to facilitate subclassing.

    """
    # An explicit dtype is applied to the raw numpy values first, so
    # that the mask (below) is applied to the converted array.
    if dtype is not None:
        if isinstance(array, abstract.Array):
            array = array.array
        elif not isinstance(array, numpy.ndarray):
            array = numpy.asanyarray(array)

        array = array.astype(dtype)
        array = NumpyArray(array)

    # numpy.ma.array combines the extra mask with any mask the input
    # array already carries.
    if mask is not None:
        if isinstance(array, abstract.Array):
            array = array.array
        elif not isinstance(array, numpy.ndarray):
            array = numpy.asanyarray(array)

        array = numpy.ma.array(array, mask=mask)
        array = NumpyArray(array)

    super().__init__(
        array=array,
        units=units,
        calendar=calendar,
        fill_value=fill_value,
        source=source,
        copy=copy,
        _use_array=_use_array,
    )

    # Inherit netCDF/HDF5 properties (e.g. chunk sizes) from source.
    self._initialise_netcdf(source)
def __array__(self, *dtype):
    """The numpy array interface.

    .. versionadded:: (cfdm) 1.7.0

    :Parameters:

        dtype: optional
            Typecode or data-type to which the array is cast.

    :Returns:

        `numpy.ndarray`
            An independent numpy array of the data.

    """
    data = self.array
    if dtype:
        return data.astype(dtype[0], copy=False)
    return data
def __repr__(self):
    """Called by the `repr` built-in function.

    x.__repr__() <==> repr(x)

    """
    try:
        shape = str(self.shape).replace(",)", ")")
    except AttributeError:
        # no underlying array, so no shape to report
        shape = ""
    return f"<{self.__class__.__name__}{shape}: {self}>"
def __format__(self, format_spec):
    """Interpret format specifiers for size 1 arrays.

    An empty specifier gives the default string form; a non-empty
    specifier is applied to the single element. Formatting an array
    of size > 1 with a non-empty specifier raises ValueError.

    """
    if not format_spec:
        return super().__format__("")

    n = self.size
    if n != 1:
        raise ValueError(
            f"Can't format Data array of size {n} with "
            f"format code {format_spec}"
        )

    return format(self.first_element(), format_spec)
def __getitem__(self, indices):
"""Return a subspace of the data defined by indices.
d.__getitem__(indices) <==> d[indices]
Indexing follows rules that are very similar to the numpy indexing
rules, the only differences being:
* An integer index i takes the i-th element but does not reduce
the rank by one.
* When two or more dimensions' indices are sequences of integers
then these indices work independently along each dimension
(similar to the way vector subscripts work in Fortran). This is
the same behaviour as indexing on a Variable object of the
netCDF4 package.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `__setitem__`, `_parse_indices`
:Returns:
`{{class}}`
The subspace of the data.
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(100, 190).reshape(1, 10, 9))
>>> d.shape
(1, 10, 9)
>>> d[:, :, 1].shape
(1, 10, 1)
>>> d[:, 0].shape
(1, 1, 9)
>>> d[..., 6:3:-1, 3:6].shape
(1, 3, 3)
>>> d[0, [2, 9], [4, 8]].shape
(1, 2, 2)
>>> d[0, :, -2].shape
(1, 10, 1)
"""
indices = self._parse_indices(indices)
array = self._get_Array(None)
if array is None:
raise ValueError("No array!!")
array = array[tuple(indices)]
out = self.copy(array=False)
out._set_Array(array, copy=False)
if out.shape != self.shape:
# Delete hdf5 chunksizes
out.nc_clear_hdf5_chunksizes()
return out
def __int__(self):
"""Called by the `int` built-in function.
x.__int__() <==> int(x)
"""
if self.size != 1:
raise TypeError(
"only length-1 arrays can be converted to "
f"Python scalars. Got {self}"
)
return int(self.array)
def __iter__(self):
"""Called when an iterator is required.
x.__iter__() <==> iter(x)
**Examples:**
>>> d = {{package}}.{{class}}([1, 2, 3], 'metres')
>>> for e in d:
... print(repr(e))
...
1
2
3
>>> d = {{package}}.{{class}}([[1, 2], [4, 5]], 'metres')
>>> for e in d:
... print(repr(e))
...
<{{repr}}Data(2): [1, 2] metres>
<{{repr}}Data(2): [4, 5] metres>
>>> d = {{package}}.{{class}}(34, 'metres')
>>> for e in d:
... print(repr(e))
Traceback (most recent call last):
...
TypeError: Iteration over 0-d Data
"""
ndim = self.ndim
if not ndim:
raise TypeError(f"Iteration over 0-d {self.__class__.__name__}")
if ndim == 1:
i = iter(self.array)
while 1:
try:
yield next(i)
except StopIteration:
return
else:
# ndim > 1
for n in range(self.shape[0]):
out = self[n, ...]
out.squeeze(0, inplace=True)
yield out
def __setitem__(self, indices, value):
"""Assign to data elements defined by indices.
d.__setitem__(indices, x) <==> d[indices]=x
Indexing follows rules that are very similar to the numpy indexing
rules, the only differences being:
* An integer index i takes the i-th element but does not reduce
the rank by one.
* When two or more dimensions' indices are sequences of integers
then these indices work independently along each dimension
(similar to the way vector subscripts work in Fortran). This is
the same behaviour as indexing on a Variable object of the
netCDF4 package.
**Broadcasting**
The value, or values, being assigned must be broadcastable to the
shape defined by the indices, using the numpy broadcasting rules.
**Missing data**
Data array elements may be set to missing values by assigning them
to `masked`. Missing values may be unmasked by assigning them to
any other value.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `__getitem__`, `_parse_indices`
:Returns:
`None`
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(100, 190).reshape(1, 10, 9))
>>> d.shape
(1, 10, 9)
>>> d[:, :, 1] = -10
>>> d[:, 0] = range(9)
>>> d[..., 6:3:-1, 3:6] = numpy.arange(-18, -9).reshape(3, 3)
>>> d[0, [2, 9], [4, 8]] = {{package}}.{{class}}([[-2, -3]])
>>> d[0, :, -2] = {{package}}.masked
"""
indices = self._parse_indices(indices)
array = self.array
if value is cfdm_masked or numpy.ma.isMA(value):
# The data is not masked but the assignment is masking
# elements, so turn the non-masked array into a masked
# one.
array = array.view(numpy.ma.MaskedArray)
self._set_subspace(array, indices, numpy.asanyarray(value))
self._set_Array(array, copy=False)
def __str__(self):
"""Called by the `str` built-in function.
x.__str__() <==> str(x)
"""
units = self.get_units(None)
calendar = self.get_calendar(None)
isreftime = False
if units is not None:
if isinstance(units, str):
isreftime = | |
for snp_index in sorted(self.considered_snp_indices):
snp_counter += 1
self.hessian_matrices[snp_index] = hessian_matrices[snp_counter]
# initialize std_error_values as an empty dictionary
self.std_error_values = dict()
# queue
queue_std_error = multiprocessing.Queue()
# thread to read from the queue
std_error_read_thread = threading.Thread(target=self.read_queue_std_error, args=(queue_std_error,))
std_error_read_thread.daemon = True
std_error_read_thread.start()
# processes to compute the std error values for the sub-chunks
sub_chunk_start_indices, sub_chunk_end_indices = self.get_start_end_indices(cpu_cores=8)
process_list = list()
for start_index_sub_chunk, end_index_sub_chunk in zip(sub_chunk_start_indices, sub_chunk_end_indices):
process = multiprocessing.Process(target=self.calculate_std_error_logistic_sub_chunk,
args=(start_index_sub_chunk, end_index_sub_chunk, queue_std_error,))
process_list.append(process)
process.daemon = True
process.start()
# wait for read thread to be done
std_error_read_thread.join()
# close queues
queue_std_error.close()
# terminate the processes
for proc in process_list:
proc.terminate()
# update ignored index set
for snp_index in self.considered_snp_indices:
if self.std_error_values[snp_index][0] == "NA":
self.considered_snp_indices.discard(snp_index)
self.t_stat_values[snp_index] = self.std_error_values[snp_index]
self.p_values[snp_index] = self.std_error_values[snp_index]
continue
# compute the results (i.e. t-stats and p-values) for the chunk
self.compute_results_regression()
# add chromosome number, base pair distance, and p-value of the current chunk to results for all chunks
self.append_to_results_all_chunks()
# save results
save_process = multiprocessing.Process(target=self.save_results_regression)
save_process.daemon = True
save_process.start()
save_process.join()
save_process.terminate()
# empty the dictionaries to release the memory because they are not needed anymore
self.init_algorithm_attributes()
# if this is not the last chunk, set up the next chunk of SNPs
if not self.is_last_chunk():
self.setup_next_chunk()
else:
# if this is the last chunk, generate the manhattan plot first, and then, tell clients to download the results
self.manhattan_plot()
self.set_step(HyFedProjectStep.RESULT)
except Exception as std_error_logistic_exception:
logger.error(f'Project {self.project_id}: {std_error_logistic_exception}')
self.project_failed()
def calculate_std_error_logistic_sub_chunk(self, start_index, end_index, queue_std_error):
""" Compute logistic regression std error values for a sub-chunk """
std_error_values = dict()
for snp_index in np.arange(start_index, end_index):
if snp_index not in self.considered_snp_indices:
continue
# put results in the queue whenever computation is done for 1000 SNPs
if snp_index % 1001 == 1000:
queue_std_error.put(std_error_values)
std_error_values = dict()
if np.linalg.det(self.hessian_matrices[snp_index]) == 0:
std_error_values[snp_index] = np.array(["NA" for _ in range(len(self.covariates) + 2)])
continue
std_error_values[snp_index] = np.sqrt(np.linalg.inv(self.hessian_matrices[snp_index]).diagonal())
queue_std_error.put(std_error_values)
# ############### functions related to all algorithms
def init_algorithm_attributes(self):
""" Set the chi-square or linear/logistic regression algorithm related dictionaries to empty """
self.non_missing_sample_counts = dict()
self.allele_counts = dict()
self.minor_allele_names = dict()
self.major_allele_names = dict()
self.minor_allele_counts = dict()
self.major_allele_counts = dict()
self.minor_allele_frequencies = dict()
self.major_allele_frequencies = dict()
self.contingency_tables = dict()
self.maf_case = dict()
self.maf_control = dict()
self.chi_square_values = dict()
self.odd_ratio_values = dict()
self.xt_x_matrices = dict()
self.xt_y_vectors = dict()
self.xt_x_inverse_matrices = dict()
self.sse_values = dict()
self.gradient_vectors = dict()
self.hessian_matrices = dict()
self.new_log_likelihood_values = dict()
self.new_beta_values = dict()
self.log_likelihood_values = dict()
self.beta_values = dict()
self.std_error_values = dict()
self.t_stat_values = dict()
self.p_values = dict()
    def compute_p_values(self):
        """ Compute p-values for a chunk with multi-processing.

        A daemon reader thread drains the shared queue while worker
        processes compute p-values for disjoint sub-chunks of SNP
        indices; the thread finishes once every considered SNP has a
        p-value (see `read_queue_p_values`).
        """
        try:
            queue_p_values = multiprocessing.Queue()

            # thread to read from the queue
            p_value_read_thread = threading.Thread(target=self.read_queue_p_values, args=(queue_p_values,))
            p_value_read_thread.daemon = True
            p_value_read_thread.start()

            # processes to compute the p-values for the sub-chunks
            sub_chunk_start_indices, sub_chunk_end_indices = self.get_start_end_indices(cpu_cores=8)
            process_list = list()
            for start_index_sub_chunk, end_index_sub_chunk in zip(sub_chunk_start_indices,sub_chunk_end_indices):
                process = multiprocessing.Process(target=self.calculate_p_values_sub_chunk,
                                                  args=(start_index_sub_chunk, end_index_sub_chunk, queue_p_values,))
                process_list.append(process)
                process.daemon = True
                process.start()

            # wait for read thread to be done; it returns once all
            # considered SNPs have a p-value
            p_value_read_thread.join()

            # close queues
            queue_p_values.close()

            # terminate the worker processes (all results have already
            # been received by the reader thread at this point)
            for proc in process_list:
                proc.terminate()

            logger.info(f"Project {self.project_id}: p-value computation is done for chunk # {self.current_chunk}!")
        except Exception as p_value_exception:
            logger.error(f'Project {self.project_id}: {p_value_exception}')
            self.project_failed()
def calculate_p_values_sub_chunk(self, start_index, end_index, queue_p_values):
""" Compute p-values for a sub-chunk """
p_values = dict()
for snp_index in np.arange(start_index, end_index):
if snp_index not in self.considered_snp_indices:
continue
# put results in the queue whenever computation is done for 1000 SNPs
if snp_index % 1001 == 1000:
queue_p_values.put(p_values)
p_values = dict()
if self.algorithm == SplinkAlgorithm.CHI_SQUARE:
p_values[snp_index] = 1 - stats.chi2.cdf(self.chi_square_values[snp_index], 1)
elif self.algorithm == SplinkAlgorithm.LINEAR_REGRESSION:
degree_of_freedom = self.non_missing_sample_counts[snp_index] - len(self.covariates) - 2
p_values[snp_index] = 2 * (1 - stats.t.cdf(np.abs(self.t_stat_values[snp_index]), degree_of_freedom))
elif self.algorithm == SplinkAlgorithm.LOGISTIC_REGRESSION:
p_values[snp_index] = 1 - stats.chi2.cdf(np.square(np.array(self.t_stat_values[snp_index])), 1)
queue_p_values.put(p_values)
def read_queue_p_values(self, queue_p_values):
while len(self.p_values) < len(self.considered_snp_indices):
prob_values = queue_p_values.get()
self.p_values.update(prob_values)
# ##### Chi-square result computation/saving functions
def compute_maf(self):
""" Compute minor allele frequency of case/control for the chunk """
try:
for snp_index in self.considered_snp_indices:
minor_case = self.contingency_tables[snp_index][0]
major_case = self.contingency_tables[snp_index][1]
minor_control = self.contingency_tables[snp_index][2]
major_control = self.contingency_tables[snp_index][3]
self.maf_case[snp_index] = minor_case / (minor_case + major_case)
self.maf_control[snp_index] = minor_control / (minor_control + major_control)
logger.info(f'Project {self.project_id}: case/control minor allele frequency computation is done for chunk # {self.current_chunk}!')
except Exception as maf_exception:
logger.error(f'Project {self.project_id}: {maf_exception}')
self.project_failed()
def compute_chi_square_values(self):
""" Compute chi-square value for the chunk """
try:
for snp_index in self.considered_snp_indices:
# observed allele counts
observed_allele_counts = self.contingency_tables[snp_index]
# expected allele counts
expected_allele_counts = np.zeros(4)
case_count = self.contingency_tables[snp_index][0] + self.contingency_tables[snp_index][1]
control_count = self.contingency_tables[snp_index][2] + self.contingency_tables[snp_index][3]
minor_count = self.contingency_tables[snp_index][0] + self.contingency_tables[snp_index][2]
major_count = self.contingency_tables[snp_index][1] + self.contingency_tables[snp_index][3]
total_count = case_count + control_count
expected_allele_counts[0] = (case_count * minor_count) / total_count
expected_allele_counts[1] = (case_count * major_count) / total_count
expected_allele_counts[2] = (control_count * minor_count) / total_count
expected_allele_counts[3] = (control_count * major_count) / total_count
# compute chi-square value
chi_square = np.sum(np.square(observed_allele_counts - expected_allele_counts) / expected_allele_counts)
self.chi_square_values[snp_index] = chi_square
logger.info(f"Project {self.project_id}: chi-square computation is done for chunk # {self.current_chunk}!")
except Exception as chi_square_exception:
logger.error(f'Project {self.project_id}: {chi_square_exception}')
self.project_failed()
def compute_odd_ratio_values(self):
""" Compute odd ratio value for the chunk """
try:
for snp_index in self.considered_snp_indices:
minor_case = self.contingency_tables[snp_index][0]
major_case = self.contingency_tables[snp_index][1]
minor_control = self.contingency_tables[snp_index][2]
major_control = self.contingency_tables[snp_index][3]
if (major_case * minor_control) != 0:
self.odd_ratio_values[snp_index] = (minor_case * major_control) / (major_case * minor_control)
else:
self.odd_ratio_values[snp_index] = "NA"
logger.info(f"Project {self.project_id}: odd-ratio computation is done for chunk # {self.current_chunk}!")
except Exception as odd_ratio_exception:
logger.error(f'Project {self.project_id}: {odd_ratio_exception}')
self.project_failed()
def compute_results_chi_square(self):
""" Compute MAF for case/control, chi-square, odd-ratio, and p-values for chi-square algorithm """
try:
self.compute_maf()
self.compute_chi_square_values()
self.compute_odd_ratio_values()
self.compute_p_values()
except Exception as result_computation_error:
logger.error(f"Chi-square result computation error: {result_computation_error}")
self.project_failed()
def save_results_chi_square(self):
""" Save chi-square algorithm results for the chunk into the file """
try:
logger.info(f'Project {self.project_id}: Started saving results for chunk # {self.current_chunk}!')
# create result directory/file if they do not already exist
result_dir = self.create_result_dir()
result_file = open(f'{result_dir}/chi-square-result.csv', 'a')
# write the result file header in the first chunk
if self.current_chunk == 1:
result_file.write('CHR,SNP,BP,A1,F_A,F_U,A2,CHISQ,P,OR')
for snp_index in np.arange(self.chunk_start_index, self.chunk_end_index):
snp_id = self.snp_id_values[snp_index].decode('utf-8')
chromosome_number, snp_name, base_pair_distance = snp_id.split('\t')
minor_allele = self.minor_allele_names[snp_index]
major_allele = self.major_allele_names[snp_index]
maf_case = round_result(self.maf_case[snp_index])
maf_control = round_result(self.maf_control[snp_index])
chi_square = round_result(self.chi_square_values[snp_index])
p_value = round_result(self.p_values[snp_index])
odd_ratio = round_result(self.odd_ratio_values[snp_index])
csv_row = f'{chromosome_number},{snp_name},{base_pair_distance},{minor_allele},{maf_case},' \
f'{maf_control},{major_allele},{chi_square},{p_value},{odd_ratio}'
result_file.write("\n" + str(csv_row))
result_file.close()
logger.info(f'Project {self.project_id}: Saving results done for chunk # {self.current_chunk}!')
except Exception as save_exception:
logger.error(f'Project {self.project_id}: {save_exception}')
self.project_failed()
# ###### Linear/logistic regression result computation/saving functions
def compute_t_stat_values(self):
""" Compute T statistics for the chunk """
try:
for snp_index in self.considered_snp_indices:
self.t_stat_values[snp_index] = self.beta_values[snp_index] / self.std_error_values[snp_index]
logger.info(f'Project {self.project_id}: T statistics computation done for chunk # {self.current_chunk}!')
except Exception as t_stats_exception:
logger.error(f'Project {self.project_id}: {t_stats_exception}')
self.project_failed()
def compute_results_regression(self):
""" Compute t-stat and p-values for the linear/logistic regression algorithm """
try:
self.compute_t_stat_values()
self.compute_p_values()
except Exception as result_computation_error:
logger.error(f"Regression result computation error: {result_computation_error}")
self.project_failed()
def save_results_regression(self):
""" Save the linear/logistic regression results for the chunk into the file """
try:
# create result directory/file if they do not already exist
result_dir = self.create_result_dir()
if self.algorithm == SplinkAlgorithm.LINEAR_REGRESSION:
result_file = open(f'{result_dir}/linear-regression-result.csv', 'a')
else:
result_file = open(f'{result_dir}/logistic-regression-result.csv', 'a')
# write the result file header in the first chunk
if self.current_chunk == 1:
result_file.write('CHR,SNP,BP,A1,TEST,NMISS,BETA,STAT,P')
for snp_index in np.arange(self.chunk_start_index, self.chunk_end_index):
snp_id = self.snp_id_values[snp_index].decode('utf-8')
chromosome_number, snp_name, base_pair_distance = snp_id.split('\t')
beta_counter = 1
minor_allele = self.minor_allele_names[snp_index]
feature_name = 'ADD'
non_missing_samples = round_result(self.non_missing_sample_counts[snp_index])
beta_value = round_result(self.beta_values[snp_index][beta_counter])
t_stat_value = round_result(self.t_stat_values[snp_index][beta_counter])
p_value = round_result(self.p_values[snp_index][beta_counter])
csv_row = f'{chromosome_number},{snp_name},{base_pair_distance},' \
f'{minor_allele},{feature_name},{non_missing_samples},' \
f'{beta_value},{t_stat_value},{p_value}'
result_file.write("\n" + str(csv_row))
for covariate in self.covariates:
beta_counter += 1
beta_value = round_result(self.beta_values[snp_index][beta_counter])
t_stat_value = round_result(self.t_stat_values[snp_index][beta_counter])
p_value = round_result(self.p_values[snp_index][beta_counter])
csv_row = f'{chromosome_number},{snp_name},{base_pair_distance},' \
f'{minor_allele},{covariate},{non_missing_samples},' \
f'{beta_value},{t_stat_value},{p_value}'
result_file.write("\n" + str(csv_row))
result_file.close()
logger.info(f'Project {self.project_id}: Saving results done for chunk # {self.current_chunk}!')
except Exception as save_regression_results_exception:
logger.error(f'Project {self.project_id}: {save_regression_results_exception}')
self.project_failed()
# ############## Chunking functions
def init_chunks(self):
""" Set the total number of chunks and start/end indices of the chunks """
try:
self.total_chunks = int(np.ceil(len(self.snp_id_values) / self.chunk_size))
for split in np.array_split(np.arange(len(self.snp_id_values)), self.total_chunks):
self.start_indices_chunks.append(split[0])
self.end_indices_chunks.append(split[-1] + 1)
logger.debug(f'Project {self.project_id}: Initializing of chunks is done!')
except Exception as init_chunk_exp:
logger.error(f'Project {self.project_id}: {init_chunk_exp}')
self.project_failed()
def setup_next_chunk(self):
""" For the next chunk of SNPs:
set the start/end chunk index, increment chunk number,
set the | |
in params:
path_params['entryId'] = params['entry_id'] # noqa: E501
query_params = []
if 'select' in params:
query_params.append(('$select', params['select'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Authorization'] # noqa: E501
return self.api_client.call_api(
'/v1-alpha/Repositories/{repoId}/Entries/{entryId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Entry', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_entry_listing(self, repo_id, entry_id, **kwargs): # noqa: E501
"""get_entry_listing # noqa: E501
- Returns the children entries of a folder in the repository. - Provide an entry ID (must be a folder), and get a paged listing of entries in that folder. Used as a way of navigating through the repository. - Default page size: 100. Allowed OData query options: Select | Count | OrderBy | Skip | Top | SkipToken | Prefer. OData $OrderBy syntax should follow: \"PropertyName direction,PropertyName2 direction\". Sort order can be either value \"asc\" or \"desc\". Optional query parameters: groupByOrderType (bool). This query parameter decides if results are returned in groups based on their entry type. Entries returned in the listing are not automatically converted to their subtype (Folder, Shortcut, Document), so clients who want model-specific information should request it via the GET entry by ID route. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entry_listing(repo_id, entry_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str repo_id: The requested repository ID. (required)
:param int entry_id: The folder ID. (required)
:param bool group_by_entry_type: An optional query parameter used to indicate if the result should be grouped by entry type or not.
:param str prefer: An optional OData header. Can be used to set the maximum page size using odata.maxpagesize.
:param str select: Limits the properties returned in the result.
:param str orderby: Specifies the order in which items are returned. The maximum number of expressions is 5.
:param int top: Limits the number of items returned from a collection.
:param int skip: Excludes the specified number of items of the queried collection from the result.
:param bool count: Indicates whether the total count of items within a collection are returned in the result.
:return: ODataValueOfIListOfEntry
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_entry_listing_with_http_info(repo_id, entry_id, **kwargs) # noqa: E501
else:
(data) = self.get_entry_listing_with_http_info(repo_id, entry_id, **kwargs) # noqa: E501
return data
    def get_entry_listing_with_http_info(self, repo_id, entry_id, **kwargs):  # noqa: E501
        """get_entry_listing  # noqa: E501
        - Returns the children entries of a folder in the repository. - Provide an entry ID (must be a folder), and get a paged listing of entries in that folder. Used as a way of navigating through the repository. - Default page size: 100. Allowed OData query options: Select | Count | OrderBy | Skip | Top | SkipToken | Prefer. OData $OrderBy syntax should follow: \"PropertyName direction,PropertyName2 direction\". Sort order can be either value \"asc\" or \"desc\". Optional query parameters: groupByOrderType (bool). This query parameter decides if results are returned in groups based on their entry type. Entries returned in the listing are not automatically converted to their subtype (Folder, Shortcut, Document), so clients who want model-specific information should request it via the GET entry by ID route. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_entry_listing_with_http_info(repo_id, entry_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str repo_id: The requested repository ID. (required)
        :param int entry_id: The folder ID. (required)
        :param bool group_by_entry_type: An optional query parameter used to indicate if the result should be grouped by entry type or not.
        :param str prefer: An optional OData header. Can be used to set the maximum page size using odata.maxpagesize.
        :param str select: Limits the properties returned in the result.
        :param str orderby: Specifies the order in which items are returned. The maximum number of expressions is 5.
        :param int top: Limits the number of items returned from a collection.
        :param int skip: Excludes the specified number of items of the queried collection from the result.
        :param bool count: Indicates whether the total count of items within a collection are returned in the result.
        :return: ODataValueOfIListOfEntry
        If the method is called asynchronously,
        returns the request thread.
        """
        # Recognized parameters; anything else in kwargs raises TypeError.
        all_params = ['repo_id', 'entry_id', 'group_by_entry_type', 'prefer', 'select', 'orderby', 'top', 'skip', 'count']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # NOTE: locals() snapshots the current local names (repo_id,
        # entry_id, kwargs, all_params) into `params`; the call must stay
        # before any further locals are introduced.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_entry_listing" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'repo_id' is set
        if ('repo_id' not in params or
                params['repo_id'] is None):
            raise ValueError("Missing the required parameter `repo_id` when calling `get_entry_listing`")  # noqa: E501
        # verify the required parameter 'entry_id' is set
        if ('entry_id' not in params or
                params['entry_id'] is None):
            raise ValueError("Missing the required parameter `entry_id` when calling `get_entry_listing`")  # noqa: E501

        collection_formats = {}

        # path parameters are substituted into the URL template below
        path_params = {}
        if 'repo_id' in params:
            path_params['repoId'] = params['repo_id']  # noqa: E501
        if 'entry_id' in params:
            path_params['entryId'] = params['entry_id']  # noqa: E501

        # OData query options (only those the caller supplied)
        query_params = []
        if 'group_by_entry_type' in params:
            query_params.append(('groupByEntryType', params['group_by_entry_type']))  # noqa: E501
        if 'select' in params:
            query_params.append(('$select', params['select']))  # noqa: E501
        if 'orderby' in params:
            query_params.append(('$orderby', params['orderby']))  # noqa: E501
        if 'top' in params:
            query_params.append(('$top', params['top']))  # noqa: E501
        if 'skip' in params:
            query_params.append(('$skip', params['skip']))  # noqa: E501
        if 'count' in params:
            query_params.append(('$count', params['count']))  # noqa: E501

        header_params = {}
        if 'prefer' in params:
            header_params['Prefer'] = params['prefer']  # noqa: E501

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['Authorization']  # noqa: E501

        return self.api_client.call_api(
            '/v1-alpha/Repositories/{repoId}/Entries/{entryId}/Laserfiche.Repository.Folder/children', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ODataValueOfIListOfEntry',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_field_values(self, repo_id, entry_id, **kwargs): # noqa: E501
"""get_field_values # noqa: E501
- Returns the fields assigned to an entry. - Provide an entry ID, and get a paged listing of all fields assigned to that entry. - Default page size: 100. Allowed OData query options: Select | Count | OrderBy | Skip | Top | SkipToken | Prefer. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_field_values(repo_id, entry_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str repo_id: The requested repository ID. (required)
:param int entry_id: The requested entry ID. (required)
:param str prefer: An optional OData header. Can be used to set the maximum page size using odata.maxpagesize.
:param bool format_value: An optional query parameter used to indicate if the field values should be formatted. The default value is false.
:param str culture: An optional query parameter used to indicate the locale that should be used for formatting. The value should be a standard language tag. The formatValue query parameter must be set to true, otherwise culture will not be used for formatting.
:param str select: Limits the properties returned in the result.
:param str orderby: Specifies the order in which items are returned. The maximum number of expressions is 5.
:param int top: Limits the number of items returned from a collection.
:param int skip: Excludes the specified number of items of the queried collection from the result.
:param bool count: Indicates whether the total count of items within a collection are returned in the result.
:return: ODataValueOfIListOfFieldValue
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_field_values_with_http_info(repo_id, entry_id, **kwargs) # noqa: E501
else:
(data) = self.get_field_values_with_http_info(repo_id, entry_id, **kwargs) | |
#!/usr/bin/env python3
import os
import gdal
import osr
import time
import h5py
import numpy as np
from collections import defaultdict
import isce3.extensions.isceextension as isce3
def runGeocode(self, frequency):
    '''
    This step maps locations on a DEM to slant range and azimuth time.
    '''
    configured_frequencies = self.state.subset_dict.keys()

    # skip the worker when the requested frequency was not configured
    if frequency not in configured_frequencies:
        self._print(f'skipping frequency {frequency} because it'
                    ' is not in input parameters:'
                    f' {[str(f) for f in configured_frequencies]}')
        # 1. indicates that the worker was not executed
        return 1

    _runGeocodeFrequency(self, frequency)
def _runGeocodeFrequency(self, frequency):
self._print(f'starting geocode module for frequency: {frequency}')
state = self.state
pol_list = state.subset_dict[frequency]
radar_grid = self.radar_grid_list[frequency]
orbit = self.orbit
raster_ref_list = []
for pol in pol_list:
h5_ds = f'//science/LSAR/SLC/swaths/frequency{frequency}/{pol}'
raster_ref = f'HDF5:"{state.input_hdf5}":{h5_ds}'
raster_ref_list.append(raster_ref)
self._print('raster list:', raster_ref_list)
self._print('pol list: ', pol_list)
# set temporary files
time_id = str(time.time())
input_temp = os.path.join(state.scratch_path,
f'temp_rslc2gcov_{frequency}_{time_id}.vrt')
output_file = os.path.join(state.scratch_path,
f'temp_rslc2gcov_{frequency}_{time_id}.bin')
out_geo_nlooks = os.path.join(state.scratch_path,
f'temp_geo_nlooks_{time_id}.bin')
out_geo_rtc = os.path.join(state.scratch_path,
f'temp_geo_rtc_{time_id}.bin')
out_dem_vertices = os.path.join(state.scratch_path,
f'temp_dem_vertices_{time_id}.bin')
out_geo_vertices = os.path.join(state.scratch_path,
f'temp_geo_vertices_{time_id}.bin')
# build input VRT
gdal.BuildVRT(input_temp, raster_ref_list, separate=True)
input_raster_obj = isce3.pyRaster(input_temp)
ellps = isce3.pyEllipsoid()
# RTC
rtc_dict = self.get_value(['processing', 'rtc'])
rtc_output_type = rtc_dict['output_type']
rtc_geogrid_upsampling = rtc_dict['geogrid_upsampling']
rtc_algorithm_type = rtc_dict['algorithm_type']
input_terrain_radiometry = rtc_dict[
'input_terrain_radiometry']
rtc_min_value_db = rtc_dict['rtc_min_value_db']
# Geocode
geocode_dict = self.get_value(['processing', 'geocode'])
geocode_algorithm_type = geocode_dict['algorithm_type']
memory_mode = geocode_dict['memory_mode']
geogrid_upsampling = geocode_dict['geogrid_upsampling']
abs_cal_factor = geocode_dict['abs_rad_cal']
clip_min = geocode_dict['clip_min']
clip_max = geocode_dict['clip_max']
min_nlooks = geocode_dict['min_nlooks']
flag_save_nlooks = geocode_dict['save_nlooks']
flag_save_rtc = geocode_dict['save_rtc']
flag_save_dem_vertices = geocode_dict['save_dem_vertices']
flag_save_geo_vertices = geocode_dict['save_geo_vertices']
# Geogrid
state.output_epsg = geocode_dict['outputEPSG']
y_snap = geocode_dict['y_snap']
x_snap = geocode_dict['x_snap']
y_max = geocode_dict['top_left']['y_abs']
x_min = geocode_dict['top_left']['x_abs']
y_min = geocode_dict['bottom_right']['y_abs']
x_max = geocode_dict['bottom_right']['x_abs']
step = geocode_dict['output_posting']
# fix types
rtc_min_value_db = self.cast_input(rtc_min_value_db, dtype=float,
frequency=frequency)
state.output_epsg = self.cast_input(state.output_epsg, dtype=int,
frequency=frequency)
geogrid_upsampling = self.cast_input(geogrid_upsampling, dtype=float,
frequency=frequency)
rtc_geogrid_upsampling = self.cast_input(rtc_geogrid_upsampling,
dtype=float, frequency=frequency)
abs_cal_factor = self.cast_input(abs_cal_factor, dtype=float,
frequency=frequency)
clip_min = self.cast_input(clip_min, dtype=float, default=0,
frequency=frequency)
clip_max = self.cast_input(clip_max, dtype=float, default=2,
frequency=frequency)
min_nlooks = self.cast_input(min_nlooks, dtype=float,
frequency=frequency)
y_snap = self.cast_input(y_snap, dtype=float,
default=np.nan, frequency=frequency)
x_snap = self.cast_input(x_snap, dtype=float,
default=np.nan, frequency=frequency)
y_max = self.cast_input(y_max, dtype=float, default=np.nan,
frequency=frequency)
x_min = self.cast_input(x_min, dtype=float, default=np.nan,
frequency=frequency)
y_min = self.cast_input(y_min, dtype=float,
default=np.nan, frequency=frequency)
x_max = self.cast_input(x_max, dtype=float,
default=np.nan, frequency=frequency)
step_x = self.cast_input(step, dtype=float, default=np.nan,
frequency=frequency)
step_y = -step_x if _is_valid(step_x) else None
# prepare parameters
zero_doppler = isce3.pyLUT2d()
# Instantiate Geocode object depending on raster type
if input_raster_obj.getDatatype() == gdal.GDT_Float32:
geo = isce3.pyGeocodeFloat(orbit, ellps)
elif input_raster_obj.getDatatype() == gdal.GDT_Float64:
geo = isce3.pyGeocodeDouble(orbit, ellps)
elif input_raster_obj.getDatatype() == gdal.GDT_CFloat32:
geo = isce3.pyGeocodeComplexFloat(orbit, ellps)
elif input_raster_obj.getDatatype() == gdal.GDT_CFloat64:
geo = isce3.pyGeocodeComplexDouble(orbit, ellps)
else:
raise NotImplementedError('Unsupported raster type for geocoding')
dem_raster = isce3.pyRaster(state.dem_file)
if state.output_epsg is None:
state.output_epsg = dem_raster.EPSG
if state.geotransform_dict is None:
state.geotransform_dict = {}
if (_is_valid(y_min) and _is_valid(y_max) and
_is_valid(step_y)):
size_y = int(np.round((y_min - y_max)/step_y))
else:
size_y = -32768
if (_is_valid(x_max) and _is_valid(x_min) and
_is_valid(step_x)):
size_x = int(np.round((x_max - x_min)/step_x))
else:
size_x = -32768
# if Geogrid is not fully determined, let Geocode find the missing values
if (size_x == -32768 or size_y == -32768):
geo.geoGrid(x_min, y_max, step_x, step_y,
size_x, size_y, state.output_epsg)
geo.updateGeoGrid(radar_grid, dem_raster)
# update only missing values
if not _is_valid(x_min):
x_min = geo.geoGridStartX
if not _is_valid(y_max):
y_max = geo.geoGridStartY
if not _is_valid(step_x):
step_x = geo.geoGridSpacingX
if not _is_valid(step_y):
step_y = geo.geoGridSpacingY
if not _is_valid(x_max):
x_max = geo.geoGridStartX + geo.geoGridSpacingX * geo.geoGridWidth
if not _is_valid(y_min):
y_min = geo.geoGridStartY + geo.geoGridSpacingY * geo.geoGridLength
x_min = _snap_coordinate(x_min, x_snap, np.floor)
y_max = _snap_coordinate(y_max, y_snap, np.ceil)
x_max = _snap_coordinate(x_max, x_snap, np.ceil)
y_min = _snap_coordinate(y_min, y_snap, np.floor)
size_y = int(np.round((y_min - y_max)/step_y))
size_x = int(np.round((x_max - x_min)/step_x))
geo.geoGrid(x_min, y_max, step_x, step_y,
size_x, size_y, state.output_epsg)
output_dir = os.path.dirname(output_file)
if output_dir and not os.path.isdir(output_dir):
os.makedirs(output_dir)
exponent = 2
output_dtype = gdal.GDT_Float32
nbands = input_raster_obj.numBands
if geogrid_upsampling is None:
geogrid_upsampling = 1
self._print(f'creating temporary output raster: {output_file}')
geocoded_dict = defaultdict(lambda: None)
output_raster_obj = isce3.pyRaster(output_file,
gdal.GA_Update,
output_dtype,
size_x,
size_y,
nbands,
"ENVI")
geocoded_dict['output_file'] = output_file
if flag_save_nlooks:
out_geo_nlooks_obj = isce3.pyRaster(out_geo_nlooks,
gdal.GA_Update,
gdal.GDT_Float32,
size_x,
size_y,
1,
"ENVI")
geocoded_dict['out_geo_nlooks'] = out_geo_nlooks
else:
out_geo_nlooks_obj = None
if flag_save_rtc:
out_geo_rtc_obj = isce3.pyRaster(out_geo_rtc,
gdal.GA_Update,
gdal.GDT_Float32,
size_x,
size_y,
1,
"ENVI")
geocoded_dict['out_geo_rtc'] = out_geo_rtc
else:
out_geo_rtc_obj = None
if flag_save_dem_vertices:
out_dem_vertices_obj = isce3.pyRaster(out_dem_vertices,
gdal.GA_Update,
gdal.GDT_Float32,
size_x + 1,
size_y + 1,
1,
"ENVI")
geocoded_dict['out_dem_vertices'] = out_dem_vertices
else:
out_dem_vertices_obj = None
if flag_save_geo_vertices:
out_geo_vertices_obj = isce3.pyRaster(out_geo_vertices,
gdal.GA_Update,
gdal.GDT_Float32,
size_x + 1,
size_y + 1,
2,
"ENVI")
geocoded_dict['out_geo_vertices'] = out_geo_vertices
else:
out_geo_vertices_obj = None
# Run geocoding
flag_apply_rtc = (rtc_output_type and
rtc_output_type != input_terrain_radiometry and
'gamma' in rtc_output_type)
if flag_apply_rtc is None:
flag_apply_rtc = False
geotransform = [x_min, step_x, 0, y_max, 0, step_y]
state.geotransform_dict[frequency] = geotransform
# output mode
if ('interp' in geocode_algorithm_type and flag_apply_rtc):
raise NotImplementedError('ERROR interp algorithm does not provide'
' RTC correction')
elif 'interp' in geocode_algorithm_type:
output_mode = 'interp'
elif not flag_apply_rtc:
output_mode = 'area-projection'
else:
output_mode = 'area-projection-gamma_naught'
# input terrain radiometry
if (input_terrain_radiometry is not None and
'sigma' in input_terrain_radiometry):
input_radiometry = 'sigma-naught-ellipsoid'
else:
input_radiometry = 'beta-naught'
if flag_apply_rtc:
output_radiometry_str = 'gamma-naught'
else:
output_radiometry_str = input_radiometry
# number of looks
radar_grid_nlooks = state.nlooks_az * state.nlooks_rg
# rtc min value
kwargs = {}
if rtc_min_value_db is not None:
kwargs['rtc_min_value_db'] = rtc_min_value_db
# absolute calibration factor
if abs_cal_factor is not None:
kwargs['abs_cal_factor'] = abs_cal_factor
# memory mode
if memory_mode is not None:
kwargs['memory_mode'] = memory_mode
if (rtc_algorithm_type is not None and
('DAVID' in rtc_algorithm_type.upper() or
'SMALL' in rtc_algorithm_type.upper())):
kwargs['rtc_algorithm'] = 'RTC_DAVID_SMALL'
elif rtc_algorithm_type is not None:
kwargs['rtc_algorithm'] = 'RTC_AREA_PROJECTION'
if (rtc_geogrid_upsampling is not None and
np.isfinite(rtc_geogrid_upsampling)):
kwargs['rtc_upsampling'] = rtc_geogrid_upsampling
if clip_min is not None:
kwargs['clip_min'] = clip_min
if clip_max is not None:
kwargs['clip_max'] = clip_max
if min_nlooks is not None:
kwargs['min_nlooks'] = min_nlooks
# call the geocode module
geo.geocode(radar_grid,
input_raster_obj,
output_raster_obj,
dem_raster,
output_mode=output_mode,
upsampling=geogrid_upsampling,
input_radiometry=input_radiometry,
exponent=exponent,
radar_grid_nlooks=radar_grid_nlooks,
out_geo_nlooks=out_geo_nlooks_obj,
out_geo_rtc=out_geo_rtc_obj,
out_dem_vertices=out_dem_vertices_obj,
out_geo_vertices=out_geo_vertices_obj,
**kwargs)
del output_raster_obj
if flag_save_nlooks:
del out_geo_nlooks_obj
if flag_save_rtc:
del out_geo_rtc_obj
if flag_save_dem_vertices:
del out_dem_vertices_obj
if flag_save_geo_vertices:
del out_geo_vertices_obj
self._print(f'removing temporary file: {input_temp}')
_remove(input_temp)
output_hdf5 = state.output_hdf5
h5_ds_list = []
with h5py.File(output_hdf5, 'a') as hdf5_obj:
hdf5_obj.attrs['Conventions'] = np.string_("CF-1.8")
root_ds = os.path.join('//', 'science', 'LSAR', 'GCOV', 'grids',
f'frequency{frequency}')
# radiometricTerrainCorrectionFlag
h5_ds = os.path.join(root_ds, 'listOfPolarizations')
if h5_ds in hdf5_obj:
del hdf5_obj[h5_ds]
pol_list_s2 = np.array(pol_list, dtype='S2')
dset = hdf5_obj.create_dataset(h5_ds, data=pol_list_s2)
h5_ds_list.append(h5_ds)
dset.attrs['description'] = np.string_(
'List of processed polarization layers with frequency ' +
frequency)
h5_ds = os.path.join(root_ds, 'radiometricTerrainCorrectionFlag')
if h5_ds in hdf5_obj:
del hdf5_obj[h5_ds]
dset = hdf5_obj.create_dataset(h5_ds, data=np.string_(str(flag_apply_rtc)))
h5_ds_list.append(h5_ds)
# X and Y coordinates
geotransform = self.state.geotransform_dict[frequency]
dx = geotransform[1]
dy = geotransform[5]
x0 = geotransform[0] + 0.5 * dx
y0 = geotransform[3] + 0.5 * dy
xf = x0 + (size_x - 1) * dx
yf = y0 + (size_y - 1) * dy
# xCoordinates
h5_ds = os.path.join(root_ds, 'xCoordinates') # float64
x_vect = np.linspace(x0, xf, size_x, dtype=np.float64)
if h5_ds in hdf5_obj:
del hdf5_obj[h5_ds]
xds = hdf5_obj.create_dataset(h5_ds, data=x_vect)
h5_ds_list.append(h5_ds)
try:
xds.make_scale()
except AttributeError:
pass
# yCoordinates
h5_ds = os.path.join(root_ds, 'yCoordinates') # float64
y_vect = np.linspace(y0, yf, size_y, dtype=np.float64)
if h5_ds in hdf5_obj:
del hdf5_obj[h5_ds]
yds = hdf5_obj.create_dataset(h5_ds, data=y_vect)
h5_ds_list.append(h5_ds)
try:
yds.make_scale()
except AttributeError:
pass
#Associate grid mapping with data - projection created later
h5_ds = os.path.join(root_ds, "projection")
#Set up osr for wkt
srs = osr.SpatialReference()
srs.ImportFromEPSG(self.state.output_epsg)
###Create a new single int dataset for projections
if h5_ds in hdf5_obj:
del hdf5_obj[h5_ds]
projds = hdf5_obj.create_dataset(h5_ds, (), dtype='i')
projds[()] = self.state.output_epsg
h5_ds_list.append(h5_ds)
##WGS84 ellipsoid
projds.attrs['semi_major_axis'] = 6378137.0
projds.attrs['inverse_flattening'] = 298.257223563
projds.attrs['ellipsoid'] = np.string_("WGS84")
##Additional fields
projds.attrs['epsg_code'] = self.state.output_epsg
##CF 1.7+ requires this attribute to be named "crs_wkt"
##spatial_ref is old GDAL way. Using that for testing only.
##For NISAR replace with "crs_wkt"
projds.attrs['spatial_ref'] = np.string_(srs.ExportToWkt())
##Here we have handcoded the attributes for the different cases
##Recommended method is to use pyproj.CRS.to_cf() as shown above
##To get complete set of attributes.
###Geodetic latitude / longitude
if self.state.output_epsg == 4326:
#Set up grid mapping
projds.attrs['grid_mapping_name'] = np.string_('latitude_longitude')
projds.attrs['longitude_of_prime_meridian'] = 0.0
#Setup units for x and y
xds.attrs['standard_name'] = np.string_("longitude")
xds.attrs['units'] = np.string_("degrees_east")
yds.attrs['standard_name'] = np.string_("latitude")
yds.attrs['units'] = np.string_("degrees_north")
### | |
# <gh_stars>0  (scraper artifact; kept as a comment so the module parses)
"""Revocation registry admin routes."""
import logging
from asyncio import shield
from aiohttp import web
from aiohttp_apispec import (
docs,
match_info_schema,
querystring_schema,
request_schema,
response_schema,
)
from marshmallow import fields, validate, validates_schema
from marshmallow.exceptions import ValidationError
from ..admin.request_context import AdminRequestContext
from ..indy.util import tails_path
from ..indy.issuer import IndyIssuerError
from ..ledger.error import LedgerError
from ..messaging.credential_definitions.util import CRED_DEF_SENT_RECORD_TYPE
from ..messaging.models.openapi import OpenAPISchema
from ..messaging.valid import (
INDY_CRED_DEF_ID,
INDY_CRED_REV_ID,
INDY_REV_REG_ID,
INDY_REV_REG_SIZE,
UUID4,
WHOLE_NUM,
)
from ..storage.base import BaseStorage
from ..storage.error import StorageError, StorageNotFoundError
from ..tails.base import BaseTailsServer
from .error import RevocationError, RevocationNotSupportedError
from .indy import IndyRevocation
from .manager import RevocationManager, RevocationManagerError
from .models.issuer_cred_rev_record import (
IssuerCredRevRecord,
IssuerCredRevRecordSchema,
)
from .models.issuer_rev_reg_record import IssuerRevRegRecord, IssuerRevRegRecordSchema
LOGGER = logging.getLogger(__name__)
class RevocationModuleResponseSchema(OpenAPISchema):
    """Response schema for Revocation Module (no payload fields declared)."""
class RevRegCreateRequestSchema(OpenAPISchema):
    """Request schema for revocation registry creation request."""

    # Credential definition the new registry will serve.
    credential_definition_id = fields.Str(
        description="Credential definition identifier", **INDY_CRED_DEF_ID
    )
    # Optional registry capacity; strict=True rejects non-integer input.
    max_cred_num = fields.Int(
        required=False,
        description="Revocation registry size",
        strict=True,
        **INDY_REV_REG_SIZE,
    )
class RevRegResultSchema(OpenAPISchema):
    """Result schema for revocation registry creation request."""

    # Serialized issuer revocation registry record, under key "result".
    result = fields.Nested(IssuerRevRegRecordSchema())
class CredRevRecordQueryStringSchema(OpenAPISchema):
    """Parameters and validators for credential revocation record request."""

    @validates_schema
    def validate_fields(self, data, **kwargs):
        """Validate schema fields - must have (rr-id and cr-id) xor cx-id."""
        rev_reg_id = data.get("rev_reg_id")
        cred_rev_id = data.get("cred_rev_id")
        cred_ex_id = data.get("cred_ex_id")
        # Exactly one addressing mode is acceptable: the registry/revocation
        # id pair, or the credential exchange id alone.
        by_rev_reg = rev_reg_id and cred_rev_id and not cred_ex_id
        by_cred_ex = cred_ex_id and not rev_reg_id and not cred_rev_id
        if not (by_rev_reg or by_cred_ex):
            raise ValidationError(
                "Request must have either rev_reg_id and cred_rev_id or cred_ex_id"
            )

    rev_reg_id = fields.Str(
        description="Revocation registry identifier",
        required=False,
        **INDY_REV_REG_ID,
    )
    cred_rev_id = fields.Str(
        description="Credential revocation identifier",
        required=False,
        **INDY_CRED_REV_ID,
    )
    cred_ex_id = fields.Str(
        description="Credential exchange identifier",
        required=False,
        **UUID4,
    )
class RevokeRequestSchema(CredRevRecordQueryStringSchema):
    """Parameters and validators for revocation request."""

    # True: write the revocation to the ledger immediately;
    # False/omitted: mark the revocation pending for a later publish.
    publish = fields.Boolean(
        description=(
            "(True) publish revocation to ledger immediately, or "
            "(default, False) mark it pending"
        ),
        required=False,
    )
class PublishRevocationsSchema(OpenAPISchema):
    """Request and result schema for revocation publication API call."""

    # Map of revocation registry id -> list of credential revocation ids.
    rrid2crid = fields.Dict(
        required=False,
        keys=fields.Str(example=INDY_REV_REG_ID["example"]),  # marshmallow 3.0 ignores
        values=fields.List(
            fields.Str(
                description="Credential revocation identifier", **INDY_CRED_REV_ID
            )
        ),
        description="Credential revocation ids by revocation registry id",
    )
class ClearPendingRevocationsRequestSchema(OpenAPISchema):
    """Request schema for clear pending revocations API call."""

    # Map of revocation registry id -> credential revocation ids to purge;
    # per the description, omit for all, or null/empty list for all pending
    # within a registry.
    purge = fields.Dict(
        required=False,
        keys=fields.Str(example=INDY_REV_REG_ID["example"]),  # marshmallow 3.0 ignores
        values=fields.List(
            fields.Str(
                description="Credential revocation identifier", **INDY_CRED_REV_ID
            )
        ),
        description=(
            "Credential revocation ids by revocation registry id: omit for all, "
            "specify null or empty list for all pending per revocation registry"
        ),
    )
class CredRevRecordResultSchema(OpenAPISchema):
    """Result schema for credential revocation record request."""

    # Serialized issuer credential revocation record, under key "result".
    result = fields.Nested(IssuerCredRevRecordSchema())
class RevRegIssuedResultSchema(OpenAPISchema):
    """Result schema for revocation registry credentials issued request."""

    # Non-negative credential count, under key "result".
    result = fields.Int(
        description="Number of credentials issued against revocation registry",
        strict=True,
        **WHOLE_NUM,
    )
class RevRegsCreatedSchema(OpenAPISchema):
    """Result schema for request for revocation registries created."""

    # Identifiers of the matching revocation registries.
    rev_reg_ids = fields.List(
        fields.Str(description="Revocation registry identifiers", **INDY_REV_REG_ID)
    )
class RevRegUpdateTailsFileUriSchema(OpenAPISchema):
    """Request schema for updating tails file URI."""

    # Public URL where holders can fetch the registry's tails file.
    tails_public_uri = fields.Url(
        description="Public URI to the tails file",
        example=(
            "http://192.168.56.133:6543/revocation/registry/"
            f"{INDY_REV_REG_ID['example']}/tails-file"
        ),
        required=True,
    )
class RevRegsCreatedQueryStringSchema(OpenAPISchema):
    """Query string parameters and validators for rev regs created request."""

    # Optional filter: registries serving one credential definition.
    cred_def_id = fields.Str(
        description="Credential definition identifier",
        required=False,
        **INDY_CRED_DEF_ID,
    )
    # Optional filter: registry state; allowed values are collected from the
    # STATE_* class attributes of IssuerRevRegRecord.
    state = fields.Str(
        description="Revocation registry state",
        required=False,
        validate=validate.OneOf(
            [
                getattr(IssuerRevRegRecord, m)
                for m in vars(IssuerRevRegRecord)
                if m.startswith("STATE_")
            ]
        ),
    )
class SetRevRegStateQueryStringSchema(OpenAPISchema):
    """Query string parameters and validators for request to set rev reg state."""

    # Target state; allowed values are collected from the STATE_* class
    # attributes of IssuerRevRegRecord.
    state = fields.Str(
        description="Revocation registry state to set",
        required=True,
        validate=validate.OneOf(
            [
                getattr(IssuerRevRegRecord, m)
                for m in vars(IssuerRevRegRecord)
                if m.startswith("STATE_")
            ]
        ),
    )
class RevRegIdMatchInfoSchema(OpenAPISchema):
    """Path parameters and validators for request taking rev reg id."""

    # Path segment identifying the revocation registry.
    rev_reg_id = fields.Str(
        description="Revocation Registry identifier",
        required=True,
        **INDY_REV_REG_ID,
    )
class RevocationCredDefIdMatchInfoSchema(OpenAPISchema):
    """Path parameters and validators for request taking cred def id."""

    # Path segment identifying the credential definition.
    cred_def_id = fields.Str(
        description="Credential definition identifier",
        required=True,
        **INDY_CRED_DEF_ID,
    )
@docs(
    tags=["revocation"],
    summary="Revoke an issued credential",
)
@request_schema(RevokeRequestSchema())
@response_schema(RevocationModuleResponseSchema(), description="")
async def revoke(request: web.BaseRequest):
    """
    Request handler for revoking an issued credential.
    Args:
        request: aiohttp request object
    Returns:
        An empty JSON response on success.
    """
    context: AdminRequestContext = request["context"]
    payload = await request.json()
    registry_id = payload.get("rev_reg_id")
    revocation_id = payload.get("cred_rev_id")  # numeric str, which indy wants
    exchange_id = payload.get("cred_ex_id")
    publish = payload.get("publish")
    manager = RevocationManager(context.profile)
    try:
        # Address the credential either by exchange id or by the
        # (registry id, revocation id) pair.
        if exchange_id:
            await manager.revoke_credential_by_cred_ex_id(exchange_id, publish)
        else:
            await manager.revoke_credential(registry_id, revocation_id, publish)
    except (
        RevocationManagerError,
        RevocationError,
        StorageError,
        IndyIssuerError,
        LedgerError,
    ) as err:
        # Surface any revocation/storage/ledger failure as a 400.
        raise web.HTTPBadRequest(reason=err.roll_up) from err
    return web.json_response({})
@docs(tags=["revocation"], summary="Publish pending revocations to ledger")
@request_schema(PublishRevocationsSchema())
@response_schema(PublishRevocationsSchema(), 200, description="")
async def publish_revocations(request: web.BaseRequest):
    """
    Request handler for publishing pending revocations to the ledger.
    Args:
        request: aiohttp request object
    Returns:
        Credential revocation ids published as revoked by revocation registry id.
    """
    context: AdminRequestContext = request["context"]
    payload = await request.json()
    manager = RevocationManager(context.profile)
    try:
        published = await manager.publish_pending_revocations(
            payload.get("rrid2crid")
        )
    except (RevocationError, StorageError, IndyIssuerError, LedgerError) as err:
        raise web.HTTPBadRequest(reason=err.roll_up) from err
    return web.json_response({"rrid2crid": published})
@docs(tags=["revocation"], summary="Clear pending revocations")
@request_schema(ClearPendingRevocationsRequestSchema())
@response_schema(PublishRevocationsSchema(), 200, description="")
async def clear_pending_revocations(request: web.BaseRequest):
    """
    Request handler for clearing pending revocations.
    Args:
        request: aiohttp request object
    Returns:
        Credential revocation ids still pending revocation by revocation registry id.
    """
    context: AdminRequestContext = request["context"]
    payload = await request.json()
    manager = RevocationManager(context.profile)
    try:
        remaining = await manager.clear_pending_revocations(payload.get("purge"))
    except StorageError as err:
        raise web.HTTPBadRequest(reason=err.roll_up) from err
    return web.json_response({"rrid2crid": remaining})
@docs(tags=["revocation"], summary="Creates a new revocation registry")
@request_schema(RevRegCreateRequestSchema())
@response_schema(RevRegResultSchema(), 200, description="")
async def create_rev_reg(request: web.BaseRequest):
    """
    Request handler to create a new revocation registry.
    Args:
        request: aiohttp request object
    Returns:
        The issuer revocation registry record, under key "result"
    """
    context: AdminRequestContext = request["context"]
    body = await request.json()
    credential_definition_id = body.get("credential_definition_id")
    max_cred_num = body.get("max_cred_num")
    # check we published this cred def
    async with context.session() as session:
        storage = session.inject(BaseStorage)
        found = await storage.find_all_records(
            type_filter=CRED_DEF_SENT_RECORD_TYPE,
            tag_query={"cred_def_id": credential_definition_id},
        )
    if not found:
        raise web.HTTPNotFound(
            reason=f"Not issuer of credential definition id {credential_definition_id}"
        )
    try:
        revoc = IndyRevocation(context.profile)
        issuer_rev_reg_rec = await revoc.init_issuer_registry(
            credential_definition_id,
            max_cred_num=max_cred_num,
        )
    except RevocationNotSupportedError as e:
        raise web.HTTPBadRequest(reason=e.message) from e
    # shield: registry generation must not be cancelled mid-flight
    await shield(issuer_rev_reg_rec.generate_registry(context.profile))
    # BUGFIX: response key must be "result" to match the declared
    # RevRegResultSchema (was "result_4").
    return web.json_response({"result": issuer_rev_reg_rec.serialize()})
@docs(
    tags=["revocation"],
    summary="Search for matching revocation registries that current agent created",
)
@querystring_schema(RevRegsCreatedQueryStringSchema())
@response_schema(RevRegsCreatedSchema(), 200, description="")
async def rev_regs_created(request: web.BaseRequest):
    """
    Request handler to get revocation registries that current agent created.
    Args:
        request: aiohttp request object
    Returns:
        List of identifiers of matching revocation registries.
    """
    context: AdminRequestContext = request["context"]
    # Filter on exactly the tags the query-string schema declares.
    declared = vars(RevRegsCreatedQueryStringSchema)["_declared_fields"]
    tag_filter = {
        name: request.query[name] for name in declared if name in request.query
    }
    async with context.session() as session:
        records = await IssuerRevRegRecord.query(session, tag_filter)
    return web.json_response(
        {"rev_reg_ids": [record.revoc_reg_id for record in records]}
    )
@docs(
    tags=["revocation"],
    summary="Get revocation registry by revocation registry id",
)
@match_info_schema(RevRegIdMatchInfoSchema())
@response_schema(RevRegResultSchema(), 200, description="")
async def get_rev_reg(request: web.BaseRequest):
    """
    Request handler to get a revocation registry by rev reg id.
    Args:
        request: aiohttp request object
    Returns:
        The revocation registry, under key "result"
    """
    context: AdminRequestContext = request["context"]
    rev_reg_id = request.match_info["rev_reg_id"]
    try:
        revoc = IndyRevocation(context.profile)
        rev_reg = await revoc.get_issuer_rev_reg_record(rev_reg_id)
    except StorageNotFoundError as err:
        raise web.HTTPNotFound(reason=err.roll_up) from err
    # BUGFIX: response key must be "result" to match the declared
    # RevRegResultSchema (was "result_4").
    return web.json_response({"result": rev_reg.serialize()})
@docs(
    tags=["revocation"],
    summary="Get number of credentials issued against revocation registry",
)
@match_info_schema(RevRegIdMatchInfoSchema())
@response_schema(RevRegIssuedResultSchema(), 200, description="")
async def get_rev_reg_issued(request: web.BaseRequest):
    """
    Request handler to get number of credentials issued against revocation registry.
    Args:
        request: aiohttp request object
    Returns:
        Number of credentials issued against revocation registry, under key "result"
    """
    context: AdminRequestContext = request["context"]
    rev_reg_id = request.match_info["rev_reg_id"]
    async with context.session() as session:
        # 404 if the registry itself is unknown, before counting.
        try:
            await IssuerRevRegRecord.retrieve_by_revoc_reg_id(session, rev_reg_id)
        except StorageNotFoundError as err:
            raise web.HTTPNotFound(reason=err.roll_up) from err
        count = len(
            await IssuerCredRevRecord.query_by_ids(session, rev_reg_id=rev_reg_id)
        )
    # BUGFIX: response key must be "result" to match the declared
    # RevRegIssuedResultSchema (was "result_4").
    return web.json_response({"result": count})
@docs(
    tags=["revocation"],
    summary="Get credential revocation status",
)
@querystring_schema(CredRevRecordQueryStringSchema())
@response_schema(CredRevRecordResultSchema(), 200, description="")
async def get_cred_rev_record(request: web.BaseRequest):
    """
    Request handler to get credential revocation record.
    Args:
        request: aiohttp request object
    Returns:
        The issuer credential revocation record, under key "result"
    """
    context: AdminRequestContext = request["context"]
    rev_reg_id = request.query.get("rev_reg_id")
    cred_rev_id = request.query.get("cred_rev_id")  # numeric string
    cred_ex_id = request.query.get("cred_ex_id")
    try:
        async with context.session() as session:
            # Look up by (rev_reg_id, cred_rev_id) when both are given,
            # otherwise by credential exchange id (schema enforces the xor).
            if rev_reg_id and cred_rev_id:
                rec = await IssuerCredRevRecord.retrieve_by_ids(
                    session, rev_reg_id, cred_rev_id
                )
            else:
                rec = await IssuerCredRevRecord.retrieve_by_cred_ex_id(
                    session, cred_ex_id
                )
    except StorageNotFoundError as err:
        raise web.HTTPNotFound(reason=err.roll_up) from err
    # BUGFIX: response key must be "result" to match the declared
    # CredRevRecordResultSchema (was "result_4").
    return web.json_response({"result": rec.serialize()})
@docs(
    tags=["revocation"],
    summary="Get current active revocation registry by credential definition id",
)
@match_info_schema(RevocationCredDefIdMatchInfoSchema())
@response_schema(RevRegResultSchema(), 200, description="")
async def get_active_rev_reg(request: web.BaseRequest):
    """
    Request handler to get current active revocation registry by cred def id.
    Args:
        request: aiohttp request object
    Returns:
        The active revocation registry record, under key "result"
    """
    context: AdminRequestContext = request["context"]
    cred_def_id = request.match_info["cred_def_id"]
    try:
        revoc = IndyRevocation(context.profile)
        rev_reg = await revoc.get_active_issuer_rev_reg_record(cred_def_id)
    except StorageNotFoundError as err:
        raise web.HTTPNotFound(reason=err.roll_up) from err
    # BUGFIX: response key must be "result" to match the declared
    # RevRegResultSchema (was "result_4").
    return web.json_response({"result": rev_reg.serialize()})
@docs(
    tags=["revocation"],
    summary="Download tails file",
    produces=["application/octet-stream"],
)
@match_info_schema(RevRegIdMatchInfoSchema())
@response_schema(RevocationModuleResponseSchema, description="tails file")
async def get_tails_file(request: web.BaseRequest) -> web.FileResponse:
    """
    Request handler to download tails file for revocation registry.
    Args:
        request: aiohttp request object
    Returns:
        The tails file in FileResponse
    """
    context: AdminRequestContext = request["context"]
    registry_id = request.match_info["rev_reg_id"]
    try:
        revocation = IndyRevocation(context.profile)
        registry = await revocation.get_issuer_rev_reg_record(registry_id)
    except StorageNotFoundError as err:
        raise web.HTTPNotFound(reason=err.roll_up) from err
    # Stream the locally stored tails file back to the caller.
    return web.FileResponse(path=registry.tails_local_path, status=200)
@docs(
tags=["revocation"],
summary="Upload local tails file to server",
)
@match_info_schema(RevRegIdMatchInfoSchema())
@response_schema(RevocationModuleResponseSchema(), description="")
async def upload_tails_file(request: web.BaseRequest):
| |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests import common
class TestWorkOrderProcess(common.TransactionCase):
    def setUp(self):
        """Cache fixture records shared by the workorder tests."""
        super(TestWorkOrderProcess, self).setUp()
        # Demo-data stock location used when loading initial inventory.
        self.source_location_id = self.ref('stock.stock_location_14')
        # Default warehouse record.
        self.warehouse = self.env.ref('stock.warehouse0')
    def test_00_workorder_process(self):
        """ Testing consume quants and produced quants with workorder """
        dining_table = self.env.ref("mrp.product_product_computer_desk")
        product_table_sheet = self.env.ref('mrp.product_product_computer_desk_head')
        product_table_leg = self.env.ref('mrp.product_product_computer_desk_leg')
        product_bolt = self.env.ref('mrp.product_product_computer_desk_bolt')
        # Manufacturing order for one table, built from the desk BoM.
        production_table = self.env['mrp.production'].create({
            'product_id': dining_table.id,
            'product_qty': 1.0,
            'product_uom_id': dining_table.uom_id.id,
            'bom_id': self.ref("mrp.mrp_bom_desk")
        })
        # Set tracking lot on finish and consume products.
        dining_table.tracking = 'lot'
        product_table_sheet.tracking = 'lot'
        product_table_leg.tracking = 'lot'
        product_bolt.tracking = "lot"
        # Lots for the initial inventory of sheet, legs and bolts.
        lot_sheet = self.env['stock.production.lot'].create({'product_id': product_table_sheet.id})
        lot_leg = self.env['stock.production.lot'].create({'product_id': product_table_leg.id})
        lot_bolt = self.env['stock.production.lot'].create({'product_id': product_bolt.id})
        # Initialize inventory: 20 units of each component at the source location.
        # --------------------
        inventory = self.env['stock.inventory'].create({
            'name': 'Inventory Product Table',
            'filter': 'partial',
            'line_ids': [(0, 0, {
                'product_id': product_table_sheet.id,
                'product_uom_id': product_table_sheet.uom_id.id,
                'product_qty': 20,
                'prod_lot_id': lot_sheet.id,
                'location_id': self.source_location_id
            }), (0, 0, {
                'product_id': product_table_leg.id,
                'product_uom_id': product_table_leg.uom_id.id,
                'product_qty': 20,
                'prod_lot_id': lot_leg.id,
                'location_id': self.source_location_id
            }), (0, 0, {
                'product_id': product_bolt.id,
                'product_uom_id': product_bolt.uom_id.id,
                'product_qty': 20,
                'prod_lot_id': lot_bolt.id,
                'location_id': self.source_location_id
            })]
        })
        inventory.action_done()
        # Create work orders by planning the production order.
        production_table.button_plan()
        # Check work orders created or not: the desk BoM yields 3 operations.
        self.assertEqual(len(production_table.workorder_ids), 3)
        # ---------------------------------------------------------
        # Process all workorders and check their states: only the first
        # is ready; the others wait on it.
        # ----------------------------------------------------------
        workorders = production_table.workorder_ids
        self.assertEqual(workorders[0].state, 'ready', "First workorder state should be ready.")
        self.assertEqual(workorders[1].state, 'pending')
        self.assertEqual(workorders[2].state, 'pending')
        # --------------------------------------------------------------
        # Process cutting operation: consume 1 sheet against its lot.
        # ---------------------------------------------------------
        finished_lot = self.env['stock.production.lot'].create({'product_id': production_table.product_id.id})
        workorders[0].write({'final_lot_id': finished_lot.id})
        workorders[0].button_start()
        workorders[0].active_move_lot_ids[0].write({'lot_id': lot_sheet.id, 'quantity_done': 1})
        self.assertEqual(workorders[0].state, 'progress')
        workorders[0].record_production()
        self.assertEqual(workorders[0].state, 'done')
        move_table_sheet = production_table.move_raw_ids.filtered(lambda x: x.product_id == product_table_sheet)
        self.assertEqual(move_table_sheet.quantity_done, 1)
        # --------------------------------------------------------------
        # Process drilling operation: consume 4 legs against their lot.
        # ---------------------------------------------------------
        workorders[1].button_start()
        workorders[1].active_move_lot_ids[0].write({'lot_id': lot_leg.id, 'quantity_done': 4})
        workorders[1].record_production()
        move_leg = production_table.move_raw_ids.filtered(lambda x: x.product_id == product_table_leg)
        self.assertEqual(workorders[1].state, 'done')
        self.assertEqual(move_leg.quantity_done, 4)
        # --------------------------------------------------------------
        # Process fitting operation: consume 4 bolts against their lot.
        # ---------------------------------------------------------
        finish_move = production_table.move_finished_ids.filtered(lambda x: x.product_id.id == dining_table.id)
        workorders[2].button_start()
        move_lot = workorders[2].active_move_lot_ids[0]
        move_lot.write({'lot_id': lot_bolt.id, 'quantity_done': 4})
        move_table_bolt = production_table.move_raw_ids.filtered(lambda x: x.product_id.id == product_bolt.id)
        workorders[2].record_production()
        self.assertEqual(workorders[2].state, 'done')
        self.assertEqual(move_table_bolt.quantity_done, 4)
        # -----------------------------------------
        # Post inventory of manufacturing order
        # -----------------------------------------
        # This behaviour was changed
        #self.assertEqual(production_table.state, 'done', "Production order should be in done state.")
        # ---------------------------------------------------------------
        # Check consume quants and produce quants after posting inventory
        # ---------------------------------------------------------------
        production_table.button_mark_done()
        self.assertEqual(sum(move_table_sheet.quant_ids.mapped('qty')), 1, "Wrong quantity of consumed product %s" % move_table_sheet.product_id.name)
        self.assertEqual(sum(move_leg.quant_ids.mapped('qty')), 4, "Wrong quantity of consumed product %s" % move_leg.product_id.name)
        self.assertEqual(sum(move_table_bolt.quant_ids.mapped('qty')), 4, "Wrong quantity of consumed product %s" % move_table_bolt.product_id.name)
        consume_quants = move_table_sheet.quant_ids + move_leg.quant_ids + move_table_bolt.quant_ids
        # Check for produced quant correctly linked with consumed quants or not.
        finish_move = production_table.move_finished_ids.filtered(lambda x: x.product_id.id == dining_table.id)
        finished_quant = finish_move.quant_ids[0]
        for quant in consume_quants:
            self.assertEqual(len(quant.produced_quant_ids), 1)
            self.assertEqual(quant.produced_quant_ids[0].lot_id.id, finished_lot.id)
            self.assertEqual(quant.produced_quant_ids[0].id, finished_quant.id)
        # ------------------------------------------
        # Check finished quants with consumed quant.
        # ------------------------------------------
        self.assertEqual(finished_quant.consumed_quant_ids, consume_quants)
def test_01_without_workorder(self):
    """Test consumed and produced quants on a production order processed
    without work orders.

    Builds a BoM for 10 customized laptops (20 chargers + 20 keyboards),
    produces them in two batches (6 then 4) with distinct finished lots,
    then checks that:

    * raw-material moves carry the expected reserved/done quantities,
    * ``post_inventory`` splits the raw moves (2 moves become 4),
    * each produced quant is linked back to exactly the quants it consumed,
      with matching lots and quantities.
    """
    unit = self.ref("product.product_uom_unit")
    custom_laptop = self.env.ref("product.product_product_27")
    custom_laptop.tracking = 'lot'

    # Create new product charger and keybord
    # --------------------------------------
    product_charger = self.env['product.product'].create({
        'name': 'Charger',
        'type': 'product',
        'tracking': 'lot',
        'uom_id': unit,
        'uom_po_id': unit})
    product_keybord = self.env['product.product'].create({
        'name': 'Usb Keybord',
        'type': 'product',
        'tracking': 'lot',
        'uom_id': unit,
        'uom_po_id': unit})

    # Create bill of material for customized laptop:
    # 10 laptops consume 20 units of each component.
    bom_custom_laptop = self.env['mrp.bom'].create({
        'product_tmpl_id': custom_laptop.product_tmpl_id.id,
        'product_qty': 10,
        'product_uom_id': unit,
        'bom_line_ids': [(0, 0, {
            'product_id': product_charger.id,
            'product_qty': 20,
            'product_uom_id': unit
        }), (0, 0, {
            'product_id': product_keybord.id,
            'product_qty': 20,
            'product_uom_id': unit
        })]
    })

    # Create production order for customized laptop.
    mo_custom_laptop = self.env['mrp.production'].create({
        'product_id': custom_laptop.id,
        'product_qty': 10,
        'product_uom_id': unit,
        'bom_id': bom_custom_laptop.id})

    # Assign components to the production order; nothing is in stock yet.
    mo_custom_laptop.action_assign()
    self.assertEqual(mo_custom_laptop.availability, 'waiting')

    # --------------------------------------------------
    # Set inventory for raw materials charger and keybord
    # --------------------------------------------------
    lot_charger = self.env['stock.production.lot'].create({'product_id': product_charger.id})
    lot_keybord = self.env['stock.production.lot'].create({'product_id': product_keybord.id})

    # Initialize inventory with 20 units of each component.
    inventory = self.env['stock.inventory'].create({
        'name': 'Inventory Product Table',
        'filter': 'partial',
        'line_ids': [(0, 0, {
            'product_id': product_charger.id,
            'product_uom_id': product_charger.uom_id.id,
            'product_qty': 20,
            'prod_lot_id': lot_charger.id,
            'location_id': self.source_location_id
        }), (0, 0, {
            'product_id': product_keybord.id,
            'product_uom_id': product_keybord.uom_id.id,
            'product_qty': 20,
            'prod_lot_id': lot_keybord.id,
            'location_id': self.source_location_id
        })]
    })
    # inventory.prepare_inventory()
    inventory.action_done()

    # With stock available the order can now be fully reserved.
    mo_custom_laptop.action_assign()
    self.assertEqual(mo_custom_laptop.availability, 'assigned')

    # Check current status of raw materials: reserved but nothing done yet.
    for move in mo_custom_laptop.move_raw_ids:
        self.assertEqual(move.product_uom_qty, 20, "Wrong consume quantity of raw material %s: %s instead of %s" % (move.product_id.name, move.product_uom_qty, 20))
        self.assertEqual(move.quantity_done, 0, "Wrong produced quantity on raw material %s: %s instead of %s" % (move.product_id.name, move.quantity_done, 0))

    # -----------------
    # Start production
    # -----------------
    # Produce 6 units of custom laptop; consumes 12 units of keybord and 12 units of charger.
    context = {"active_ids": [mo_custom_laptop.id], "active_id": mo_custom_laptop.id}
    product_consume = self.env['mrp.product.produce'].with_context(context).create({'product_qty': 6.00})
    laptop_lot_001 = self.env['stock.production.lot'].create({'product_id': custom_laptop.id})
    product_consume.lot_id = laptop_lot_001.id
    product_consume.consume_line_ids.write({'quantity_done': 12})
    product_consume.do_produce()

    # Check consumed moves after producing 6 units.
    for move in mo_custom_laptop.move_raw_ids:
        self.assertEqual(move.quantity_done, 12, "Wrong produced quantity on raw material %s" % (move.product_id.name))

    # Posting the inventory splits each raw move into a done part and a
    # remaining confirmed part: 2 moves become 4.
    self.assertEqual(len(mo_custom_laptop.move_raw_ids), 2)
    mo_custom_laptop.post_inventory()
    self.assertEqual(len(mo_custom_laptop.move_raw_ids), 4)

    # Check done move and confirmed move quantity.
    charger_done_move = mo_custom_laptop.move_raw_ids.filtered(lambda x: x.product_id.id == product_charger.id and x.state == 'done')
    keybord_done_move = mo_custom_laptop.move_raw_ids.filtered(lambda x: x.product_id.id == product_keybord.id and x.state == 'done')
    self.assertEqual(charger_done_move.product_uom_qty, 12)
    self.assertEqual(keybord_done_move.product_uom_qty, 12)

    # ----------------------------
    # Produce remaining 4 quantity
    # ----------------------------
    # Produce 4 units of custom laptop; consumes 8 units of keybord and 8 units of charger.
    context = {"active_ids": [mo_custom_laptop.id], "active_id": mo_custom_laptop.id}
    product_consume = self.env['mrp.product.produce'].with_context(context).create({'product_qty': 4.00})
    laptop_lot_002 = self.env['stock.production.lot'].create({'product_id': custom_laptop.id})
    product_consume.lot_id = laptop_lot_002.id
    self.assertEqual(len(product_consume.consume_line_ids), 2)
    product_consume.consume_line_ids.write({'quantity_done': 8})
    product_consume.do_produce()

    charger_move = mo_custom_laptop.move_raw_ids.filtered(lambda x: x.product_id.id == product_charger.id and x.state != 'done')
    keybord_move = mo_custom_laptop.move_raw_ids.filtered(lambda x: x.product_id.id == product_keybord.id and x.state != 'done')
    self.assertEqual(charger_move.quantity_done, 8, "Wrong consumed quantity of %s" % charger_move.product_id.name)
    self.assertEqual(keybord_move.quantity_done, 8, "Wrong consumed quantity of %s" % keybord_move.product_id.name)

    # Post inventory of production order: every move must now be done.
    mo_custom_laptop.post_inventory()
    raw_moves_pending = any(move.state != 'done' for move in mo_custom_laptop.move_raw_ids)
    finished_moves_pending = any(move.state != 'done' for move in mo_custom_laptop.move_finished_ids)
    self.assertFalse(raw_moves_pending, "Wrong state in consumed moves of production order.")
    self.assertFalse(finished_moves_pending, "Wrong state in finished moves of production order.")

    # Finished move quants of production order, one recordset per batch.
    finished_quant_lot_001 = mo_custom_laptop.move_finished_ids.filtered(lambda x: x.product_id.id == custom_laptop.id and x.product_uom_qty == 6).mapped('quant_ids')
    finished_quant_lot_002 = mo_custom_laptop.move_finished_ids.filtered(lambda x: x.product_id.id == custom_laptop.id and x.product_uom_qty == 4).mapped('quant_ids')

    # --------------------------------
    # Check consume and produce quants
    # --------------------------------
    # Check consumed quants of lot1: 12 of each component, linked to lot1's quant.
    for consume_quant in finished_quant_lot_001[0].consumed_quant_ids:
        self.assertEqual(consume_quant.qty, 12)
        self.assertEqual(consume_quant.produced_quant_ids[0].lot_id.id, finished_quant_lot_001[0].lot_id.id)
        self.assertEqual(consume_quant.produced_quant_ids[0].id, finished_quant_lot_001[0].id)
    self.assertEqual(len(finished_quant_lot_001[0].consumed_quant_ids), 2, "Wrong consumed quant linked with produced quant for lot %s " % laptop_lot_001.name)
    # Check total number of quants linked with produced quants.
    self.assertEqual(len(finished_quant_lot_002[0].consumed_quant_ids), 2, "Wrong consumed quant linked with produced quant for lot %s " % laptop_lot_002.name)
    # Check consumed quants of lot2: 8 of each component, linked to lot2's quant.
    for consume_quant in finished_quant_lot_002[0].consumed_quant_ids:
        self.assertEqual(consume_quant.qty, 8)
        self.assertEqual(consume_quant.produced_quant_ids[0].lot_id.id, finished_quant_lot_002[0].lot_id.id)
        self.assertEqual(consume_quant.produced_quant_ids[0].id, finished_quant_lot_002[0].id)

    # Check total quantity consumed of charger and keybord (12 + 8 = 20 each).
    charger_quants = mo_custom_laptop.move_raw_ids.filtered(lambda x: x.product_id.id == product_charger.id and x.state == 'done').mapped('quant_ids')
    keybord_quants = mo_custom_laptop.move_raw_ids.filtered(lambda x: x.product_id.id == product_keybord.id and x.state == 'done').mapped('quant_ids')
    self.assertEqual(sum(charger_quants.mapped('qty')), 20)
    self.assertEqual(sum(keybord_quants.mapped('qty')), 20)
def test_02_different_uom_on_bomlines(self):
""" Testing bill of material with diffrent unit of measure."""
route_manufacture = self.warehouse.manufacture_pull_id.route_id.id
route_mto = self.warehouse.mto_pull_id.route_id.id
unit = self.ref("product.product_uom_unit")
dozen = self.ref("product.product_uom_dozen")
kg = self.ref("product.product_uom_kgm")
gm = self.ref("product.product_uom_gram")
# Create Product A, B, C
product_A = self.env['product.product'].create({
'name': 'Product A',
'type': 'product',
'tracking': 'lot',
'uom_id': dozen,
'uom_po_id': dozen,
'route_ids': [(6, 0, [route_manufacture, route_mto])]})
product_B = self.env['product.product'].create({
'name': 'Product B',
'type': 'product',
'tracking': 'lot',
'uom_id': dozen,
'uom_po_id': dozen})
product_C = self.env['product.product'].create({
'name': 'Product C',
'type': 'product',
'tracking': 'lot',
'uom_id': kg,
'uom_po_id': kg})
# Bill of materials
# -----------------
#===================================
# Product A 1 Unit
# Product B 4 Unit
# Product C 600 gram
# -----------------------------------
bom_a = self.env['mrp.bom'].create({
'product_tmpl_id': product_A.product_tmpl_id.id,
'product_qty': 2,
'product_uom_id': unit,
'bom_line_ids': [(0, 0, {
'product_id': product_B.id,
'product_qty': 4,
'product_uom_id': unit
}), (0, 0, {
'product_id': product_C.id,
'product_qty': 600,
'product_uom_id': gm
})]
})
# Create production order with product A 10 Unit.
# -----------------------------------------------
mo_custom_product = self.env['mrp.production'].create({
'product_id': product_A.id,
'product_qty': 10,
'product_uom_id': unit,
'bom_id': bom_a.id})
move_product_b = mo_custom_product.move_raw_ids.filtered(lambda x: x.product_id == product_B)
move_product_c = mo_custom_product.move_raw_ids.filtered(lambda x: x.product_id == product_C)
# Check move correctly created or not.
self.assertEqual(move_product_b.product_uom_qty, 20)
self.assertEqual(move_product_b.product_uom.id, unit)
self.assertEqual(move_product_c.product_uom_qty, 3000)
self.assertEqual(move_product_c.product_uom.id, gm)
# Lot create for product B and product C
# ---------------------------------------
lot_a = self.env['stock.production.lot'].create({'product_id': product_A.id})
lot_b = self.env['stock.production.lot'].create({'product_id': product_B.id})
lot_c = self.env['stock.production.lot'].create({'product_id': product_C.id})
# Inventory Update
# ----------------
| |
ip_version), ('subnet_address', subnet_address), ('gateway_address', gateway_address), ('dns_server', dns_server), ('dhcp_params', dhcp_params), ])
class yc_vnfd_connection_point_ref_nst__nst_netslice_subnet_instantiation_parameters_vld_vnfd_connection_point_ref(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module nst - based on the path /nst/netslice-subnet/instantiation-parameters/vld/vnfd-connection-point-ref. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  """
  # NOTE(review): generated code — do not hand-edit. PybindBase, YANGDynClass,
  # RestrictedClassType, six, __builtin__ and OrderedDict are presumably bound
  # by this module's generated import preamble (not visible here — confirm).
  __slots__ = ('_path_helper', '_extmethods', '__member_vnf_index_ref','__vnfd_connection_point_ref','__ip_address',)

  _yang_name = 'vnfd-connection-point-ref'
  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    # Leaf holders are created eagerly; the two leafrefs are list keys
    # (is_keyval=True), ip-address is a plain config leaf.
    self._path_helper = False
    self._extmethods = False
    self.__vnfd_connection_point_ref = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="vnfd-connection-point-ref", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='leafref', is_config=True)
    self.__member_vnf_index_ref = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="member-vnf-index-ref", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='leafref', is_config=True)
    # inet:ip-address is a union of an IPv4 and an IPv6 pattern-restricted string.
    self.__ip_address = YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={u'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=six.text_type, restriction_dict={u'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),], is_leaf=True, yang_name="ip-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='inet:ip-address', is_config=True)

    load = kwargs.pop("load", None)
    # Copy-constructor path: a single positional argument must expose every
    # pyangbind element; changed values are copied through the setters.
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  def _path(self):
    # Path is derived from the parent when attached, otherwise the static
    # schema path of this container.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'nst', u'netslice-subnet', u'instantiation-parameters', u'vld', u'vnfd-connection-point-ref']

  def _get_member_vnf_index_ref(self):
    """
    Getter method for member_vnf_index_ref, mapped from YANG variable /nst/netslice_subnet/instantiation_parameters/vld/vnfd_connection_point_ref/member_vnf_index_ref (leafref)
    """
    return self.__member_vnf_index_ref

  def _set_member_vnf_index_ref(self, v, load=False):
    """
    Setter method for member_vnf_index_ref, mapped from YANG variable /nst/netslice_subnet/instantiation_parameters/vld/vnfd_connection_point_ref/member_vnf_index_ref (leafref)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_member_vnf_index_ref is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_member_vnf_index_ref() directly.
    """
    # List keys may not be reassigned once the entry is attached to a parent
    # list, except during load (deserialization).
    parent = getattr(self, "_parent", None)
    if parent is not None and load is False:
      raise AttributeError("Cannot set keys directly when" +
                            " within an instantiated list")

    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="member-vnf-index-ref", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='leafref', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """member_vnf_index_ref must be of a type compatible with leafref""",
          'defined-type': "leafref",
          'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="member-vnf-index-ref", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='leafref', is_config=True)""",
        })

    self.__member_vnf_index_ref = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_member_vnf_index_ref(self):
    # Restore the default (empty) leaf holder.
    self.__member_vnf_index_ref = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="member-vnf-index-ref", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='leafref', is_config=True)

  def _get_vnfd_connection_point_ref(self):
    """
    Getter method for vnfd_connection_point_ref, mapped from YANG variable /nst/netslice_subnet/instantiation_parameters/vld/vnfd_connection_point_ref/vnfd_connection_point_ref (leafref)
    """
    return self.__vnfd_connection_point_ref

  def _set_vnfd_connection_point_ref(self, v, load=False):
    """
    Setter method for vnfd_connection_point_ref, mapped from YANG variable /nst/netslice_subnet/instantiation_parameters/vld/vnfd_connection_point_ref/vnfd_connection_point_ref (leafref)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_vnfd_connection_point_ref is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_vnfd_connection_point_ref() directly.
    """
    # Same key-protection rule as _set_member_vnf_index_ref.
    parent = getattr(self, "_parent", None)
    if parent is not None and load is False:
      raise AttributeError("Cannot set keys directly when" +
                            " within an instantiated list")

    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="vnfd-connection-point-ref", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='leafref', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """vnfd_connection_point_ref must be of a type compatible with leafref""",
          'defined-type': "leafref",
          'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="vnfd-connection-point-ref", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='leafref', is_config=True)""",
        })

    self.__vnfd_connection_point_ref = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_vnfd_connection_point_ref(self):
    # Restore the default (empty) leaf holder.
    self.__vnfd_connection_point_ref = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="vnfd-connection-point-ref", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='leafref', is_config=True)

  def _get_ip_address(self):
    """
    Getter method for ip_address, mapped from YANG variable /nst/netslice_subnet/instantiation_parameters/vld/vnfd_connection_point_ref/ip_address (inet:ip-address)
    """
    return self.__ip_address

  def _set_ip_address(self, v, load=False):
    """
    Setter method for ip_address, mapped from YANG variable /nst/netslice_subnet/instantiation_parameters/vld/vnfd_connection_point_ref/ip_address (inet:ip-address)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_ip_address is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ip_address() directly.
    """
    # ip-address is not a list key, so no parent/key-protection check here.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=[RestrictedClassType(base_type=six.text_type, restriction_dict={u'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=six.text_type, restriction_dict={u'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),], is_leaf=True, yang_name="ip-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='inet:ip-address', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """ip_address must be of a type compatible with inet:ip-address""",
          'defined-type': "inet:ip-address",
          'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={u'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=six.text_type, restriction_dict={u'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),], is_leaf=True, yang_name="ip-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='inet:ip-address', is_config=True)""",
        })

    self.__ip_address = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_ip_address(self):
    # Restore the default (empty) leaf holder.
    self.__ip_address = YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={u'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=six.text_type, restriction_dict={u'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),], is_leaf=True, yang_name="ip-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='inet:ip-address', is_config=True)

  # Public attribute access is routed through the private getters/setters.
  member_vnf_index_ref = __builtin__.property(_get_member_vnf_index_ref, _set_member_vnf_index_ref)
  vnfd_connection_point_ref = __builtin__.property(_get_vnfd_connection_point_ref, _set_vnfd_connection_point_ref)
  ip_address = __builtin__.property(_get_ip_address, _set_ip_address)

  _pyangbind_elements = OrderedDict([('member_vnf_index_ref', member_vnf_index_ref), ('vnfd_connection_point_ref', vnfd_connection_point_ref), ('ip_address', ip_address), ])
class yc_vld_nst__nst_netslice_subnet_instantiation_parameters_vld(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module nst - based on the path /nst/netslice-subnet/instantiation-parameters/vld. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_path_helper', '_extmethods', '__name','__vim_network_name','__ip_profile','__vnfd_connection_point_ref',)
_yang_name = 'vld'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__vnfd_connection_point_ref = YANGDynClass(base=YANGListType("member_vnf_index_ref vnfd_connection_point_ref",yc_vnfd_connection_point_ref_nst__nst_netslice_subnet_instantiation_parameters_vld_vnfd_connection_point_ref, yang_name="vnfd-connection-point-ref", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='member-vnf-index-ref vnfd-connection-point-ref', extensions=None), is_container='list', yang_name="vnfd-connection-point-ref", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='list', is_config=True)
self.__vim_network_name = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="vim-network-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
self.__name = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
self.__ip_profile = YANGDynClass(base=yc_ip_profile_nst__nst_netslice_subnet_instantiation_parameters_vld_ip_profile, is_container='container', yang_name="ip-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'nst', u'netslice-subnet', u'instantiation-parameters', u'vld']
def _get_name(self):
"""
Getter method for name, mapped from YANG variable /nst/netslice_subnet/instantiation_parameters/vld/name (string)
"""
return self.__name
def _set_name(self, v, load=False):
"""
Setter method for name, mapped from YANG variable /nst/netslice_subnet/instantiation_parameters/vld/name (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_name() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """name must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)""",
})
self.__name = t
if hasattr(self, '_set'):
self._set()
def _unset_name(self):
self.__name = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
def _get_vim_network_name(self):
"""
Getter method for vim_network_name, mapped from YANG variable /nst/netslice_subnet/instantiation_parameters/vld/vim_network_name (string)
"""
return self.__vim_network_name
def _set_vim_network_name(self, v, load=False):
"""
Setter method for vim_network_name, mapped from YANG variable /nst/netslice_subnet/instantiation_parameters/vld/vim_network_name (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_vim_network_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vim_network_name() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="vim-network-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vim_network_name must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="vim-network-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)""",
})
self.__vim_network_name = t
if hasattr(self, '_set'):
self._set()
def _unset_vim_network_name(self):
self.__vim_network_name = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="vim-network-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
def _get_ip_profile(self):
"""
Getter method for ip_profile, mapped from YANG variable /nst/netslice_subnet/instantiation_parameters/vld/ip_profile (container)
"""
return self.__ip_profile
def _set_ip_profile(self, v, load=False):
"""
Setter method for ip_profile, mapped from YANG variable /nst/netslice_subnet/instantiation_parameters/vld/ip_profile (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ip_profile is considered as a | |
#!/usr/bin/python
import sys
import numpy
import os
import math
import functools
import rospy
import cv2
import rospkg
from cv_bridge import CvBridge, CvBridgeError
from cv_detection_camera_helper import CameraHelper
imgindex = 0  # module-level counter; not referenced in the visible portion of this file — presumably used by debug image-dump code elsewhere (TODO confirm)
def __create_mask_image_from_template(reference_image, template, pos_x, pos_y):
    '''
    Build a mask the same size as a reference image, containing the template
    centered at the requested position and zeros everywhere else.

    :param reference_image: The image the mask will be applied to; only its shape is used
    :param template: The template image to embed into the mask
    :param pos_x: The x coordinate where the template center should land
    :param pos_y: The y coordinate where the template center should land
    :return: A mask of the same size as reference_image with the template placed at (pos_x, pos_y)
    '''
    ref_height, ref_width = reference_image.shape
    tpl_height, tpl_width = template.shape

    # Top-left corner of the template once its center sits at (pos_x, pos_y).
    left = pos_x - tpl_width // 2
    top = pos_y - tpl_height // 2

    # Zero-padding needed on each side; negative margins (template partly
    # outside the image) are clamped to zero here and handled by cropping below.
    pad_top = max(0, top)
    pad_bottom = max(0, ref_height - tpl_height - top)
    pad_left = max(0, left)
    pad_right = max(0, ref_width - tpl_width - left)
    padded = cv2.copyMakeBorder(template, pad_top, pad_bottom, pad_left, pad_right, cv2.BORDER_CONSTANT, value=0)

    # Crop away any overhang so the result matches the reference size exactly.
    crop_y = int(abs(min(0, top)))
    crop_x = int(abs(min(0, left)))
    return padded[crop_y:crop_y + ref_height, crop_x:crop_x + ref_width]
def __rotate_image_size_corrected(image, angle):
    '''
    Rotate an image by the given angle (in degrees), enlarging the canvas so no
    part of the rotated image is clipped
    :param image: The single-channel image to rotate
    :param angle: The rotation angle in degrees
    :return: The rotated image on a square canvas sized to fit the whole rotation
    '''
    height, width = image.shape
    center = (width // 2, height // 2)
    # Rotation matrix about the image center (negative angle: same convention as the caller)
    matrix = cv2.getRotationMatrix2D(center, -angle, 1)
    # Any rotation fits inside a square whose side equals the image diagonal
    side = int(math.ceil(cv2.norm((height, width), normType=cv2.NORM_L2)))
    # Shift the result so the enlarged canvas stays centered
    matrix[0, 2] += (side - width) / 2
    matrix[1, 2] += (side - height) / 2
    return cv2.warpAffine(image, matrix, (side, side))
def __apply_template_matching(angle, template, image):
    '''
    Rotate the template by the given angle and match it against the image
    :param angle: The rotation angle in degrees
    :param template: The template to rotate and match
    :param image: The image to search in
    :return: Tuple (best match coefficient, angle, rotated template, padded score map)
    '''
    rotated = __rotate_image_size_corrected(template, angle)
    # Normalized cross-correlation score map
    scores = cv2.matchTemplate(image, rotated, cv2.TM_CCOEFF_NORMED)
    # matchTemplate shrinks the output by the template size; pad it back so
    # score-map coordinates line up with source-image coordinates
    rot_height, rot_width = rotated.shape
    half_h = rot_height // 2
    half_w = rot_width // 2
    scores_padded = cv2.copyMakeBorder(scores, half_h, half_h, half_w, half_w, cv2.BORDER_CONSTANT, value=0)
    best = numpy.max(scores_padded)
    return (best, angle, rotated, scores_padded)
def get_cubes_z_rotation(cv_image, CUBE_SIZE=90):
    """
    Gets the cubes rotation in the Z plane from an image. The results are sorted by distance to the center of the image
    :param cv_image: The OpenCV BGR image to get the cubes from
    :param CUBE_SIZE: The expected side length of a cube in pixels
    :return: An array containing the positions, angles and clearances of the cubes that have been found. The returned angles lie in the interval [-45, 45)
    For example:
    [((388, 526), -41.0, True, True), ((556, 524), -31.0, True, True), ((474, 382), -31.0, True, False)]
    """
    # Convert to grayscale
    cv_image_grayscale = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
    # Apply blur to reduce sensor noise before edge detection
    BLUR_SIZE = 3
    cv_image_blur = cv2.GaussianBlur(cv_image_grayscale, (BLUR_SIZE, BLUR_SIZE), 0)
    # Apply CLAHE to normalize local contrast
    CLAHE_SIZE = 64
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(CLAHE_SIZE, CLAHE_SIZE))
    cv_image_clahe = clahe.apply(cv_image_blur)
    # Apply Canny filter with thresholds derived from the median intensity
    sigma = 0.33
    median = numpy.median(cv_image_clahe)
    lower = int(max(0, (1.0 - sigma) * median))
    upper = int(min(255, (1.0 + sigma) * median))
    cv_image_canny = cv2.Canny(cv_image_clahe, lower, upper)
    # Apply dilation to close small gaps in the detected edges
    DILATION_SIZE = 5
    dilation_kernel = numpy.ones((DILATION_SIZE, DILATION_SIZE), numpy.uint8)
    cv_image_dilated = cv2.dilate(cv_image_canny, dilation_kernel, iterations=1)
    # Find contours
    # NOTE(review): the 3-value return matches OpenCV 3.x; OpenCV 4.x returns 2 values — confirm installed version
    _, contours, _ = cv2.findContours(cv_image_dilated.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Draw image with filled contours
    cv_image_height, cv_image_width = cv_image_grayscale.shape
    cv_image_contours_filled = numpy.zeros((cv_image_height, cv_image_width), numpy.uint8)
    cv_image_contours_filled = cv2.fillPoly(cv_image_contours_filled, pts=contours, color=255)
    # Create cube image (white square with a black border) for template matching
    cv_image_cube_template = numpy.full((CUBE_SIZE, CUBE_SIZE, 1), 255, numpy.uint8)
    CUBE_BORDER_SIZE = 4
    cv_image_cube_template_border = cv2.copyMakeBorder(cv_image_cube_template, CUBE_BORDER_SIZE, CUBE_BORDER_SIZE, CUBE_BORDER_SIZE, CUBE_BORDER_SIZE, cv2.BORDER_CONSTANT, value=0)
    # Create mask for the clearance check
    # Integer division: copyMakeBorder requires int border sizes (plain / yields a float on Python 3)
    CLEARANCE_AREA_LENGTH = CUBE_SIZE // 2
    CLEARANCE_AREA_MARGIN = 20
    clearance_check_mask = numpy.full((CUBE_SIZE + 2 * CLEARANCE_AREA_MARGIN, CUBE_SIZE), 0, numpy.uint8)
    clearance_check_mask = cv2.copyMakeBorder(clearance_check_mask, CLEARANCE_AREA_LENGTH, CLEARANCE_AREA_LENGTH, 0, 0, cv2.BORDER_CONSTANT, value=255)
    # Calculate the angles that will be used when rotating the template (between -45 and 45 because of square symmetry)
    range_subdivisions = 90
    all_angles = numpy.arange(-45, 45, 90.0 / range_subdivisions)
    # Look for cubes in the image and erase them until none are found
    cube_positions_and_angles = []
    original_image_loop = cv_image_contours_filled.copy()
    while True:
        # Apply template matching rotating the template
        apply_template_matching_partial = functools.partial(__apply_template_matching, template=cv_image_cube_template_border, image=original_image_loop)
        # Materialize as a list: on Python 3 map() returns a one-shot iterator that cannot be indexed below
        template_matching_results = list(map(apply_template_matching_partial, all_angles))
        # Get max matching coefficient
        template_matching_results_max_values = [value for value, _, _, _ in template_matching_results]
        template_matching_results_max = max(template_matching_results_max_values)
        # Stop once the best remaining match is not good enough to be a cube
        TEMPLATE_MATCHING_MAX_THRESHOLD = 0.5
        if template_matching_results_max < TEMPLATE_MATCHING_MAX_THRESHOLD:
            break
        # Collect best match
        template_matching_results_max_index = template_matching_results_max_values.index(template_matching_results_max)
        _, angle, template_rotated, image_templated = template_matching_results[template_matching_results_max_index]
        # Find location of the best match in the score map
        _, _, _, (max_loc_x, max_loc_y) = cv2.minMaxLoc(image_templated)
        # Apply template as a mask to the original image, deleting the area it matched
        template_mask = __create_mask_image_from_template(original_image_loop, template_rotated, max_loc_x, max_loc_y)
        template_mask_inverted = cv2.bitwise_not(template_mask)
        original_image_loop = cv2.bitwise_and(original_image_loop, template_mask_inverted)
        # Rotate the clearance check mask to create the two needed for the clearance test
        clearance_check_mask_rotated_0 = __rotate_image_size_corrected(clearance_check_mask, angle)
        clearance_check_mask_rotated_90 = __rotate_image_size_corrected(clearance_check_mask, angle + 90)
        # Create full-size mask images from the rotated clearance check masks
        clearance_check_mask_full_size_0 = __create_mask_image_from_template(cv_image_contours_filled, clearance_check_mask_rotated_0, max_loc_x, max_loc_y)
        clearance_check_mask_full_size_90 = __create_mask_image_from_template(cv_image_contours_filled, clearance_check_mask_rotated_90, max_loc_x, max_loc_y)
        # Apply clearance mask to the original filled-contours image
        original_image = cv_image_contours_filled
        original_image_clearance_mask_applied_0 = cv2.bitwise_and(original_image, clearance_check_mask_full_size_0)
        original_image_clearance_mask_applied_90 = cv2.bitwise_and(original_image, clearance_check_mask_full_size_90)
        # A direction is clear when almost no filled pixels fall inside its clearance area
        clearance_0_count = cv2.countNonZero(original_image_clearance_mask_applied_0)
        clearance_90_count = cv2.countNonZero(original_image_clearance_mask_applied_90)
        CLEARANCE_THRESHOLD = 50
        clearance_0 = clearance_0_count < CLEARANCE_THRESHOLD
        clearance_90 = clearance_90_count < CLEARANCE_THRESHOLD
        # Store result
        cube_positions_and_angles.append(((max_loc_x, max_loc_y), angle, clearance_0, clearance_90))
    # Sort cube positions by distance to the center of the image.
    # Center is (x, y) = (width/2, height/2); the original code swapped width and
    # height here, which skewed the ordering on non-square images.
    image_center = (cv_image_width // 2, cv_image_height // 2)
    # Plain lambda with indexing: tuple-unpacking lambdas are Python 2-only syntax
    cube_positions_and_angles_sorted = sorted(cube_positions_and_angles, key=lambda entry: cv2.norm(entry[0], image_center, normType=cv2.NORM_L2))
    # Draw debug image with the detected cubes and their clearance indicators
    template_matching_debug_image = cv_image.copy()
    for ((x, y), angle, clear_0, clear_90) in cube_positions_and_angles_sorted:
        # Blue rectangle around the detected cube
        rotated_rectangle = cv2.boxPoints(((x, y), (CUBE_SIZE, CUBE_SIZE), angle))
        rotated_rectangle = numpy.int0(rotated_rectangle)
        cv2.drawContours(template_matching_debug_image, [rotated_rectangle], -1, (255, 0, 0), 2)
        # One circle per side midpoint: green when that direction is clear, red otherwise
        clearance_points_rectangle = cv2.boxPoints(((x, y), (CUBE_SIZE * 0.6, CUBE_SIZE * 0.6), angle + 45))
        clearance_points_rectangle = numpy.int0(clearance_points_rectangle)
        clearance_bools = [clear_90, clear_0]
        for (i, (point_x, point_y)) in enumerate(clearance_points_rectangle):
            current_clearance = clearance_bools[i % len(clearance_bools)]
            clearance_circle_color = (0, 255, 0) if current_clearance else (0, 0, 255)
            cv2.circle(template_matching_debug_image, (point_x, point_y), 5, clearance_circle_color, cv2.FILLED)
            cv2.circle(template_matching_debug_image, (point_x, point_y), 5, (255, 255, 255), 2)
    # Persist the debug image for offline inspection
    global imgindex
    cv2.imwrite("/tmp/img" + str(imgindex) + ".jpg", template_matching_debug_image)
    imgindex += 1
    return cube_positions_and_angles_sorted
def test_right_hand_ros():
    """
    Test the cube orientation sensing using ROS
    """
    rospy.init_node('cv_detection_right_hand_camera')
    camera_name = "right_hand_camera"
    camera_helper = CameraHelper(camera_name, "base", 0)
    bridge = CvBridge()
    try:
        while not rospy.is_shutdown():
            # Take picture
            img_data = camera_helper.take_single_picture()
            # Convert to OpenCV format
            cv_image = bridge.imgmsg_to_cv2(img_data, "bgr8")
            # Get cube rotation
            angles = get_cubes_z_rotation(cv_image)
            print(angles)
            # Wait for a key press
            cv2.waitKey(1)
            rospy.sleep(0.1)
    # "except E as err" works on Python 2.6+ and 3; "except E, err" is Python 2-only syntax
    except CvBridgeError as err:
        rospy.logerr(err)
    # Exit
    cv2.destroyAllWindows()
def test_right_hand_cam():
    """
    Test the blob detection using a USB camera
    """
    # Open the second camera device and request a 1280x720 capture
    capture = cv2.VideoCapture(1)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
    escape_pressed = False
    while not escape_pressed:
        # Grab a frame; skip processing when the read fails
        success, frame = capture.read()
        if success:
            # Get cube rotation
            angles = get_cubes_z_rotation(frame)
            print(angles)
        # Stop on the Escape key
        escape_pressed = (cv2.waitKey(1) & 0xFF) == 27
    # Release the camera and close any debug windows
    capture.release()
    cv2.destroyAllWindows()
def test_right_hand_debug():
"""
Test the cube orientation sensing using images on disk
"""
# Get files
path = rospkg.RosPack().get_path('sorting_demo') + "/share/test_right_hand_simulator"
files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
#files = ["border.png"]
#print(files)
# Process files
for f in files:
# | |
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'340xQJFERMxUQ0BQ8yEW+1rNmufLEp1OZ35+/urVq09dvNxOW6reudRMmVg1FEV1cHDQXZhr'
b'5rzE6dRut+dm5wHEunMRUfWqmiTOzJTQmeklSZLnuQLjouj05mrboE59m0xTBkBCpJA6yqGz'
b'M/Pd7sxBv29maZZ9YBjQb+74N9eiiAtJGxaS6S8nAcKJ0FHz029pxLDQ5PIhhJAkSSwf8QZO'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>BGgyBwyWTjTC'
b'5nkeUgGv3cpfu3pvtx+GpR+XKH0IIaSp65gnJ+QFQa3yRETksywRcmoWuU01OimIhdMc5f3d'
b'/rsPF5854yIkIjVOHTEVppm2UZsCUAHzc92D4dLGTr+sTJK0CqpBmSUYqqCqKixBiGM9oRUA'
b'mHhU+RxcBP/FV65/7PlLZxaIUT/CDJHWMGlMggahkkMwA9UYP0KAEnFM7yk8Hm1txxkxPbk+'
b'<KEY>'
b'<KEY>'
b'+<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'xQVJhFmKcTk/O//yiy+trKzsHhz+/iuv3HlwfzTKy6K6+e61F55/fr7T2T0Ybu/<KEY>'
b'OdPrXTi3PhyOkKQ7u4d65UJ3dm5xcfHu7T47mX4vMou6uIH6/YNHjx49cfHJ2CehKQCY9J5a'
b'JJDREEKMp4k5NPiNkzkQAQyNAKWtra2NjXuXr1yq8nGWZRMI13iV9z4yo8URsRqYlVR9WRVr'
b'ayvvvntrc3PzqScuxtEJQRlORPb39ouimBVhdkQRF4g8pNVpLywthhACLHOJc6kiVzVYRNNC'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'SFoiLqgGmDg3L<KEY>'
b'<KEY>'
b'nWshATzMRWgXM61Ryesktphdy2wGDxDgAkCAB7YG+OobD959kB/kMsrLg/5wlA/zsggmnVY6'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'/<KEY>'
b'<KEY>1SsGtPdwx+DBRfvSU9j95tePddlTI'
b'OxmOySiaWQyyHWVsN6lBk1upRldXHbefnHZklsRXsybF4MT7At4fbStmZqgp3ppH1E2yqXOm'
b'LyeiiQHQtOcoHQvH597kkvpRVAtUMzMwKNKPhYRoNOzfu3Pr8ur6cq/TMi2IR<KEY>'
b'sbIoS+qkHA3UsSkRS683uzsa+VDOzs4PBgNPSR5gvnq09UgctbLOuXMXfF5WZdlttdfWzm48'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'eZxjFFuNYEGdc1VVxnSxaANUlQJ4sLU7086ePL8U80orYGNzL8p8s5i9Y4QaXq5W5+LCbhYz'
b'ULtXQ9DhKO8PR+l8hwkMzPfSbivJB7kJGYuZseHgcPDGm1d7nfbe3kG301o/u/7EpUuHwwjw'
b'<KEY>'
b'BO9cwiANBqAsfWCcu7DuDQFUBU2SxIemPC5a8KQp82xWx/fVMC7K3IcAAiU4So40siO4+umh'
b'<KEY>'
b'<KEY>'
b'+<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'QHAu8YGCRcw7U3AwtZieIQxhZnjvg0dQHI7yYAhlONzbe+ONN35ruf1d3/rJDz+7TICzGrcr'
b'ZvErAHDE6wKgIE9N9gfTm9c3/vv/6R/91pe/YtJCLcAm7qs/qGE90dUMYINXzyJeVVxSBnOu'
b'VQX9lV/7wmuvvf4XfvBz3/s93zHbBlCzGRrMoa4XtJhU1kxjAitgjJt3t//xP/vV/+c3vzis'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'svja733VOadGDNrf3blz59bq+fOdTM6fWf3ajXvrly5HMwziSJyy2++PR5V5uJXVMyLSFJFr'
b'jRpMQmIaCVKKYmdnqyxL0zCVDHNsIFQ9AFXPTJMgS/Mr1YirTUn9OB/eunP73IXzCiNXK28A'
b'zIcYMSCiaBKQuGANVFTliUgkgYYzZ1avXn13mI8hnDA5lyAoiHZ3951zkiZJkkQTgtkFM3LS'
b'ytL94aEZSSzppgjxUWP9JUnW6XTMLOYIqZnGqrWIYkS1/WxxzhAUEBaokQMqzC7MO+cODwcI'
b'+o2LgE+dUG+RQNSCvrH+d2IAJsoSgGld+<KEY>'
b'<KEY>'
b'<KEY>'
b'+<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'1lBFqDlE0R/nWLSQJuNu3PgPmlVQ21QEs7sPHi0uXqyatXJ/6zAvixBCmrbyvKx5aoDpZKkJ'
b'GwAElVYQADE5OxAR2Mqa91pu3nu4tb23urhw7uyZ+V4rc3UCvRDiWEWVOvotDkfF/sFwtz/e'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'X3/tS1/68htvvZWXFVzbrPG7N/6RaclLRDIVp5qsmvoJx8OzUktXUgAkwZSEgtG97d3/6r/9'
b'737pV375s5/5jj/96W89v76Ycf0UqRFIYYCvVyJ2+v7d67c//5tf/J3f/dp+f8wuE8el9+TI'
b'<KEY>'
b'g//YqPuATVWUTvdPdIjYMVW7dhZGR7AZQqgw0UHJhBi+DFUxzIuDwTjAwCBi80ESQaXjfOi1'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>KFGzGCuqiJa'
b'<KEY>'
b'u5sFHyrfTluzs7NvX33nU5/8UxqUiKCA2tbOtmQtNWMnTYlbECGFOWFflGxIXJYmLRExaIBq'
b'1ABDmO3OJpIWVaVmpa9iJnWc8FbXnB754thgBh+<KEY>//<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'sSq4ZiuxKcsgxp6mwN2CDXkRrSgnxMFEsVNydDV4CAHmy5qTiJh20S4JFBA9pFLFVAW+thMo'
b'r+9wtoQ6WcpyNCLi5cjTw/8TkQEEQjE9r+PT18fJdkdjeH1ss/NF3URDsCSIQEQxRuTfQpX1'
b'hI6+2RNRIEyKx7P55w/19vVpgXA8Wx4eHScFImrblotSTAEoWypc2gEAA0MVMTYDwvO6+frx'
b'q2t7O9NJeXR0cnx8XNc1M8W6UaSiGi2XS8YL1YCrZnHekxyP9EDLDpKCmuqLg8PlL9uPPvwQ'
b'iV68ePHsxcukwMzLZZMTPL1zQQPDvJMDUlIFxbNF06aDh0+eb21Odzam09FoOhmNyqpgBAoJ'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>J<KEY>aZ5nf7s57/5mxent9//eBxK'
b'<KEY>'
b't7c3pxvTLQ5IBLPZ/OFXX/7qL//qz3/685eHR22bKDAiCRWac6X3TX4xisAlQfkbC4F2zsSg'
b'nnIESU2Yyi8fv/jqv/sf/vv/8X+6/9573/v0ux/cvXPv9v7GZEyhiK00KZ3Pl48fP/3q4aO/'
b'/vyLw+PTJMZlBaGMooTIzEOrujsGem4rgKAXjZ/Wbblv2eZ6CHkleg4vh57jo9ndoyvgJFIz'
b'j2eyrkt02PnlFuuH/epXP0K4ftwbUEW01aZeLM5ndZwv6jYmAcP8RCWGtm1FBELhaAshAhOA'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>
b'<KEY>'
b'<KEY>'
b'y7OzswcPH+7uXwNfDQxEck5EBjbjbKkAxMCxjqNyZJLMUFQCIxHVdT3hcO3atSdPnnznu6db'
b'<KEY>'
b'IVg5tAwnCyLKwK1lNBpt7ewdvj44ePX6/+cUoG8u/cRWVSBCDgDQtKnRjJ76THXTBhH55Byu'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'mokcn52dnp4iolpqm6QISDlyTmobNAUa8ARcHugshle3CaIBKBKZmhkTqcHx6dlf/PTnXAQA'
b'<KEY>'
b'<KEY>'
b'y2d13Zyczhd126oilRaoKEfjIjAhmqJy08hidn56cjSbHbaLc4lt3ZwbtERBpQEjmkxCNWEO'
b'QEFUY4zctsGK1ErTNBJbhGTQquDs9Oj06DDwKASKrcwW8bjhL163s+UrlLhom+VyeXLw+vDw'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'rlIXojQL6p1AgAOrdVmW2JGmspmia8Y+szL0WIaBqpo7zwN2kqv1xg/flA1gMZ89/vrBzfc+'
b'uL67U9FXSaKZUTmSajPWi2BJRJumQZVIiigLbc4Pn+/v38CAsjidLVFCSFaowvX9a2VBX3/9'
b'YGdn50c/+oMnX31poTheLptaYms8KmNqA02+9wc/LMrR6fx8uyyvb23+8fc//<KEY>'
b'jZ4dVdd2Xrx8vYyyO57s37z16OkjYTNGVTVkBQFjAEipdTR9uVyKSIxRIQdkV/WUC3mJ66w6'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'smmae++/n5I+/PKre3duj6eT18cnp7Ozj95/jwC/+OKrW7dubW1vv371Yn5+dv/+<KEY>'
b'jb0bOztbJ+fLJy9eihgFJ3yQmvO4DQdmEILU5S0HAPBAuQSAzsMD6A1xpDoJdHt37Bcnglbg'
b'aLaMRob0jsMZEYGCmrWGgQIiJTBElKSuK4nl1bxNwsz6BoHmjZ3YbWCZ8NoFEFCiWgGpQIQ2'
b'JiOkHGZGu0iJ2tmcAcyStERkpkkSIiFYqyKSAJRCQLNWPLiBtjEOEfo37azQDS5zfqUzEIiA'
b'<KEY>'
b'<KEY>'
b'<KEY>
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>1+H+ka/'
b'JogkVW2aZT2fz+fLZdO2CqKMHSauIoAhKwAAPU8wcOk8TEMmZiYCIxOG1eZo2cNV19eBzCbu'
b'eVkr34ALStHl9sSV0cOPrFKk9Q21Llh3LbBSG7rSt9KVnZiL2pvqM7xtPhdh2NeWWUzYi+ke'
b'OX5tYUQy0HVPbxvYCYZv1CsAdvG9hi3gB1RVRTsRFnr7EmQXOeBOAgbCqO2jBw/+6E/+4+1J'
b'tTsuz45mhkENuRwrFyDWtk2BGAiXMSlxMS5O6sXxyevNneszQybe2r/B1h4cvzg5Oblz+4aq'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'TJ1irsqAtlpEkDovnAsCUI5BmF<KEY>'
b'ypOz2fPXZ/dubPnRzRF9dO/2X3/5iIiMCAHNlAywI8MkFaKBp2r39t0XRcQ2xUAMCAWFJFGj'
b'7O/t7k6gOSvP4uL9a6Oo8OzLeGN7vLszWi7IWr67P0aDZ1/Jjd3p1hZbM03L8xvXxgDw5CHc'
b'<KEY>
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'zpdNPRpvUChYNAAxu30ye5gJmIAlhSiWUmrbdly2jJOqYBtPeG9/Z2NbjKgsiqosCg5MZsIh'
b'B5STmNijDpmZtGYmoiCenpDFMlHVLY10YV68a7nQa2qImRvPA6kuioQQkMhMkIrsSKdIgQ0Z'
b'RGNSZixDQAxJIpdVkohchIApSTIDZMuposAMwciusjG9HU64PMzMzJeyN0vzmhGNgcjefbEr'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'7XvejAqAiMwciN1oQEUgZlVdL<KEY>v/2/Y8++vw3vxmVBYABMQKIyHi60TTLsgpnZydn'
b'<KEY>'
b'KZdHqM8vH0Wbm5ubm5tXKwBrqmQ/T64Y9f/Eyzdr/IOFtW8BwkBdYKbOQ2tlfLzqEQBvBpC8'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'Ui6DP5pEAqG/JyJqx78iIqLAzGImsXF5mnzvNgIgFRQTc0dVFZOkmiwHGWNPBQma4wACoZrF'
b'WFu9bNuU84aqMUFAigZJlEADUhmKgGRJQM00aRtT26oCkcUY2xRTShgQQE1j25wv5qd128S0'
b'MV+0ZVmRSWBkZhEhWklpbphya0BHk7Cy4DDdmEzADckWKAQCMFP0KYAIBKApSawtNWyaUiyY'
b'qQgi4rKrddNqvVPABajfZaNZ26F8W83WVCAmNiYEQuoWlqKArHcZsoZQighiCaAikiyPxijJ'
b't+eh9Na5ea+XbgRaX5mLsvsVEv9AXBtYBgZXmZkPJOjk9eHjYLAMDmXrYbP0l3T3NhvyocH6'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'1zCIaTw4PPrZz37xkz/+Zztb29dv3vjX//o/v3Xr5n/41S+ljZ5aODUtsKklQ2BjaRY3d7fP'
b'Fri7UT6ZH4eyPD09ffHy8Pv3drf2dnd3d2fHh57yWJGYWVSLglNKR0dHBQclOJuf75RFv5r1'
b'<KEY>'
b'<KEY>dSkJ08fmVpBLJpCVZZl+erg9dbOzunp6cuXL/d29mKMBwcHbvMfxnLth4dbBhDR'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'E6WYBKwgBgSNEREDs4GoqAy2QOyAbU9jxIQxxujSZChERDQhk3kYHCRVUVVizAmvu1q/STTx'
b'apuZaiQCRCPnPZoBYNLERESFajIV4mxTglVcqnyP4XAdHBnYmTiIqiIBgLpNHMDjZYFz8QlU'
b'BUzdnobard1mwGhdPI2q4n6f/cbps3bCUN9YdZozkEF6UdhbiV3zRUNECiwaMafgUpGIACVh'
b'<KEY>bABtwkJgoA5PagLBMQgoWiGI2DCvYwAAAgAElEQVTHY2+Zet6m1HJgVW2lTRDG'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'sBzbsShHzOxrOwCEzgPYF8+higgAIQRCIGQPgNvbpX1EBSK1VDIFQCzKeV0/+vrhB5/8/vXp'
b'ZLcqXi4iJBXRismSGqqoICAjI0LTClIoBNN8GUK5vbV5fHpYlRvNaAtHuDXl2emBNe1P/+zP'
b'Tl8f/uQnP7l340Yg+tYH9zHK0YtX87rWtpEYGQkQmCmlREaAvLU1/cMf/+DF1w/O2jmjvXzx'
b'<KEY>'
b'<KEY>
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>DSKpRunYgMgDx6IRu2EIyJ/lkuTdv'
b'kH4JAaBpQNvbHF/fLiA7PMFC4PXxmTiFBwR6ecKoWxpo2JLZG9WAXMJz9DcJgAYkR0Od8a9g'
b'nQehXpxHl9HBC6j/Kh6oqUgSEZf+XcsnNEZRiRKXYNEgSqpTakViD8eamZh6HVwIy+xAVFU1'
b'SSaJkcAEUBExpTalRHy1IHVpV7vw3ceVg1W9fA0AYgq+qRD1PlVX3v9NpV/mzCyE4KRzIgrB'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'/ofVyF/F7cnhmywvCN1DEZmwD1fSwavkOmUvCfuMo/<KEY>w'
b'<KEY>'
b'qCiKYUN50/EgLlD/UJ/jJQcOFJgK4r4QETMRd24YJs5yfvb4CaR4Y3tyfWeDCKmsoCiL6RSL'
b'gAEVNWlsU4SyxOnWaPfGdHu3HI1Tassy7O7umeFoPEHEtm0/+fjbW5Opxfg3n/3mT//nf/uz'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'jmE7DNc+INRMMAHtQhf/Fq9gAJ0YahlQQTNU4tm8efD4pTuNglkA+PQ73xqXhBIJTZMgky9D'
b'<KEY>VN2GCmIgDnC6Qdy3h9XcAzZFIu53SgEj4n5NRyL3T++9OUMIAERFmaQTK9Woi+rN'
b'F5zZMzu///hBIjBNgNltWiQG1DHrh3f2MsWfKAI8P1ie1w1yQBMCY7iQYjPvHJAzenohUO9c'
b'MHEPy8BIoBmWHmLkmnJNLAupmd9vbjzRzmHBHCF2ONmSWEqqopYQ1IMqSUwuuEtqVaJp8qeb'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'qk8vnw13NS9iKIaq0Hl4egj9DAeDJtUkEn0SSUxJIRpFo0agEY0GYiiAApi6ZLQuSAXEgijb'
b'Xi6Gk79cQ+ikduhss7S25ZnSkOezOg65wqJm0ntiWCd8u4pi3Xe4JOjn4ncQ7e8GAxO0db7d'
b'a9K/aBSNlgTVej0fIEeSAABmrorQQweaBBFjjG298L0j94LYdDqNMQqgIqjH0kJDtMAYiqwM'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'/<KEY>'
b'<KEY>'
b'<KEY>MRnz569fPWcKxbT0WjU96aq'
b'JhEDMAQxNdG6rkXEwZOUkhsEJpPJdDpFpqoajcfj8/Pzul6YSV3X3sVDtSelJDFpkj7phJkR'
b'URSZbEynmxtvVAD+X5TR37bd/tMp5NDMb1Muv/jakd+pXd5JxvK1RhAfPHl2spSeeDEtw6ff'
b'/rAEZUtIFmOk0KUfNw2El3MjrMRCNYeoichEs9UAzGdUz++sY+uwcQihKIiKMuZIMuBnErEm'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'b+1bZtu2bZMk2+WoSyRt4i/WKUWdNAlimABFIZpGUx2MNLpodekH4XBwXR6rbzr4jsWybdCs'
b'my9JLCZto/SfmDTJajL2Q7+/w5u+XFU3XTs+/LJWscsHLz/IzNBWaidccqVdq8mw9BLA0JLT'
b'qwExxnSx9DJ0lhSJcIDE9wIxAYJa0zQiAkSOZzNzKCoKpaqKeyW5t7cTgTyFFq+UpSHeD51s'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'pXzjU/6xl1V0HMtgqQ23Lr0sb1zeAN6y6GcZB66i4FxRum3mIq4AkBkmluFQUqQW6NdffJkg'
b'<KEY>'
b'U75+/bqq1ktRQ2Y2gJQAEYsQJOaBXoQqtYJqCKqxZdPCjEU8wUx5MauU4oBDiuqKu2oKBCot'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'sZ1w7J7Zg89FubOv1UDmuzCS37KGr2qbV+lsWc15/S7JajZAB1QV3ICjoiomAm71UDWRnHEr'
b'RwNTX216mHMo9/fF7ynJJFlUaVL0j8akelGcAu2I8r08neq6jjESBcvRr2k0mpTlyICSZijD'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'/<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'W7du0dtH2N++/J3e/O+jDKYu/K6v86ar8B3O+dsUQ4hmR6fzhy+OawEDYCQQ+L1vvXd9bxul'
b'UWs9IRgOjKpXFRURAjTRYFYyqSQi8igEbjEUgbZtt7e3rl3f//WvH3784e0qBC6cn5MAADv4'
b'EBGJuY1JAScbpb93YGSC1C6LABLbUVWKCOBQZrA1rjAZFIxoSqnd2Zzcv72Rfa2AFODxi/p8'
b'KYqsQB5J05MFAJBnjB0QXr6h5R2zz7i7dhB+/9fW/6oqqqKamXqQORAFTc4kcWBOe4l/AD2q'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'/<KEY>'
b'<KEY>'
b'o1E1HgGQKSqQxxAA1+sGZahd2IC8frn11tpwra97abXDp8jMmLmqqp4t85a7vaWsqRArxGHw'
b'rF7I7tv8yvu8aXz6T37hmspBcGEj7kZZX6sLjsghFIHLXpz1k980qNYrkQ2EsjLlgaimlFpJ'
b'CU3B7OmjR4v5bGM6vre/O2YbcVFQUYYCDYoQzDAJcDml8VTLiVRVS1S3saoqbZdnx69S27Rt'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b's3VBru1fv35949e//mpSjSxFUCuZJTaBcDweq4BpIgTV5GFqTI2I2qRKKojGdDo/P1uk+9/6'
b'6OuvvwYw1cRkqgJE7p4ImllACEoA2AXgUyBQLasgTSpINqfVt9+75lnRk4ERHJzB05dHWE6W'
b'dVMUDLDGb7/Y/hf/O+xNAwEANMtaAIB5WDYjQEVgQDUPIWpkIPm4kZl4rCFTAwRTy4K4uROC'
b'evxH8wmJq+g5ObI1ZMem3NyAmGNS+Aac5fuc68pBI6dzuqP0hVAh3Q76xrxYQzNgz2HIgyeP'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'YofhaagGeP+amZh2/Zbr5rD9sOZ9lRyjNCD0UAqAktruDrmUZVmV4yWhh6zxnLOYBVPXAdS5'
b'7gLmi1Hnrpp7/kKjXWxJj1RzeQwMBXQ/gdm17uErvE0aXmtARGRAX9AHV6xbKgaXmK367oKe'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'dP+A5a3V+weo+d++zfuCiBzCoo2/+fLBUj2kFDDAtIQf/+DTccACIDW1qnIR9LKNMwuXqprG'
b'o+p7n3zyH/3oh7eu79+8fuP+h3f/+jdfzmYzdz8vCiwCnZ6efv34ycbGxueffW6iYFAUlaaE'
b'aEWB2VpHKCJG3Mb05YOvxuPR3bt3QNPd2zd/9IPvv3f3JqEVjND5CueKZC/bXIioWdaMOin5'
b'<KEY>
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'nM93OXJ5LthVpVMAZPB9/de+y/rvPd3/MsW/5wL1f4fHRURSP2wRAIwQQAfOzULOUEds27ZH'
b'Uv0nd1QByxHzel0CsxltZUyzDoXFi2j6sMEvtsAVK0aPjF4+XwekoAsjbTD21sZhL8Sv3n3w'
b'0Ct7Z9VS3do7HCSIiHb1urr6vQt40D8IOxv32sKLA+20t+iaGTOX5cibaNiS+fyLtlyAnNVd'
b'DeViXmHsAukEIuf6MmDBaCl+/fUDQLlz8/pkXBTTjWp7T6iIBknMgIrxxmhjqxhPgKkR5Wp8'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'tRWmP+7t9k55AOziIggXLQb/1EoOI/OOZ9vqEt9BrzpnhTV+QyIk10+H5+Al+8PlxS5zkLBz'
b'<KEY>'
b'dXfP6egBAVR1f3+/DMXjr7/+7nc/iAp/+VefnS/myawoqo8//vb5eTubzUIIT5+/fPnyJYh+'
b'+nvfXyzq8/mSiwqA2haYi5TqkhkBQGiijOUAACAASURBVBTQYhP/5jeff/LJd65979NRhX/z'
b'159965NPTmf16SIG5BhjYIYuMo+vsjmwDllR8KjAjz/crwKUCJAaCFWj8Oj58TKqhZGJMZEI'
b'JPEEDT0vwgD6rMcK7uF90ftZ+wygHSKMpllu6OgEV6hLXYJN78jVcTVDRV2J+6s7rDB0wCyg'
b'<KEY>
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'wiPW3mXwUoOQXJ07ste2l7r6jnBHFV+16cpcExfb+XJ/dd30RuqODiB/PwK2volfeTki5ndx'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'9cbWJNJVSTQmixAYEQFDqlpVtUUcDYbMXNWLYrBFBurKM7NR2zSN93VMqUKIm6Px4enxYrEY'
b'jTZayWR1xfYAhuiSTkqTJ8MV0/SHlGvP/utW/h93SY53vb/fVS<KEY>'
b'IgoiYN3hyfnnXz+NjIkG1AFMBuYvPvmZI3B4RTMS8Gorqao6K8z+wb3zafjVp5/PFnMvnFn7'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'2tvd3N0ejkcut84ZogR7nHNEyxBD7XBt63si0jTVvC5nZTkty3lVLdqMpInyMpo6mD2zRxVj'
b'MLOWiGLWBen0iIAsEASCACtom6leRAQwpHiYFQXnGlTtCq+pY1eB4JXDdPnXS1dSa8BZmcCs'
b'CkoCyezQrbErK4kBQ9ha0uI4dgVF+2EA7R3fXa4EnZd3hj4oiX8B1kIvlqVvDVhq+oOu6Pev'
b'<KEY>'
b'<KEY>
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'd++BiLYK/mXqjDZNOzDzfDaLOqkY4QMgYKjL4971tkiI1nsR8d7fuXOHFOpFCaoGCSQ0danM'
b'<KEY>iPB'
b'mOlGIFoiIc12EWmqGpScy4jo9PgkamEAQEKS3ONGnQ8Hojjc2BSBGC48X0zXNpD+giKiyNnR'
b'X0S2f3UnD3ULVVcXCUAUYhOzQ0pdnWhAOk2GXNoH1zDE2/kK/6HKd5tu+w2OtrTOD7XrmZWr'
b'27M1XQOkqz7H2OqxcOUbRcQ+U16EFK35rvd9D/fopUyQSUbpZv3qb5iIpQwQBKJHLw+J4Kfv'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'Lpe0MEkBIIgCEtm4fBmUVTF4CV44y4IIsiEEAyam3ySTWGLJKBkxVhHVIAiqKhskADJkXWaK'
b'PKMhGGOcM5kz5EStcaYgL2QAhUe5BVXnDFkQkdPzSnUyHI6UITck4l0um1sTk2cZqM4aIXZ5'
b'UeSjQZ5bssIUggmgAQTIGuuMc1qraCBEoxKq+cX5iSqCsTYvnM0RjDMJvQVufFNxaEDZELTB'
b'Z4GsjXQzUZfWni4oGkPpGAAUjQSNvkBI0WCj5sq+7voctG8P6Iul3ahdHqZuznSTJF5JGEcj'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'gKLJRYHIqGru7MXp2dGLlz/62f7de7c2nr7ywRf5ZAEDxAAYaqkK1FCXYrKT16/zd+7u3X9Q'
b'DDIhtD7MvQ8huBjeSpnb3DY7+1JsKnPu6Je//gy42b99b3uyNdoYDkZ52dSvXr3cGo82NjYy'
b'ohA8GlTgioNFa9AiIoIGVVVwYA/uvGMoZ22MoajhCEiCgIlGT60zs/Nz9k08gdPcjrSnSNyz'
b'4gCAKotiEOfQjAbj23v7z148Hw4GEAIR1aE2xnhfA4CESAgO3vu8GDpCIOerOnCjIs4My7I8'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'KOnz06/K<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'4Pk5GItgom0CADreoRAaEUZEIrJEGgPREhhOWIpjNjnFiKeUMCo/tA0pfoshWCt/kEpurrYv'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'Pvr6q/vv/+jO7sRyA4zsBdDF9JLGmPHmSEbbC8prkVlVTba2F76xjkJVoiFy1qjz4r0zkg0W'
b'lI1HW47QX0BpBn/7Pz/94M6dv/m//9Pe/t6iLId5lpnt+WLanPrRaIDWIKBELaCiBo9AIp5I'
b'o+Z6Z/fWYDCqgzI3AAAgHVlFO6ekqipfNy7PlBmSCR+ttSn7qmi35DuWqsjb8/D+e48fP65n'
b'i/FgWDcLEGEJEoIqsw8AQDoPIQyHw6ihj6GARFTOZ8fHx3VZEsD0/GJjOBrmxfn5+cXFLJbb'
b'dw7QkPceLSGioCCapqmyrEAi7733PsuyzLrBcHx+di4M5CyQCUGY2WVZZuxkMskzK01zenxE'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'PHk5e/bqpAGnsKTaaLdgSKQOiZNH<KEY>jIBekhf9pUFZQw/fa+<KEY>'
b'<KEY>AUqyqAvLyifH4QdSlW+2y2W8uFayWdZC9RlSBiP0krxHX'
b'xCgTBEDCNXoebQ247Xa1ZCXCSx7Y/eMfv5tE6y0Ltm672KpPUvSkNWoJ0qELiBiVgfEc6TLs'
b'EKU4YSUxBjNni9wOR5lFYlbvfTVvKl8xc5YV48Ewz3MySNYoY9NovQjzRc2sZVmKhPFoZLLM'
b'uQxAqqAk3nuuq8ACZFxW5C7PrDMtWlBERENoTfTwESFVISJrswgdfNP4qq6z0hJpECRoGl83'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'tycXSZ+lUbQ3BEny73dsv0AbwL9W4dpgXR6m1B7sty1K9NG1Bjquxv4tV9bWtXZNsFn+2sYA'
b'<KEY>'
b'<KEY>'
b'w9HewYd/+vqrf/r88bfmb//fv/6P/+Hg4AAA3DAnR3VTzmfToshcnllnQVFYUNAYIIdNUyMo'
b'K+/d2hltDBevzowlYEkvCMSEKGIAAG1VltPpdMtthxAUSSWp5DTmDxZNZuJk0SR1Nk7LnZ2t'
b'yWRydnY2GBUx2ylzSNNAREGqOqjA+fn5ZDJRgbqqBoOB9/Xh4XHMnm6MWUwvMsLRYHh6fDKf'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'd0d5ctnuH92tpunSVo6ioCusndfA/ZuHZrlft8GX0EMJ8VDsX9x9puRBjr1blunl1x8aXZq6'
b'kqC2jTIAIkaVXisIXXF+wDXY5YY3feuiPeVc/4S+4r1UryP+6wauX26AXz9EKdE2cjlP+mg4'
b'pgUwPR6F2OXUet92XvV9v2Rj0DpyZACAOSwW1cnr4+nsnJnzbOC3tze3JkPMELGs/enJ9Pjk'
b'fFHVqBBCg8BNvbmxsZVlWd00zGxtJhLm8zIENsZFUoi1yUYUyR6iTzwBaBQvCY21NsaiAUAI'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>
b'<KEY>'
b'<KEY>'
b'<KEY>2CkiMPWmS6EMDu/2NjYYGbF5TKJ5yb2plPsEONyaXPxPnz4'
b'<KEY>'
b'Xixm+TBHHYe6EQThTDQyPvuqqs5OTl+9enX/3jsfffTje/Lu148fvTw8IpMjGEMuzoT59GIx'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'3+0p3uuoPiKJxwajmlajeRkaGmOIhIiYSVUJhFQUWBiaqp5dnJ8eH52cvFYW55yEYK11zijy'
b'+cXi+csXz549897H3PYgYXo+2dq5k7mBADZNU+RDQ7KYzkLTGIP5IIsBwYKCtBLIGIE+B5Py'
b'QQgCgEHKnc2si7lpGFRVg+cQgufkUy2QEt8AGSIbQ901JM93AARRifwhqknUXqXb+77l8uT8'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'H50U4UvxRyyPnr989vyJqmyOxrdv729tbd27/86DB++ON8b1oqzOp8FW+WAE1hhHoMpBg0Mh'
b'M7l1yw4GACySzpkoZ0nL20FoLi6mu41XAcFIGEoS2JDpjlRVlWgQIxKR2jcuz0Tl1t7OaDic'
b'Xly4zIBzygyoLNG9Hpg9YoYKi+mFRUCC+XzeNE1mnaiCinCw1jVVVZEB5dn0/P3333d59pvf'
b'/IYPvc3MxsYGWVNWtSvy6UVtjOEgBDIeFi9ePiOihw/f/+u//usnT56Mx5uD0VCiIaKRwP78'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'tNaCYFAV1mfPX1qbMWhmLauoCKFC02wMi/u39/a3cqOAAkjADEJQB/jsq2fTqmnQCgISqXjR'
b'<KEY>'
b'<KEY>5AkyeP4pECCvoQQUjfBKR'
b'NupveRciRhLeGCoa7QDRKGyMAyBhSKwsTVMt5tV8qqq+MYvZsKkm3g8YaTqdHx+fzi6mqOws'
b'oXDw1VzEUp7ltWedLebjfJTlVJVzVDbWRDcMIursTksBwObGWDaWVFkRlmpoRVJnyDkHik3k'
b'AAkSPIuAMUkfGTwDCVFgDcIgAgmvAqjGDEKCK2YtiLDpTcfrqtF563uhXa03rNnLEt2Vpb9A'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>
b'<KEY>'
b'<KEY>'
b'4O7+e/cf7Iy3ysV0Vi1GmxsmGFH47LPfbkwmf/nnf3777l3rHDdCSJC0/8k1AFsDznw+r6rK'
b'OgdtwImqtgl6sZtIAKCKQXiQZ8zsnONgDg4OPv/882KWbWxsxAEkTYw9zGyMhuCtcfPFjJOm'
b'VUMIRBRFu+jidXFx0TSN9/6dd94B0K+KPAR+8vWjcl6996P3s3zQlJUCVfPFIC/OTk6YxTn3'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'+dIYihkqe3C61Xq2TIVC/eFCipayNnFsDAYAAOccGsdqaq8s3AQOoQENlgRUrCFCEQlN0yiD'
b'<KEY>jZHhUqzKBEA/HwKLA031XyWaW0wl2aBEAw507ErRniXXMIMEUV5CBEDAwCrUgge'
b'q0VdFn4wkkEM0zRWpDZWCcFQ9CNTRWFlZggBETmhTxSN/mUS6Q5FhaIrKyLE/<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>8M69O5s726cX5fxs9sUXX332'
b'+Rf/z3/+z3Vd7uztbG5uvj58KSmFEQKAEKpwSgsgUlVVWZYbLvrPpORrCYtGFU8UklVVBUGt'
b'yyNnqAru7+9/+82j6XSeZZnLs05yZWYvisIcgqpGMSxOD0BsggdRIgqiAFCXVVVVu7u729vb'
b's9lsoxgR0dnZ2dGzF8S6fXtvY7LpsiIbFPWinJ2dZllRLRY2c3Vd/t3f/R0R/c3f/M2Dd+99'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'1v7bP7TSrz07wBVd0SrRoyh+ZW/hqsDw1rOxf2P/84rIdNWN3Tt2AlT75bJV/f/2D3gASP5d'
b'+B3+P1eW7n27/jTGkLXIiGCWwlvLwMhp6FMmyigLEERPzKVjQysAZN6HuvFIEJq6WizKsgzc'
b'gAZlT5YIJCavAfLMPMhzO9nJVLY2i6qek58H0BC8AYZQZ6TDwuXO1KUoA4JxNk+t4kTECTEG'
b'<KEY>kLM1ok6rrUi2mRDwaDgXOOXE5EmbHOOeeciESs2Dl0A8SAIgSgqNiWpHZDVAJR'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>1LoscCM9d1PU5cqhpS3C11'
b'ywQBum4xxtTBOzIhBAAYjzb39va++eabajRARCCIcyZmz4he/lEAQMSUGi+RfCAzq6Yrm6b5'
b'4IMPsiwrXDYej0HQD4LOZq8OD8u6+tHgw8zmdV3V84X6kI2MSghBFWk4yF+/fv3L//7fvvj9'
b'<KEY>'
b'9Ls1869XvoMisNf+a3HPm0MiXPreventV/76vUBYm8Ar7ZFR5EUTuY8VMfIbEAIEEULLCJ79'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'tyzHlS8i1joRYYH5fGEtAWpd1957UnEGm8DOGSIKIWDToCVCHA0KYZHFLMxn3MylKpWACMNi'
b'Zg3ZYrA5njjnyrIscnW2QHII1nsWiFGMiEjCStaRcWQzRIOGgONhICLKvp7OLlwxMDYDIDSE'
b'qJmxjgwTAmC0HgQVDQGUWKO21WjL6JKcYhWMQVUmIlGVEBRY9e1dgG4u+AZyqfZcvLQ3bdbG'
b'/bp61mZCxAeqaoyJKeHiZTHIe3kxQBcK0oFsRATQdtJC1MEnfWGvRL1jcrqKLkCBlSXSuPVW'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'GeH2cGOyuyVEs7PjxfRiZ3fr4f13P/37/wEmsuQQIApBI+wM5lk+Go0mk8nm5iYzgyFjTNyx'
b'vXCaY/0CIEAigsZKS1539+7dFy9eTKdTIsoHRcQXXAcBFVIgAgWBmFzcI<KEY>'
b'eMnywa3bd+rab2xMJpPtxazc3NzynsE30/OLZ8+e7e/vF0URQkOt02WcB0VuQ7UIvvr6q8Pf'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'B1RkmZOMQ3V+UZ4tzpp65kOFRGKCNHawsVm4wSAbmCwfDiaBwWY5mpxZg2duNfdxPhhjyFnj'
b'MuOsKLGaJdGHhKYqy9nUOacsxmWqCqhksIMySQwTVBVs3QN6oAJjDLqICkh0Uv9O147vW/5Q'
b'J04fil3+ae0DXGMrePNX68FuBWjzA16SLvrVqqoqp1Dd1JMCQEgrIearuGUtJnilkf3L4vst'
b'H9d7KAAkEsjk1dVdvqoUSItIo16zf+IYY8hlqqrACKKqIaUYW3boypxsnXZEBFYtb9DrorUO'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>3VdGwJLRCoIEurKoAYOg8wRggQ1johSKA4AnZyc3Hv3nR//'
b'+Kfdsu4akyx+CJ09sJ0eqm/iAvTmRVX/lZX7f0ylv+DXdpY3RPPfq6xV2FvPSecEyb0k4hIE'
b'QECDkcWEyJJhBS8aBL56fvTqdHpv//a7B1sZgVUgQzFDkhG4NSp2x+/49945n5an08Xp+cX5'
b'+ZS5efTo0cHBwbfPnp6cnqM1rL0Mr8kmKwiAvrEJRmmRu83xaG9rMtkYj0eZAYg5OVXjLQgI'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'UeMbXX9UVaPKNIo6KiCI0Vs6/POp/6G1wABcERa+PtlWf4JL20IqRLqMNlmJBFg73W9o0trn'
b'lekRXZR1mXNQouYblrrbdGnyP24jAJgjM0HkeUn1t9ltO9gqbZhvZw1oH0wK0Q8H+20Thp48'
b'smw8gyR6lFbJHb9HUpC+AAAxFQCrRDUqADiXRVbKbnREhDkGRtNyk+0e16cjixlXehabfsPS'
b'fpuaEtMpr0ti2DVu6TAT+1n6D+06Blp5ZnUmoAoirbAr96eTwArMj5hHVRmW6a7fRCsRHx3d'
b'tAyR4spMVlWUyNkCnQUDEQGEYJnIBQCcc1mW9XekGAQtvZ7sP1clIkIV1sjz1R5ES05VanUg'
b'<KEY>'
b'<KEY>'
b'+<KEY>'
b'<KEY>'
b'<KEY>eD87vxhkedNUJjNZZgU4J0DUzJnCWa4rIjWg49GQfahi65kFAEXLpjLG/Mmf'
b'/Hw0GjVNk6x+Lc5M6oZeCo64QyqKvqEAsCaz/kE0Mf8nlDWg/wNx/823r0n8XUmTMurIAaKy'
b'BFvlSNryUCNVCQAAQS1STWen09mzw+HDd+/e3dvMAAiBFCLGjutjb2OwszHQu7uNQFnWZVlW'
b'8wsIze5k3KjWnknBMCEiGkCLjowhHOTFKC9Go9HGaJA7jL5G2HOblYQPUQHqoEfH02+fvzwv'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>5WjAoxjVpcmyxepMPJV+dxv7wDdzt8C6Ta3lCVnilw+cTVd+laGb+MyyQz'
b'<KEY>
b'27t3/8Gd/eePvglmYK3VuiQTJHhFIaKGxSzK2lop6/msHG7YuvLchFpCyd7mmXMu1M3F6Zlv'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>
b'<KEY>FFMZArS6Vo2SiUi0'
b'kfZis1Z7Jp5OsTfagy251Jtu2Lo9uvXhaZNzxb0+Uo6CwiVGoG677fr/<KEY>'
b'92uGq7DLdfe+yZc38+pcvkUQIPldQOc0c0OdMYiQLl3TjU5/a4M1/Vwc3uvbdp2cvP6s1ZlD'
b'RKs5HMyVvdCJcIhgDLrMZJm1tcUmqGrMpmSQAgdh75tqMTurFnMHDXMTPBVuEABRCUBCE+rF'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'bB4aATakXFjlkuuKyDbCe9uT48PFcHOSeWGVPM+ZmQjv3Tv47aeAGtPSCSkYBEVgUFBRJGfN'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'XpDIQgM7CQyhWkxn02N0phE1LnPOWUdEaIg0bfFgT<KEY>'
b'oIYAwLN0yNh7zz5wxoQWYk6ZpFXrMDdEKzoQqyqjhNBE7osVifSf6XTQ68S92LaVcdfWknPd'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>aHe3d0dDgYx1VdRFJvOdQQ71rnhYGCsjRYA55y1Ns9z0mSf2d3dbU15kFkC'
b'Dr6uR6PBX/75L/7rfz2py2pgM/VNqBvOMk+ND4KIUvuSCABCCAaJRVR1Pp+<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'LKgakySodA0AY0xk9syzgjRmGwoSGDGoiiorEpFBQIw4KWIuJGb2oW6axuSB0EZhfgVkdFpn'
b'ABUJzIASQmDxIuEy/niTIXjz0pOxV2SMNdD/5s/qyyqcQmKvnaxrO23qhqgUacWS3k/dF93c'
b'Sd2mukxGLm3+BOmIgDrymaRplkt1Jb//ZRt6T0zO+NB11HqsLbS6/7YLrsUQqhr9w6Gl8Vks'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'EY3+f+7erVmS5DgT+9w9IrOqzqW7ZwYAAYKAkVhQXJKSnvQiM5ke9VP0A/Qr9pfoH8hkMsn0'
b'<KEY>vIggBgPMTN/OOZWZEe6uB4/Myrqc06cHTRHaeOiukxUVERkXD79+fnXl7pzT'
b'tuuly3EXL0qi8Hhg5qrF3ZlZIAGeW4b9nU2fvXrx53/2L//yL/4NasngonW6H+tYnYmI9sUc'
b'<KEY>KJggD6FNmyJ//6X++7bNZpQZsejg88z7RZY9HxNFMqYAnUIDWpPDih3nfn+71'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>YOj5Mw34cdyklPfAzzGKzHNKl1Jmru1dnoJKSo7acRymMgUZEWagGWKK5qmo1'
b'<KEY>'
b'k458zWLSSm990vLSrLvCHXTkAhSwUQQyM16dqaXH8w9PvOz5+NcPGyWkI8F5bpYpjLHs7FAt'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'/<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>7ENv1YSzUd7u9/9tOfvv7q13/97/49'
b'gxJnrbUU5eChzMFkrmbmpcYIw58WSgDU6Y/+8Gc/+fGPa526rotUIDxre+3E8hm7arYuAk05'
b'dioAHEjYJY7/cDCw8PfNGcJXHpPtAINBFr5WkUbwbJf/M5bZKeLDMsAaEeJAqh57ncc4DDp+'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'wqnbbq8/7/t+Gofh3TtL8iZLGSfp+7iNBOLutdZap2kYm4NEwMOHxpiA2aXEahHJzEJCBCZ1'
b'QyISOLuTVldxz0TEJHAjl0Tmjgge8qrqWsnVrdo0olbVBkV0EVDlZI8tzx+<KEY>'
b'o8BPOYjxh21DM8fYfmSOpp49oqJI8KOsbfOBAgCFOxNzAjjiHWjNQzMFxD4RQS2SErJIhAA0'
b'/rCN55Cs0Kz6nHfX5x5VNYChDF5rVbOiU2CW11qW1EV2nLtq/e+yq33W77i7zHlgADRv+9XN'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>lGKYyhjY'
b'yqqaQ2GfeKolUaq1OicmMiPARUS1auXdbiOSai0Eli4Tibp13SawqlKXp2kK8aeUIsKqpqpC'
b'nLpEJGaWWG6udrFPrnfXqkrCIqKqXddZVfc4vAarRet+v4epu9cyCYgZlnNK6e79/Wazi+yK'
b'+/3eoF3XdZ2IyE9/+gdffvnl23fv4XBjq9UUIgJyrSVgaz3kN2KmWDVKKf3whz/84Q9+8Pbt'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>LANG<KEY>'
b'<KEY>'
b'fejfv5Fu2+cu1FocWOlWp2lSLVqmUkbToqrQ2vg96GLrcFc2BgtchJ1C/neFVYcyWIi85Tl2'
b'Jgm5Sa24mquRK5uZFrdpFSRwIFBP7KunidhlPdSxyOrH1Z7eXb9laS4fc1a4haE7etOjLw7a'
b'nLUgGu40S834KQCHLlhATcQyI6J0LH6v747439edPiGun034/BOnQ3QoEzR+L8QB7LM0R261'
b'<KEY>'
b'<KEY>'
b'<KEY>
b'/<KEY>'
b'<KEY>'
b'<KEY>'
b'hcKTg5kV5u6J5fb6KqUEt/Fhr1pMS0qcCJw7dvNaUpdvb6/v79/v7+8C5+eLz17+/Oc///r1'
b'<KEY>p+B<KEY>uYm51zU7WFk5lINQE6plJGTuFExdcI0TTc3+uLF'
b'zZrwRh7D4LsuMOsf4CbplACt63NDlpg5rtA+Pg5o9c9Xns/6nzJ<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>hS/+P8tzJ6G2Ny7MwAlqzSkNxxHyjzec/oNAclTZiEAsBAo3/VCQg9zZyGaQJTKU'
b'UmqtS5tMlFLquk2QUzMjMoNZAN5LZKc+ME8Hs8Wx6uH8Qly/Dq3eOp5H9K8RhIWZF+ibZepO'
b'ZuyotZnJXkpbbztIFMt41h4UUVmEVTUgL4hkibhbWnN3JgkiRmSqtUtMMHIjdrNaSokew6xx'
b'sqzL8HA0IdZMWw5mashLAfQCYoarpZS0VGdhFlVjZuEIkiaQxW3eVEMXTgZHjiwid0XOeRwe'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'/<KEY>
b'aG39EydztTvxO6Ao/acqT19U62rLoV1/xseoUb+7bPDkwB4bf2Osjys/3UhoSghA8yEJyj4/'
b'MCBM6kThe7RmDdeUdE3Qj0RKiFOEFIgBgIDMYXJEtIUgsxMQ0XGc4nmbzylH9T/xCnziwoAd'
b'm26Otk3o/s3Jba0PfHpf0Zl69bsVoouCyaN82xMVLtVf93Jewj3qAyyoHx9MAuAWWS9crZax'
b'Tqal+q6yJE4S8XMAkMEC4UzEKaU+5bg5WMIaxQafxno/7N++ff/N169rteub25cvv7i+3ibO'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'IjfzYt5JCkGfmc1qSA4Ldp4fy1pBq9v5n0URgsKqMJgJpuz+9a+/Wt2qQuTh+O6qBBd2dUiS'
b'<KEY>'
b'mxk8QptyBDiJSEieYRwgIuaNQWutbrTZbBQ0DAOhMnPf90Skql3uUkq11so0DMM0lnEcU0qN'
b'sZkzBuQlSjglp4OPWZKUUsIcz52yCFytJqFtnzXx9fWrWuvr16+/+eabosqcXn724qrvaq0i'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'ksZpuGNmDx9Md3VHtnCoDSLbdV0fzveS4pETTFFKuXt3d/fujaoLaLe58u0uUSKv7uJuzpDE'
b'qSOCGbEZ9pPe7QfFmFO/3XREBGF1K6VUY6dN12WwR7YxTt18+VXXqekRmZ3Z5ovQ3Vt0BFEE'
b'mJI1MBDj9gENDbsdPePJSXhMaRxSvpeul5RS6iRvRHKSjaTMKUM4kLFd1VxdjaxYywT8aU7B'
b'bBF5Rs0zmvx8vv+xwn5qfDhhqdccKtGsXI1OyS7Kn+vDRWTuGqH6LWQiWrBD7mg4wwO+8cCj'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>/Try7owc7/p3L3xLeH4D4d7FjatzJyFzWzaP7jVxLi/v4fp1dVVGYevv/76L//y'
b'L19+9rkTXV3d/OIXv/j8xe3NzXUtEzNvt/1n0tVqlRVWDeaqzDBQHfb/8I+//MU/fvlHP/vP'
b'ui6VatPDUIoqPFGSidxJhAjIuXf3aZrev32/213nnFPue7Bd2du3r8dxSMLuxmhxNG4WEU0f'
b'<KEY>'
b'<KEY>'
b'+<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>uZTM0hQZW/abwd5rVM4'
b'NgRcqhOIOXUbsBAbucLZCW7BMbspLmSjOaj/fdFMrybWn7iV1teELwr7ZX7aKx86Op9krC6d'
b'dbPNg4VombRwNGpwMYC7j0P51a9+dXW1ffXqc7XTIS5TrapEHsY3uA3Dw/t3b70MsMrC67We'
b'1wVYIpWPqecR6SaDW612dXP96tUrAMVUzWs1d5/KRESuRiLV1Fx3XadlWgPb+mwvOlDR1caI'
b'<KEY>'
b'/+<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>lUAHiMOj9W/1KFfwrd6j9nWRson6w2J2V5rMIlevbBuT2q3LI2ts8A6FMw'
b'tf542Nxjw7MGU91gntaX6EJEvFHSCA5bvgVmR8aDVCDphBwTkQDuGimXEFcKEJG+gBBJ4NMs'
b'EcKzDDBzcqs3ivRkc3bJeNawaoBZMFjbBI5m9bdFG8Qz2JRL/T6/j2DwGusfV5+5O8Hm7LA4'
b'UgZ+F7b5HKelrTJ9wDC1FF45xH0wj8Fj5TCTttZhHaF/PrGf56v03F8xklcsCYwMVs0MsOpq'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>7WPTIHkg1R8/Bpe7VhLs0pw+IdW0TNXKHWOk1TKcei'
b'wgVhztm9uiZYmYb3b7+d9g+d+Lh/cMuJiXkb0gUtF2T46R3KAvNgwArFBeCcrq5ufvKTn96+'
b'elGqOrFWMjO4TtOgpe7H4d27d3f3b+twL0nMDpbxw/Ym4oUIrEwA7m6OrsuSukCmYuau60op'
b'88LF3KVltCJupqrKBJG0TL6pMQdoJJlTKeqExIBpSgnq7h42ENVKqgE9RWQ0KNFoZq6Vmdmx'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'4VcnLTytuFrK0bVx9pwDRvMs/uxS4+02WKqtd85BBjgWALz1ktD4rbAnNUChSH++iBVMBDcs'
b'3kTuZ1z7U9BPdEk1/knK+j57rgzwzJb50TFbuyB5jl9sF9XFpbzYyHNmY3Xqmz/Wx75sMC+0'
b'Ctx8uqy3x6GjC21e3rEXGzz501eZeNo+FyJzGALA0VseIhOkBuZtIQiAqRJYx8qFixWlbirV'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>
b'<KEY>'
b'<KEY>'
b'TsO+PtxP79/tgW9y/mXXdVfX26urq6urKyHOWW5ublJKHug941hKeXgY7u/vhzKVUmrRWqtq'
b'c+IKvZWZXV1diTcTwTRNyxIAGIaBmadp+sdf/oKZr6+vv/jiiwA6r+MkIrvdxt2HYRiGh/39'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>2To7bOXEjIri6sxExC3PqNhutnDpSGzuXpul3NndmA0BmcX8Y'
b'<KEY>zCUaSQWQ5qK5b43L9M07u++GfdvbLzPKemkNDoK6+QwDMNe64Q6Jq+dWx3uH6b9'
b'YGZQZnaWlNIm9Q96D58YNaUu5+xgd4IndRKGmxIRyA1qICd24jlpCBymbj5HzMSVvI7IiePI'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'qcxnzpSIOkm5S3/8J3/6l//nv/7yl7/oJat6IJnO2xvMjOMAhsMxCbdaJoMTs7rzwezfzqDi'
b'cCLcBHBYpMFox8jMGvlxLKY2FgfMSNSBQOmR7IAaiMQdFna1ZrUEiNyMwNUMoLFUAJPW/VTe'
b'3d9tu3dElFLabLvFQYiI6qTjUKZamdmJSinDME3TFGA+OW/ivmHmqepY7lsuDjMzK8rurqXW'
b'WpfAQgDv3r7/9Ve/acHBwiKy3W5TYjdLibc5uaubiUhE+t7e3n7z7ZtSNJOIYBqKasu2xClX'
b'<KEY>'
b'n/50d331/<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'+eIbXbwL1tz8utoR20dLQqjLbfq6zF88MbB1XZ+tQOuZPNArahGosybL3Q2Q/X5fpurJZ35Q'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'R4Gfzvl3Q2W93PLxs9BpLUrrD/XbrCWPWR7iLAvNt0zk/WRKztaCEgkeMYuSTDWgeCOfNwMA'
b'i0hx1xnzBKBAG9BaE3Okw1OFeaijXHLy5wOVP6v81qnlPjrqgIkQ2QeJHWhhlw7Y8d47FhJO'
b'sf+f5vWfQwpWSU9nJDs6LG48WcZ2+rN4kyVPMLCEthwGP4/knFv94Ng+VEJEP7KkEcIcvSXe'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'cjBnvDVEWqfDhCzH+GyyaSE7R+77q1AhnxUfRM3Wp6qqvqRMPmwnB2YXqdZySA7M7lwf2QnL'
b'fPoxRtBiMYvy2Wefbbc9Gm86k0oyzIwaVCkl1XEaHoa79zbt3XWWulWd1Zsdo3GiKaJgWqeX'
b'<KEY>SSWmsAAQWz7JGSlRju1SznvN3s3Km6OcGZLFRHNKeOI4K7zDLVoceWn3mZ9gNP'
b'T+wwEDVvPwIiRaA6uOUad4Q1YPlJBJC4LfbIefWNwomuJWYO81WMAMA8AHKzSrM3SaC+NkvR'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'D3EiDzwmHjxRjiThjy/<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'pXCRFusyx34ADe+fmQP4dT0z7VAcwJPJ4KQAOEARiFzVISlRVrdSqpkxC5Hsbl92u23aXPW7'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>
b'<KEY>'
b'<KEY>HPpQxKOlb73s0PUyc5dWbQ4mWs41DYa/j4Voc6'
b'<KEY>'
b'<KEY>'
b'ip0NKHimc6r+GNceNXnFZLffkjkCM8nnBD0xleYUqdpm1jmU6x5XChBx6HHvoLlnROUl0GAx'
b'JBwHy174fLEYXB0pqMrZtzQj0gMevgTLdBDAqSNOM0sA90jSZXEuFt+MZRh+zOif9rWWtebB'
b'06I2OggPnlK67vqTdogIiAAwX5Z4Wb/G0l3KBHwyKprdQlJK0zTpzAumlMiMeSUnuMPM2QxK'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'+<KEY>'
b'<KEY>'
b'<KEY>UUGWD9ZHoaWpUsMoGijDWwO'
b'U4FmcvFKLil11amYKqWiFrSMCVZrEGN4zSmx2ziNAEvKTKjWSAxf0oV/wnLuc/9pj4Cqklv4'
b'eWhRIe9EShljD6452qaluOyxerks18Dz9+ph+Y5F3O/w1hc7XbXzqCb4YlNPqznig4DmAHMQ'
b'kUjO/abvrrrNhtjN3YjhTKucppKImZN0CgdcfRrH+33uuOTAUmRGJgYlcmwzX3f8Qq57v313'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>sUAyNyoOXDyCophThNJFLgBAIgNburkDrCTwNRh3rBtQo1KEbsXgeiA'
b'VXdxtcj3c6w3PF63eGKz76IDzSIUJZL7hivB4uVPbQKoS0wiXde5eywugAX1P5ysrIXIN68t'
b'kIdY0iRAgzoCY7/ve9MI85XQzefUAXxz8+Llq8/77Ya7nDebm9uXX3z/e8LZvBIopQxAVUNo'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>
b'<KEY>//hv//vNoJ/9a/+x//w'
b'H/FQrrvrlzfbl33XCRsn2XT99e5q3F4/PJSHcTCvDsrdVb+97fptLePw8H7a36l62myIshmV'
b'ydUcJDl3QldmhUzgGtik1FJfECPBquuyojHJ1GBbyYlAgQdCLCIiSVLilIk7zl3KG0mdMwXy'
b'<KEY>pI2KkvYEHuZMbfWDrnK3RM+XENSv8wZofZP1X99RpfZodHohnPehC6lflpMFV'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'UsZaAI+cx33fb652u+1Vt9nutrfdZvPF93+v222vX7zMm23EBogp1Lo+MWicKjMXc3bPwqpG'
b'BBJy91qn2JOllof9fRmHruvKOFiZEvO4f/jq1796cfsy5zwMw1dffdV13Tfffu3uu93OvC77'
b'WVWrGbCCAX2aSj6HXmC1g5f67IvHwUdcw//E5VlKUA5OfpVVZHbp5haFww4PZ8j2xMOGNn9e'
b'WwA+Vvp5zr31RFmf53N+6zKrt8ph/8xOgGdz/48/+eCvzGvAqJLXPvmPPv/eH/7+dQpsOUCB'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>1Bz91RwKc4qL3D32Ddk/'
b'IbV/vuD3GLU5l36fKCuBNvjgw0OcKSOXTpfj1jhvrOWHw41wtPGCDwaAo9xVT7zgyW9tNe0t'
b'6OdDtv1FlFo/bHj/TkFlGa4wdrZ4faZwGWNQwIrHgWje1dyl1EezzBCZw4j5VGpagmsPwCFn'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'4COdAJhdYpNOn3xa/esJSzZ7lRz28OGM+RzD5C113nGJUHo3IDzX3E2D5SAwAuw54lpmmrL+'
b'fHSw1xaG+DAPa6Hg5Dj9VfvzwMLNa8E4pe8Hhd8Rr3PM4LafH1z8eT22s8KL1uqEV1/Xie4B'
b'eIMs4GUk8+E/SPMxJItI4EaYDEAFsnTQQmaZ689+/OMffZYBFOB+wMPd9Pru4Tf3w+QiLGRK'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'a0mWnYd9a60dcc69efNdlfXqrm52U2RTJkE9SEqyPNBAHggwDAOeGDZkeKSBYf8ADzwRPPBM'
b'EDywJzY8MjyVYUuwYUIWAUs2JIqW3SSbpNhdXV3vR1Zm5c17zzmx1/o8WDvixHncmzerq0nt'
b'Spw6N07Ejv1cez2/dVlX55cX1dckbXl29uDBvQevnZ7cWT17vqp+vroUaKiExLC+KEXrplJC'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'mJKBs5FXiwjsuh5lf30syeAKx1RnQZJ935OZmElEtkywyDYLQfLLagYthAYkAz0Z1Ws4otaN'
b'<KEY>'
b'hQtQymJ568x0awyp0VJiEZ6EcevYI2j5dZpvfd8EIdMiGlOsnqqIFFGY5gymwh1jeMbeLsNM'
b'KhjnIqZNNpecvUZTCQnDCSEDokjlQ8pZkxhDMt2QJOiJg2eaqtmpbVQxMJdcRGAUvdLKJWZM'
b'vj9CzWz08rfZ93l3SrfdO6pl6mxLDtCHiKRiLhU6i7QqmNZgbNb+xRdayuXzi/w1E/eWUliH'
b'dSkAnGEK0ikQ0zyhzXTZnUgz4Nvp6em02dvikTQuFUvIowbRGg3vZJfdEpGQG7gA3aTsCQ/b'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'ETPpGopaw9WbN<KEY>'
b'<KEY>'
b'pe/L4rRf3JJuIaVng/PL2MIAIZG2TU/If93ifEzpDpnbdxzklygjcTi+9A6JWDuGr66QvBEG'
b'<KEY>'
b'<KEY>'
b'<KEY>
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'VvfuFBxBAdretrtmrrLqYHb9uvlVQuYGFyohUYfTRc/<KEY>'
b'<KEY>lK+NJb+he0QDwCWIoTIUGsgaYOFaDhkdAxTKyKh4kj6CQMKaIR65mAV92lthBsC'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'bf2TZX/OClxTxhv2bxufTfvANimPjApjpKVoawHI1zWN6s57r9ZxHDm2Rv062Uyye74K8xY6'
b'WHblyT3iP53l3NoyDCP47N6xfdg8cidF11W9OPLUFfff/GASkRl/qE0AAGqtq/XF4NX6DnCS'
b'Al0sTtLMIrN6RETmwYHj9ZiU3zfo0cRzJ0MTEpAM2BWDZdYkzmo7PJfnXZu3cH9MdgFLOLrv'
b'Z//njklTj0aFMhvIJzVYhzrk7gAagl9y5Ldu3VosFn3fr9ZrEXF3sX2MvmnNTCRKZiLHdl5E'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>O'
b'<KEY>dNA0kqlXRE2EgYBQBURkNQsyfcbPbaLBMMcToJTYz8YK0DSHfPtpmZFCO9dJ0P'
b'G4weuvRITHE7OAsxCtuL0l2sVqP/JTLKyt2Fk+5ERBJ++6plNw9yaw7K2KX4YMTg4XV98Zyy'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>
b'<KEY>'
b'<KEY>
b'<KEY>'
b'8mPtmGnCkgo8YjyzDj4jffmUmt9D2n/K9l9+b6fGLluSf9Y4vjFJjlxooJ13ajMm28brhGY+'
b'xEkAmE9Z7EiqcLKJExF7QoUCQKgWkltPa5KsbJr12kAgOJc0Elg5V8XW40Nmoubu9yNhPMcY'
b'tu04HPKK12zevKgHssGcMuy16mjZFwCuZ+YOKzog6Ak727RKBIUK7OzJf0UKma6UudnGTdO2'
b'fOzdeSUTP5MA5r8eHaivwIsf9f6/otyY+b058szhnTyIDTi80krSgkTauekL94sEMrxM5OT0'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'n9Znf3qyvHt2uZEquFivLtxdA53VIErXoOMEpb9l3aJ0C6gFQ7tiXU8zLctucbpYnBZbiFj1'
b'S/chPNTSX7vAikaERAAiIEchN53ZRuu3CJoDawgFWvrS9123sNIHNOHmSQrBIOiI2o7LoIKI'
b'hvwjAKecd2Rz/pnW5KQUvBFznIfu9TrpmUJ0ToomWJJrj5urDtR9vckuQKQQRxlGcpuglgca'
b'+vkpvpWp4JBIXWC7MH5hCLmT8nZWXtCpvV5noi5w/0i6hp/YpZ/pzbhVIiCjCqkYLQACkLWu'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'tc7vPNz81/w5p7Nfjen/auVw3fxJlq+m1bu+NL6BToYhFotFvuPy8jJqAk+YWkQgIDAj1CP0'
b'paUNnW9VoeQVtX6IqqCpGoPuyuiKRbjAQU0P/9yE0ZiSEKTfDoGGniaMERJFFXBAghDUcGTE'
b'sKCQyiqsACIueykZ1UpYscKQGtG8Ko9BDzWsRSowjIw74F5AgbNW1HWvQfhQq0MELBEilFoR'
b'Gegn6bAU0mIbvnIZ6obJH6gBGMK7CKEHwiRkykUXm07E4ApN3xGPyAy3yUyXAxVrxtfTQyEC'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>/'
b'<KEY>'
b'HhYRcVCKmRkjswIjA083dTCz9M9L6pH3DzNymJw2Ww4QYuJatTHGAkRMeRwUIgITFQAeU/Rj'
b'wj+lfaVlMm5KnAMfmLHdO1M5XjVmiL2Mp0TKBrPPtC1Pm3HHh362rdgUQBmV3Pqik3peIGKG'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'dplON1hvAW4czYuHDjjCS9GuBVjjxNCLVFZNu3B4qKlqMGrUBE5IRmtSruS7Uqcz1DUQvVGw'
b'fnCr+85bDxYnZ6e3bq/Xw/Pnz1eXm9WwWdeqUszMK8YAQG2n2q4mOsUnyxQfSKYGjKjDRVx+'
b'+vo9vvXGG68+evjo/sOnT58+fvzknffe/6M/fvf5QJWluwTTfpHyk2dkraDSldKCGUDUWn0z'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b's84cY2ZtSAXK4VE+ufnuNX6/I9NaGjfD7C37TomYTeLcof/oLp71QiGxRQ3c1hkA3J2zNT9v'
b'FQ5W0XxH763wvTUwr3P+5+yG9n1PGNYxsd3e228aA3Azqg0cmBtu/uCfVNky38CEUbBf5uM7'
b'<KEY>cMbvkL5KWs4SgG/3iLSlMaHcsUNZ59X<KEY>fSQWwv0AgAP79+7fXan'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'+N//j//zH/xv/+jZar0RJUoe7UhgH0nc9NT+J7cUmYkz1ZwRECsnJ7fKsu9cWO3Eq6uzoHQL'
b'CigSi+UC2qlJIZRmJW0jqlDV9XpdK4p1qoVRfVjX2le/DNCoApgxIiQ8SIQovSXaZGbsTHRO'
b'AyGmokVE1DrteiudaAlogBF0d0QIK+EZEE4SiOb3z51DDoCOeYXwdZC+w0rmhwvJ9PWd345j'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'tqtlZKgOZQNySg64z8sewqjMx1y4Nb7N69+Xn8ew7D3mU1UxRoZsYXNGAWA+v9N7Y0wXvTf7'
b'<KEY>'
b'7E0YjtHWuQwnmmEAIqJklWP6kqNt+tmU/Y1KcsutJpQ4Sfp8oR9+mSZmTjswaSinkWneIPvl'
b'aGfHnbMvgYx1yvzCzFI5vz8n+8iG3NZ27OS4yT2Hj1x1G+cACDcre6T2aM0iYuR3Xj/t0bw0'
b'hBDBnVsGGMeBun926/zJ4+eCjcuhUe/FLRnbTxIErAtPV8QBPpxqvP3mozcenACouwrpZKYX'
b'<KEY>'
b'<KEY>'
b'//<KEY>
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'ZzETUyYGaCCx6CgAVbSUUrTru9KbdZKpjlpsBgPOaK5hEZW1NvGWLkFrKFRtw2LkaHP6pkTA'
b'bb9Pm+6mWqHrUMWOHSI67qTrypy2N0XjjHsTkTQsxIROqIoU4CAhyJCk7KlIIamaZFlFG7Pb'
b'aLtHZkjYa/D85ObsvdOxyxmzrqqc/bTHiByeU3On+elde8M15zOm2sY/p6M8eWpBJkBRDRFV'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>
b'<KEY>'
b'<KEY>OFRKH/wre/cf9U0nf/yQU+'
b'ffzl+bNnJDsrncn9e2evvXJLgNsL/Nmff/Avf/TkycWqMkMSrNaNqoISTXgBMoUQYBJGv3/r'
b'5Dtv3ksa8+Hjy0+fPH92fgng7NbytLcH9+/cOysF+Llv3O1Pbr3zwedAKCyAVFfvAQOl+X0i'
b'X9/57jdzIJ6v8MUXXzx+/Phi9VxVz05v3b19++3XXimGRYe/+Cu/+Fu//S82589NhWKJYJSK'
b'cAojqKIvs6g0alVVs5LBr5eXlz/58MnDO3eEcdbboggE5xd1E4wIsbJyvvfRxwAiwoMJtGSi'
b'<KEY>'
b'/<KEY>'
b'<KEY>'
b'dF1Hib7vN5uVmU1ezbnUq282q8ucqs8ff/LHf/R7lcVpIkVVpETAK2Rwr0GBLaR0UPHIaDyq'
b'lG7RLXqqBOXycq1SSimQcB88BozBayRBUbUwSgSEpaTmVVXhoChUommwtMCKmpkWsaJSGkvq'
b'GR0SEp7cQGaTwSzbyVQOCYj/NHEkL1G+sr/PTQtnIP1JtFRbLtStmJou5jLjuwEcKNRe+CLM'
b'CPicT03F2nRlLidMN++Q0111zAvfvv8r9ViW8Jz6HUgkkgaJ6uthCGT6C4N4wkmBjZZkmbxf'
b'Dl4tbBE+V6kqJ8ZxdjGhXcdzW0bLwI4gNCvhPhdN51Mz51an+5Ojlf2J2FFgk6k+2gcymjpM'
b'tsiHWdG9Kw5GhI+RsikDlM62AUTYrj1yPwwse/rqq68uFotucTLZHCYvvv0Bb9dlj/UfzSQT'
b'+<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'Q0SYICxPtrQe7IVw5Uk7TeVs4gS6WKzXA+EU7bructj8s+//Xq8C+p/9+e9879tvBvAHP3n3'
b'<KEY>'
b'<KEY>'
b'<KEY>
b'+<KEY>'
b'dKKJA0USKtL1ZdGLaVDcA1q6brFcLruuMxPL3ohFBJLZD2PpxE1k9EsmbCTPIhYCs07UVHXK'
b'<KEY>
b'<KEY>eUmKDMNw'
b'eXnpNSYUGYiIdU0ianLp2PJmNDjS+HxephiA2Wl7OD6NiZemxVPVzJheI0KnFG3bBw8mcfx1'
b'9P8BQG3A09y+eCtaXDGA2z+nkusyA26aw32bRT3MBo3RX3zLQk2MJqGqNXZ6zb3bqLW6WFf6'
b'<KEY>kPDmSyc42bv8xJ799jgbMSem/19mD0Zh6Otf5tl8lUpWDnTKyXjsOmjIbc46N'
b'<KEY>'
b'<KEY>'
b'<KEY>
b'<KEY>'
b'<KEY>'
b'ekgEpSwVZRguzfTZZv07v/u7f/kv/LmTpd7u7c1Hr/7o3Q9pBplSQQtJUZkOgBcusLwhnCHo'
b'+kUmxuqLkeoiAtlAKuDAOmqFDNYFpSAWJ0uostZixUlTSzs42vntgRChhr/5xqNXX3mQC+N/'
b'+vv/6w/fee/2w9crZLE4cS0RasIhai/87f/nB7/2y9+zgtMFfukXfu63v/+HYguIKJEmfqYa'
b'TCBjTmjh8MqDu289uq1AKP6v77//Bx889ZNXXcy6BRGL085pl772Ovx3/+M/+E/+w7/x6DYe'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'j8DePfP2TK29qi97P03VHqZgu2Y0rrrnK++OQ6nvq9XztZebHNCYy5ZT1NYNX/BStceonpwN'
b'kP7U5to4+HfNndmgUIQSElRSIoSevnnSAjZbGup5DNDOvkWSwOYoIDOb4FcsEtfk9ZiX+SEq'
b'3Lf67VR5uAqpL4TcmUvqR1+93+YbtfyKedltzwsHULmFmXdupyarcPeh1q7rEl7CJLnOagpT'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'/<KEY>'
b'<KEY>'
b'<KEY>'
b'oFPtVXtSVFVMU7OnpbOuh5qWTqxo6dR6tV6tWOnMihYTLaKFk7q3QfsHIpAwoOTkkIBD+vAn'
b'WnT2D9gSk33KM1I2BbTFtrz8GSSzgllAPOfu+NsEya7c82GIKWSr1aKUWTboa5l1zhWce6zz'
b'kbtn58XEaV31yPXTxxFHaKpzHgg0HxCkOTC4Xq9zV0VEbu2uLLAT0bj1vRERRQuIIkl4sOLY'
b'<KEY>'
b'<KEY>'
b'<KEY>
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'cSlPz8MBA9549ZVlh843hX6rVw5rVlctg1iVLmyx2jDEUtOlCE0zRbPeBhBbOM4xEdjTLy8c'
b'<KEY>'
b'CJ8yASOogIaEt/zzmb5+GIaUheahmaoqQhWenSwf3lkA+PzJ0x+/+xOqiGn1jQjMMmWzrFeD'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>
b'<KEY>'
b'TAFpV2N4dFMcbZtBRJIPFh0H293JLbdHsohOqRXkCnevLGaWxHM61iN2sESxwyVnO9t7rmrk'
b'nHu+ipOWK4oeK3ubYm9PHa4BjFtsXsPRZnxd5YX1/5TE6mdaXqpte+P8Ahcgvsgh5FrSrwdJ'
b'PLJc77v/cmU3i1OORW77Zu7DKN7tLviWnnNbz5GWNo3zC9sw7/VWHbsNkEic/p95bLjMUG+v'
b'vOHYhFw5xYcC6JWI/nGNLfv6d3G0tMjMwpi0ilBXdQ6dMIFItMHUpAGxeBAiVdipOlxMd30K'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>
b'PVkbxYh7d05zWN/95FPplnAIotOQXiCDB/pu6eq9mMe6Mjba/ZPf+f6/99d/CRW//Avf6/R/'
b'3oQtFyfD+jLjKBLCL5MvQ90kbDQAlaJ9Z4PQKZCSRFlVLF30RaiiCjH1KGJ91/eLxYl1S7EC'
b'ttR1lQGEdaVYL6qj5xZIDwFME+moJcSZ3GKlOaSNfJ4SoGRa363NM/UmIBEuzR1CSIlRaSQy'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>
b'rtP0sA1HapW4enmoiE24ZkJVqpOJ1SdFIQIbYY9HYqfELCqtlSlLxvhnw2SgirtY5BqV0+Ui'
b'g6Q2Q1SYaB+ODqQPol0pxX3VtEcihIqUja8rYcDtE42IrlsMQaikUYKkwLgNiFFo9+xy/S9/'
b'/Pi7bz9Q4MGd0/t3TgE8v8SXFxfPnq8/f3a+ql5s6W3DC6EhLf+XznrR+rJL8t29lOJOVQDm'
b'w0qEBhLitWqxwaupXFxe6tkJgL7v+XwVEaEEKaqQhId+ie0QAlFluFqhDx7BELOOJMPJyDlW'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'Ey87f2r6cpTX2eOhj3K9LxQD5qcJx/AGiiLY8ABISEhmXQhOrHlKNev1ehi8MhRMi1cpRbWI'
b'DeDIhAmthaIc7+m+MnG8DQ1Zdfu7iIAJWyktkrR5L+4GbIzC0uTsl3mJm0sAfe73T45g0M0l'
b'eI9DvVLn3R6Hk7rnMBMjKkUKk6PfWKZSDsn82+OAI/lUQVH7/NmX//y3f+fWgze/<KEY>'
b'Q3ZXqVmLmMopUlVSZ/060NzDpMX22u6PKbnZFv+HIg0AdIcfS7gmoVBi/NT8zKg9mfkUzNdt'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'MfMaATE1qjhDtfNkp0TMFO4FrCucnUAEFeYoIVVJH1N3ARJiSKkDCddgHz85f3L+/JtvvPLo'
b'wYkACtw+we2TU94/hd5/tsYf//iT83VsKkLFQSVgYky5IjNolnQGCon0g9L2RktwwhAECDUk'
b'A6fiKQmbVR+GYdCElrOOasqWRaal4BnTR+3MH7b7F6PWeCLxToFqBBWW5xm38y0YES1VFQHV'
b'IuG5ap2AROkafItRPWpu+HDvzE6XJQAHLurGi1ZWk7KpQahpR4qKVToQHiiqAKX6Zr1enC6W'
b'gBJFjZBhPSy6RQu8s9SKQ4lF32eAweXgAOpm6Kx3p0Op5hsvamYWUTebTSlltV4Xj2fPLh/c'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'JpP8TK5md83k/+e2ZQghKiDDNRUFaZIHAQYyhVgokJ9On3T7O1tAdt4EuMlkiwQEPgubGbch'
b'<KEY>'
b'<KEY>'
b'<KEY>
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'nsQm2WskKH5pYNWWhKj1vNaq6BwoJqCXziIGijmD1K5Tr056M63UTUmIna5E83SIy9Vz1YUV'
b'cW+7W8RUAUJLZzG6twHu3vf94AqxxeLEfRAxLelFICJCAbUw7fqqGUQYAoM1s5UVTc5SQgkR'
b'CHeSvV9FBOb6OcjWQgWJ1PEDID350V2maa5FvvLQxS5Bvua2Fzb1pykvW+3N6XaTkWearpFN'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'tNWDmQUms7tIy+TQvodQQqGzvjDQfGf2uckbDuteIXnN7bmYSBoaCKIA1NRwN2sqRnIikWkm'
b'OX22SuACEDZnx+eNzDNsO6dxSF+u0rUfJ/3zK9Jg6YFx1UySKA9aMm/h3mLdK9eIFi/jebHT'
b'FxGRUR8/F58Ob+OslbMmhQgOG3XktHghOtuxjs+fOlrDwcVAKrJvNCBKCkWChLBBPak4UACh'
b'GrO+YJIsImgmUHGRiLR4ilxsBqAjcLLon1WnCrSshvWyK/TQTLYiCET1CrViCIiWtsKK6KqG'
b'<KEY>'
| |
<W> element
"""
return self.nest(SsmlW(words=words, role=role, **kwargs))
@deprecated_method('w')
def ssml_w(self, words=None, role=None, **kwargs):
    """Deprecated alias for :meth:`w`.

    Builds a <W> element by delegating to ``w``; kept only for
    backward compatibility with older callers.

    :param words: Words to speak
    :param role: Customize the pronunciation of words by specifying the
        word's part of speech or alternate meaning
    :param kwargs: additional attributes
    :returns: <W> element
    """
    return self.w(words=words, role=role, **kwargs)
class SsmlBreak(TwiML):
    """The <break> element: adds a pause inside <Say>."""

    def __init__(self, **kwargs):
        super(SsmlBreak, self).__init__(**kwargs)
        # NOTE: unlike most elements in this module, the tag is lowercase.
        self.name = 'break'
class Pay(TwiML):
    """The <Pay> TwiML verb."""

    def __init__(self, **kwargs):
        super(Pay, self).__init__(**kwargs)
        self.name = 'Pay'

    def prompt(self, for_=None, error_type=None, card_type=None, attempt=None,
               **kwargs):
        """Nest a <Prompt> element under this <Pay>.

        :param for_: Name of the payment source data element
        :param error_type: Type of error
        :param card_type: Type of the credit card
        :param attempt: Current attempt count
        :param kwargs: additional attributes
        :returns: the nested <Prompt> element
        """
        return self.nest(Prompt(for_=for_, error_type=error_type,
                                card_type=card_type, attempt=attempt,
                                **kwargs))

    def parameter(self, name=None, value=None, **kwargs):
        """Nest a <Parameter> element under this <Pay>.

        :param name: The name of the custom parameter
        :param value: The value of the custom parameter
        :param kwargs: additional attributes
        :returns: the nested <Parameter> element
        """
        return self.nest(Parameter(name=name, value=value, **kwargs))
class Sms(TwiML):
    """The <Sms> TwiML noun."""

    def __init__(self, message, **kwargs):
        super(Sms, self).__init__(**kwargs)
        self.name = 'Sms'
        # The message text becomes the element body.
        self.value = message
class Reject(TwiML):
    """The <Reject> TwiML verb (empty element, attributes only)."""

    def __init__(self, **kwargs):
        super(Reject, self).__init__(**kwargs)
        self.name = 'Reject'
class Redirect(TwiML):
    """The <Redirect> TwiML verb."""

    def __init__(self, url, **kwargs):
        super(Redirect, self).__init__(**kwargs)
        self.name = 'Redirect'
        # The target URL becomes the element body.
        self.value = url
class Record(TwiML):
    """The <Record> TwiML verb (empty element, attributes only)."""

    def __init__(self, **kwargs):
        super(Record, self).__init__(**kwargs)
        self.name = 'Record'
class Queue(TwiML):
    """The <Queue> TwiML noun."""

    def __init__(self, name, **kwargs):
        super(Queue, self).__init__(**kwargs)
        self.name = 'Queue'
        # The queue name becomes the element body.
        self.value = name
class Leave(TwiML):
    """The <Leave> TwiML verb (empty element, attributes only)."""

    def __init__(self, **kwargs):
        super(Leave, self).__init__(**kwargs)
        self.name = 'Leave'
class Hangup(TwiML):
    """The <Hangup> TwiML verb (empty element, attributes only)."""

    def __init__(self, **kwargs):
        super(Hangup, self).__init__(**kwargs)
        self.name = 'Hangup'
class Gather(TwiML):
    """The <Gather> TwiML verb."""

    def __init__(self, **kwargs):
        super(Gather, self).__init__(**kwargs)
        self.name = 'Gather'

    def say(self, message=None, voice=None, loop=None, language=None, **kwargs):
        """Nest a <Say> element under this <Gather>.

        :param message: Message to say
        :param voice: Voice to use
        :param loop: Times to loop message
        :param language: Message language
        :param kwargs: additional attributes
        :returns: the nested <Say> element
        """
        return self.nest(Say(message=message, voice=voice, loop=loop,
                             language=language, **kwargs))

    def pause(self, length=None, **kwargs):
        """Nest a <Pause> element under this <Gather>.

        :param length: Length in seconds to pause
        :param kwargs: additional attributes
        :returns: the nested <Pause> element
        """
        return self.nest(Pause(length=length, **kwargs))

    def play(self, url=None, loop=None, digits=None, **kwargs):
        """Nest a <Play> element under this <Gather>.

        :param url: Media URL
        :param loop: Times to loop media
        :param digits: Play DTMF tones for digits
        :param kwargs: additional attributes
        :returns: the nested <Play> element
        """
        return self.nest(Play(url=url, loop=loop, digits=digits, **kwargs))
class Enqueue(TwiML):
    """The <Enqueue> TwiML noun."""

    def __init__(self, name=None, **kwargs):
        super(Enqueue, self).__init__(**kwargs)
        self.name = 'Enqueue'
        # Only set a body when a queue name was actually provided.
        if name:
            self.value = name

    def task(self, body, priority=None, timeout=None, **kwargs):
        """Nest a <Task> element under this <Enqueue>.

        :param body: TaskRouter task attributes
        :param priority: Task priority
        :param timeout: Timeout associated with task
        :param kwargs: additional attributes
        :returns: the nested <Task> element
        """
        return self.nest(Task(body, priority=priority, timeout=timeout,
                              **kwargs))
class Task(TwiML):
    """The <Task> TwiML noun."""

    def __init__(self, body, **kwargs):
        super(Task, self).__init__(**kwargs)
        self.name = 'Task'
        # The task attributes become the element body.
        self.value = body
class Echo(TwiML):
    """The <Echo> TwiML verb (empty element, attributes only)."""

    def __init__(self, **kwargs):
        super(Echo, self).__init__(**kwargs)
        self.name = 'Echo'
class Dial(TwiML):
    """The <Dial> TwiML verb."""

    def __init__(self, number=None, **kwargs):
        super(Dial, self).__init__(**kwargs)
        self.name = 'Dial'
        # A number given here becomes the element body; otherwise callers
        # nest a noun (<Client>, <Conference>, <Number>, ...) instead.
        if number:
            self.value = number

    def client(self, identity=None, url=None, method=None,
               status_callback_event=None, status_callback=None,
               status_callback_method=None, **kwargs):
        """Nest a <Client> element under this <Dial>.

        :param identity: Client identity
        :param url: Client URL
        :param method: Client URL Method
        :param status_callback_event: Events to trigger status callback
        :param status_callback: Status Callback URL
        :param status_callback_method: Status Callback URL Method
        :param kwargs: additional attributes
        :returns: the nested <Client> element
        """
        return self.nest(Client(identity=identity, url=url, method=method,
                                status_callback_event=status_callback_event,
                                status_callback=status_callback,
                                status_callback_method=status_callback_method,
                                **kwargs))

    def conference(self, name, muted=None, beep=None,
                   start_conference_on_enter=None, end_conference_on_exit=None,
                   wait_url=None, wait_method=None, max_participants=None,
                   record=None, region=None, coach=None, trim=None,
                   status_callback_event=None, status_callback=None,
                   status_callback_method=None, recording_status_callback=None,
                   recording_status_callback_method=None,
                   recording_status_callback_event=None, event_callback_url=None,
                   jitter_buffer_size=None, participant_label=None, **kwargs):
        """Nest a <Conference> element under this <Dial>.

        :param name: Conference name
        :param muted: Join the conference muted
        :param beep: Play beep when joining
        :param start_conference_on_enter: Start the conference on enter
        :param end_conference_on_exit: End the conference on exit
        :param wait_url: Wait URL
        :param wait_method: Wait URL method
        :param max_participants: Maximum number of participants
        :param record: Record the conference
        :param region: Conference region
        :param coach: Call coach
        :param trim: Trim the conference recording
        :param status_callback_event: Events to call status callback URL
        :param status_callback: Status callback URL
        :param status_callback_method: Status callback URL method
        :param recording_status_callback: Recording status callback URL
        :param recording_status_callback_method: Recording status callback
            URL method
        :param recording_status_callback_event: Recording status callback
            events
        :param event_callback_url: Event callback URL
        :param jitter_buffer_size: Size of jitter buffer for participant
        :param participant_label: A label for participant
        :param kwargs: additional attributes
        :returns: the nested <Conference> element
        """
        # Every documented attribute is forwarded explicitly so that a
        # duplicate keyword in **kwargs fails loudly, as before.
        return self.nest(Conference(
            name, muted=muted, beep=beep,
            start_conference_on_enter=start_conference_on_enter,
            end_conference_on_exit=end_conference_on_exit,
            wait_url=wait_url, wait_method=wait_method,
            max_participants=max_participants, record=record, region=region,
            coach=coach, trim=trim,
            status_callback_event=status_callback_event,
            status_callback=status_callback,
            status_callback_method=status_callback_method,
            recording_status_callback=recording_status_callback,
            recording_status_callback_method=recording_status_callback_method,
            recording_status_callback_event=recording_status_callback_event,
            event_callback_url=event_callback_url,
            jitter_buffer_size=jitter_buffer_size,
            participant_label=participant_label, **kwargs))

    def number(self, phone_number, send_digits=None, url=None, method=None,
               status_callback_event=None, status_callback=None,
               status_callback_method=None, byoc=None, **kwargs):
        """Nest a <Number> element under this <Dial>.

        :param phone_number: Phone Number to dial
        :param send_digits: DTMF tones to play when the call is answered
        :param url: TwiML URL
        :param method: TwiML URL method
        :param status_callback_event: Events to call status callback
        :param status_callback: Status callback URL
        :param status_callback_method: Status callback URL method
        :param byoc: BYOC trunk SID (Beta)
        :param kwargs: additional attributes
        :returns: the nested <Number> element
        """
        return self.nest(Number(phone_number, send_digits=send_digits,
                                url=url, method=method,
                                status_callback_event=status_callback_event,
                                status_callback=status_callback,
                                status_callback_method=status_callback_method,
                                byoc=byoc, **kwargs))

    def queue(self, name, url=None, method=None, reservation_sid=None,
              post_work_activity_sid=None, **kwargs):
        """Nest a <Queue> element under this <Dial>.

        :param name: Queue name
        :param url: Action URL
        :param method: Action URL method
        :param reservation_sid: TaskRouter Reservation SID
        :param post_work_activity_sid: TaskRouter Activity SID
        :param kwargs: additional attributes
        :returns: the nested <Queue> element
        """
        return self.nest(Queue(name, url=url, method=method,
                               reservation_sid=reservation_sid,
                               post_work_activity_sid=post_work_activity_sid,
                               **kwargs))

    def sim(self, sim_sid, **kwargs):
        """Nest a <Sim> element under this <Dial>.

        :param sim_sid: SIM SID
        :param kwargs: additional attributes
        :returns: the nested <Sim> element
        """
        return self.nest(Sim(sim_sid, **kwargs))

    def sip(self, sip_url, username=None, password=None, url=None, method=None,
            status_callback_event=None, status_callback=None,
            status_callback_method=None, **kwargs):
        """Nest a <Sip> element under this <Dial>.

        :param sip_url: SIP URL
        :param username: SIP Username
        :param password: SIP Password
        :param url: Action URL
        :param method: Action URL method
        :param status_callback_event: Status callback events
        :param status_callback: Status callback URL
        :param status_callback_method: Status callback URL method
        :param kwargs: additional attributes
        :returns: the nested <Sip> element
        """
        return self.nest(Sip(sip_url, username=username, password=password,
                             url=url, method=method,
                             status_callback_event=status_callback_event,
                             status_callback=status_callback,
                             status_callback_method=status_callback_method,
                             **kwargs))
class Sip(TwiML):
    """The <Sip> TwiML noun."""

    def __init__(self, sip_url, **kwargs):
        super(Sip, self).__init__(**kwargs)
        self.name = 'Sip'
        # The SIP URL becomes the element body.
        self.value = sip_url
class Sim(TwiML):
    """The <Sim> TwiML noun."""

    def __init__(self, sim_sid, **kwargs):
        super(Sim, self).__init__(**kwargs)
        self.name = 'Sim'
        # The SIM SID becomes the element body.
        self.value = sim_sid
class Number(TwiML):
    """The <Number> TwiML noun."""

    def __init__(self, phone_number, **kwargs):
        super(Number, self).__init__(**kwargs)
        self.name = 'Number'
        # The phone number becomes the element body.
        self.value = phone_number
class Conference(TwiML):
    """The <Conference> TwiML noun."""

    def __init__(self, name, **kwargs):
        super(Conference, self).__init__(**kwargs)
        self.name = 'Conference'
        # The conference name becomes the element body.
        self.value = name
class Client(TwiML):
    """The <Client> TwiML noun."""

    def __init__(self, identity=None, **kwargs):
        super(Client, self).__init__(**kwargs)
        self.name = 'Client'
        # Only set a body when an identity was actually provided;
        # callers may instead nest an <Identity> element.
        if identity:
            self.value = identity

    def identity(self, client_identity, **kwargs):
        """Nest an <Identity> element under this <Client>.

        :param client_identity: Identity of the client to dial
        :param kwargs: additional attributes
        :returns: the nested <Identity> element
        """
        return self.nest(Identity(client_identity, **kwargs))

    def parameter(self, name=None, value=None, **kwargs):
        """Nest a <Parameter> element under this <Client>.

        :param name: The name of the custom parameter
        :param value: The value of the custom parameter
        :param kwargs: additional attributes
        :returns: the nested <Parameter> element
        """
        return self.nest(Parameter(name=name, value=value, **kwargs))
class Identity(TwiML):
    """The <Identity> TwiML noun."""

    def __init__(self, client_identity, **kwargs):
        super(Identity, self).__init__(**kwargs)
        self.name = 'Identity'
        # The client identity becomes the element body.
        self.value = client_identity
class Connect(TwiML):
""" <Connect> TwiML Verb """
def __init__(self, **kwargs):
    """Initialize the <Connect> verb element."""
    super(Connect, self).__init__(**kwargs)
    self.name = 'Connect'
def room(self, name, participant_identity=None, **kwargs):
    """Nest a <Room> element under this <Connect>.

    :param name: Room name
    :param participant_identity: Participant identity when connecting to
        the Room
    :param kwargs: additional attributes
    :returns: the nested <Room> element
    """
    return self.nest(
        Room(name, participant_identity=participant_identity, **kwargs))
def autopilot(self, name, **kwargs):
    """Nest an <Autopilot> element under this <Connect>.

    :param name: Autopilot assistant sid or unique name
    :param kwargs: additional attributes
    :returns: the nested <Autopilot> element
    """
    return self.nest(Autopilot(name, **kwargs))
def stream(self, name=None, connector_name=None, url=None, track=None,
status_callback=None, status_callback_method=None, **kwargs):
"""
Create a <Stream> element
:param name: Friendly name given to the Stream
:param | |
# Copyright 2014 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import json
import base64
from f5.common.logger import Log
from f5.common import constants as const
from f5.bigip import exceptions
from f5.bigip.interfaces import log
# Management - Device
class Device(object):
def __init__(self, bigip):
self.bigip = bigip
self.bigip.icontrol.add_interfaces(['Management.Trust'])
self.mgmt_trust = self.bigip.icontrol.Management.Trust
# create empty lock instance ID
self.lock = None
self.devicename = None
@log
def get_device_name(self):
""" Get device name """
if not self.devicename:
request_url = self.bigip.icr_url + '/cm/device'
request_filter = '/?$select=name,selfDevice'
request_filter += '&filter partition eq Common'
request_url += request_filter
response = self.bigip.icr_session.get(
request_url, timeout=const.CONNECTION_TIMEOUT)
if response.status_code < 400:
response_obj = json.loads(response.text)
if 'items' in response_obj:
devices = response_obj['items']
for device in devices:
if device['selfDevice'] == 'true':
self.devicename = device['name']
else:
Log.error('device', response.text)
raise exceptions.DeviceQueryException(response.text)
return self.devicename
@log
def get_all_device_names(self):
""" Get all device name """
request_url = self.bigip.icr_url + '/cm/device'
request_filter = '/?$select=name&filter partition eq Common'
request_url += request_filter
response = self.bigip.icr_session.get(
request_url, timeout=const.CONNECTION_TIMEOUT)
if response.status_code < 400:
response_obj = json.loads(response.text)
if 'items' in response_obj:
devices = response_obj['items']
device_names = []
for device in devices:
device_names.append(device['name'])
return device_names
else:
Log.error('device', response.text)
raise exceptions.DeviceQueryException(response.text)
return []
@log
def get_lock(self):
""" Get device lock """
current_lock = self._get_lock()
new_lock = int(time.time())
if current_lock:
if (new_lock - current_lock) > const.CONNECTION_TIMEOUT:
Log.info('Device', 'Locking device %s with lock %s'
% (self.get_device_name(), new_lock))
self._set_lock(new_lock)
return True
else:
return False
else:
Log.info('Device', 'Locking device %s with lock %s'
% (self.get_device_name(), new_lock))
self._set_lock(int(time.time()))
return True
@log
def release_lock(self):
""" Release device lock """
current_lock = self._get_lock()
if current_lock == self.lock:
Log.info('Device', 'Releasing device lock for %s'
% self.get_device_name())
self._set_lock(None)
return True
else:
Log.info('Device', 'Device has foreign lock instance on %s '
% self.get_device_name() + ' with lock %s '
% current_lock)
return False
def _get_lock(self):
""" Get device lock """
request_url = self.bigip.icr_url + '/cm/device'
request_url += '?$select=selfDevice,comment'
response = self.bigip.icr_session.get(
request_url, timeout=const.CONNECTION_TIMEOUT)
current_lock = ''
if response.status_code < 400:
response_obj = json.loads(response.text)
if 'items' in response_obj:
devices = response_obj['items']
for device in devices:
if device['selfDevice']:
if 'comment' in device:
current_lock = device['comment']
if current_lock.startswith(const.DEVICE_LOCK_PREFIX):
return int(current_lock.replace(const.DEVICE_LOCK_PREFIX, ''))
def _set_lock(self, lock):
""" Set device lock """
dev_name = self.get_device_name()
if lock:
self.lock = lock
lock_comment = const.DEVICE_LOCK_PREFIX + str(lock)
else:
lock_comment = ''
request_url = self.bigip.icr_url + '/cm/device/'
request_url += '~Common~' + dev_name
payload = dict()
payload['comment'] = lock_comment
response = self.bigip.icr_session.patch(
request_url, data=json.dumps(payload),
timeout=const.CONNECTION_TIMEOUT)
if response.status_code < 400:
return True
return False
@log
def get_mgmt_addr(self):
""" Get device management ip """
request_url = self.bigip.icr_url + '/cm/device/~Common'
request_url += '~' + self.get_device_name()
request_filter = '/?$select=managementIp'
request_url += request_filter
response = self.bigip.icr_session.get(
request_url, timeout=const.CONNECTION_TIMEOUT)
if response.status_code < 400:
response_obj = json.loads(response.text)
return response_obj['managementIp']
else:
Log.error('device', response.text)
raise exceptions.DeviceQueryException(response.text)
return None
@log
def get_all_mgmt_addrs(self):
""" Get device management ips """
request_url = self.bigip.icr_url + '/cm/device'
request_url += '/?$select=managementIp'
response = self.bigip.icr_session.get(
request_url, timeout=const.CONNECTION_TIMEOUT)
if response.status_code < 400:
response_obj = json.loads(response.text)
if 'items' in response_obj:
mgmt_addrs = []
for device in response_obj['items']:
mgmt_addrs.append(device['managementIp'])
return mgmt_addrs
else:
Log.error('device', response.text)
raise exceptions.DeviceQueryException(response.text)
return None
@log
def get_mgmt_addr_by_device(self, devicename):
request_url = self.bigip.icr_url + '/cm/device'
request_url += '/?$select=managementIp,name'
response = self.bigip.icr_session.get(
request_url, timeout=const.CONNECTION_TIMEOUT)
if response.status_code < 400:
response_obj = json.loads(response.text)
if 'items' in response_obj:
for device in response_obj['items']:
if device['name'] == devicename:
return device['managementIp']
else:
Log.error('device', response.text)
raise exceptions.DeviceQueryException(response.text)
return None
@log
def get_configsync_addr(self):
""" Get device config sync ip """
request_url = self.bigip.icr_url + '/cm/device/~Common'
request_url += '~' + self.get_device_name()
request_url += '/?$select=configsyncIp'
response = self.bigip.icr_session.get(
request_url, timeout=const.CONNECTION_TIMEOUT)
if response.status_code < 400:
response_obj = json.loads(response.text)
return response_obj['configsyncIp']
else:
Log.error('device', response.text)
raise exceptions.DeviceQueryException(response.text)
return None
@log
def set_configsync_addr(self, ip_address=None, folder='/Common'):
""" Set device config sync ip """
dev_name = self.get_device_name()
request_url = self.bigip.icr_url + '/cm/device/'
request_url += '~Common~' + dev_name
payload = dict()
if not ip_address:
payload['configsyncIp'] = None
else:
payload['configsyncIp'] = ip_address
response = self.bigip.icr_session.patch(
request_url, data=json.dumps(payload),
timeout=const.CONNECTION_TIMEOUT)
if response.status_code < 400:
return True
else:
Log.error('device', response.text)
raise exceptions.DeviceQueryException(response.text)
return False
@log
def get_primary_mirror_addr(self):
""" Get device primary mirror ip """
request_url = self.bigip.icr_url + '/cm/device/~Common'
request_url += '~' + self.get_device_name()
request_url += '/?$select=mirrorIp'
response = self.bigip.icr_session.get(
request_url, timeout=const.CONNECTION_TIMEOUT)
if response.status_code < 400:
response_obj = json.loads(response.text)
primary = response_obj['mirrorIp']
if primary == 'any6':
return None
else:
return primary
else:
Log.error('device', response.text)
raise exceptions.DeviceQueryException(response.text)
return None
@log
def get_secondary_mirror_addr(self):
""" Get device secondary mirror ip """
request_url = self.bigip.icr_url + '/cm/device/~Common'
request_url += '~' + self.get_device_name()
request_url += '/?$select=mirrorSecondaryIp'
response = self.bigip.icr_session.get(
request_url, timeout=const.CONNECTION_TIMEOUT)
if response.status_code < 400:
response_obj = json.loads(response.text)
secondary = response_obj['mirrorSecondaryIp']
if secondary == 'any6':
return None
else:
return secondary
else:
Log.error('device', response.text)
raise exceptions.DeviceQueryException(response.text)
return None
@log
def set_primary_mirror_addr(self, ip_address=None, folder='/Common'):
""" Set device primary mirror ip """
dev_name = self.get_device_name()
request_url = self.bigip.icr_url + '/cm/device/'
request_url += '~Common~' + dev_name
payload = dict()
if not ip_address:
payload['mirrorIp'] = '::'
else:
payload['mirrorIp'] = ip_address
response = self.bigip.icr_session.patch(
request_url, data=json.dumps(payload),
timeout=const.CONNECTION_TIMEOUT)
if response.status_code < 400:
return True
else:
Log.error('device', response.text)
raise exceptions.DeviceUpdateException(response.text)
return False
@log
def set_secondary_mirror_addr(self, ip_address=None, folder='/Common'):
""" Set device secondary mirror ip """
dev_name = self.get_device_name()
request_url = self.bigip.icr_url + '/cm/device/'
request_url += '~Common~' + dev_name
payload = dict()
if not ip_address:
payload['mirrorSecondaryIp'] = '::'
else:
payload['mirrorSecondaryIp'] = ip_address
response = self.bigip.icr_session.patch(
request_url, data=json.dumps(payload),
timeout=const.CONNECTION_TIMEOUT)
if response.status_code < 400:
return True
else:
Log.error('device', response.text)
raise exceptions.DeviceUpdateException(response.text)
return False
@log
def get_failover_addrs(self):
""" Get device failover ips """
request_url = self.bigip.icr_url + '/cm/device/~Common'
request_url += '~' + self.get_device_name()
request_url += '/?$select=unicastAddress'
response = self.bigip.icr_session.get(
request_url, timeout=const.CONNECTION_TIMEOUT)
if response.status_code < 400:
response_obj = json.loads(response.text)
return_addresses = []
if 'unicastAddress' in response_obj:
uas = response_obj['unicastAddress']
for ua in uas:
return_addresses.append(ua['ip'])
return return_addresses
else:
return []
else:
Log.error('device', response.text)
raise exceptions.DeviceQueryException(response.text)
return []
@log
def set_failover_addrs(self, ip_addrs=None, folder='/Common'):
""" Get device failover ips """
dev_name = self.get_device_name()
dev_ip = self.get_mgmt_addr()
request_url = self.bigip.icr_url + '/cm/device/'
request_url += '~Common~' + dev_name
payload = dict()
unicast_addresses = []
if len(ip_addrs):
unicast_addresses.append({'effectiveIp': dev_ip,
'effectivePort': 1026,
'ip': dev_ip,
'port': 1026})
for ip_address in ip_addrs:
unicast_addresses.append({'effectiveIp': ip_address,
'effectivePort': 1026,
'ip': ip_address,
'port': 1026})
payload['unicastAddress'] = unicast_addresses
response = self.bigip.icr_session.patch(
request_url, data=json.dumps(payload),
timeout=const.CONNECTION_TIMEOUT)
if response.status_code < 400:
return True
else:
Log.error('device', response.text)
raise exceptions.DeviceUpdateException(response.text)
return False
@log
def get_failover_state(self):
""" Get device failover state """
request_url = self.bigip.icr_url + '/cm/device/~Common'
request_url += '~' + self.get_device_name()
request_url += '/?$select=failoverState'
response = self.bigip.icr_session.get(
request_url, timeout=const.CONNECTION_TIMEOUT)
if response.status_code < 400:
response_obj = json.loads(response.text)
return response_obj['failoverState']
else:
Log.error('device', response.text)
raise exceptions.DeviceQueryException(response.text)
return None
@log
def get_device_group(self):
""" Get device group """
request_url = self.bigip.icr_url + '/cm/device-group'
request_url += '/?$select=name,type'
response = self.bigip.icr_session.get(
request_url, timeout=const.CONNECTION_TIMEOUT)
if response.status_code < 400:
response_obj = json.loads(response.text)
if 'items' in response_obj:
dsgs = response_obj['items']
for dsg in dsgs:
if dsg['type'] == 'sync-failover':
return dsg['name']
return None
elif response.status_code == 404:
return None
else:
Log.error('device-group', response.text)
raise exceptions.DeviceQueryException(response.text)
return None
@log
def remove_from_device_group(self, name=None, folder='/Common'):
""" Remove device from group """
device_group = self.get_device_group()
if device_group:
return self.bigip.cluster.remove_devices(device_group,
[self.get_device_name()])
@log
def remove_all_peers(self):
""" Remove all peers from group """
self.bigip.system.set_folder('/Common')
current_dev_name = self.get_device_name()
devs_to_remove = []
for dev in self.get_all_device_names():
if dev != current_dev_name:
devs_to_remove.append(dev)
if devs_to_remove:
try:
self.mgmt_trust.remove_device(devs_to_remove)
except Exception as e:
Log.error('device', e.message)
raise exceptions.DeviceUpdateException(e.message)
self.remove_metadata(None, {
'root_device_name': None,
'root_device_mgmt_address': None})
@log
def reset_trust(self, new_name):
""" Remove trust """
self.bigip.system.set_folder('/Common')
self.remove_all_peers()
try:
self.mgmt_trust.reset_all(new_name, False, '', '')
except Exception as e:
Log.error('device', e.message)
raise exceptions.DeviceUpdateException(e.message)
self.remove_metadata(None, {
'root_device_name': None,
'root_device_mgmt_address': None})
self.devicename = None
self.get_device_name()
@log
def set_metadata(self, name=None, device_dict=None):
""" Set device metadata """
if not name:
name = self.get_device_name()
if isinstance(device_dict, | |
<gh_stars>1-10
#!/usr/bin/env python
'''
@author <NAME> <<EMAIL>>
@file ion/processes/data/ingestion/science_granule_ingestion_worker.py
@date 06/26/12 11:38
@description Ingestion Process
'''
from ion.services.dm.utility.granule.record_dictionary import RecordDictionaryTool
from ion.services.dm.utility.granule_utils import time_series_domain
from interface.services.coi.iresource_registry_service import ResourceRegistryServiceClient
from pyon.core.exception import CorruptionError, NotFound, BadRequest
from pyon.ion.event import handle_stream_exception, EventPublisher
from pyon.ion.event import EventSubscriber
from pyon.public import log, RT, PRED, CFG, OT
from ion.services.dm.inventory.dataset_management_service import DatasetManagementService
from interface.objects import Granule
from ion.core.process.transform import TransformStreamListener, TransformStreamProcess
from ion.util.time_utils import TimeUtils
from ion.util.stored_values import StoredValueManager
from interface.services.dm.iingestion_worker import BaseIngestionWorker
from pyon.ion.stream import StreamSubscriber
from gevent.coros import RLock
from gevent import event
from coverage_model.parameter_values import SparseConstantValue
from coverage_model import SparseConstantType
from coverage_model import NumpyParameterData, ConstantOverTime
from coverage_model.parameter_types import CategoryType, RecordType
from ooi.timer import Timer, Accumulator
from ooi.logging import TRACE
from logging import DEBUG
import collections
import gevent
import time
import uuid
import numpy as np
from gevent.queue import Queue
# Shortcut to DatasetManagementService's helper that sanitizes numpy values
# inside nested dict/list documents before object-store writes.
numpy_walk = DatasetManagementService.numpy_walk
# NOTE(review): not referenced in this part of the file -- presumably a
# stats-reporting interval; confirm usage before changing.
REPORT_FREQUENCY=100
# Give-up threshold (seconds) for persist_or_timeout's retry loop.
MAX_RETRY_TIME=3600
class ScienceGranuleIngestionWorker(TransformStreamListener, BaseIngestionWorker):
    # Maximum number of entries kept in the dataset and coverage LRU caches.
    CACHE_LIMIT=CFG.get_safe('container.ingestion_cache',5)
def __init__(self, *args,**kwargs):
TransformStreamListener.__init__(self, *args, **kwargs)
BaseIngestionWorker.__init__(self, *args, **kwargs)
#--------------------------------------------------------------------------------
# Ingestion Cache
# - Datasets
# - Coverage instances
#--------------------------------------------------------------------------------
self._datasets = collections.OrderedDict()
self._coverages = collections.OrderedDict()
self._bad_coverages = {}
self.time_stats = Accumulator(format='%3f')
# unique ID to identify this worker in log msgs
self._id = uuid.uuid1()
    def on_start(self): #pragma no cover
        """Wire up the worker: RPC endpoint, event publisher, lookup docs and
        the stream listener.  Deliberately bypasses
        TransformStreamListener.on_start so the subscriber object and its
        thread can be managed explicitly by this class."""
        #--------------------------------------------------------------------------------
        # Explicit on_start
        #--------------------------------------------------------------------------------
        # Skip TransformStreamListener and go to StreamProcess to avoid the subscriber being created
        # We want explicit management of the thread and subscriber object for ingestion
        TransformStreamProcess.on_start(self)
        self.queue_name = self.CFG.get_safe('process.queue_name',self.id)
        self.subscriber = StreamSubscriber(process=self, exchange_name=self.queue_name, callback=self.receive_callback)
        # Lock serializing listener start/stop (see start_listener/stop_listener).
        self.thread_lock = RLock()
        #--------------------------------------------------------------------------------
        # Normal on_start after this point
        #--------------------------------------------------------------------------------
        BaseIngestionWorker.on_start(self)
        # RPC endpoint so other processes can invoke this worker directly.
        self._rpc_server = self.container.proc_manager._create_listening_endpoint(from_name=self.id, process=self)
        self.add_endpoint(self._rpc_server)
        self.event_publisher = EventPublisher(OT.DatasetModified)
        self.stored_value_manager = StoredValueManager(self.container)
        self.lookup_docs = self.CFG.get_safe('process.lookup_docs',[])
        self.input_product = self.CFG.get_safe('process.input_product','')
        # Queue of reference-key updates delivered by the lookup monitor below.
        self.new_lookups = Queue()
        self.lookup_monitor = EventSubscriber(event_type=OT.ExternalReferencesUpdatedEvent, callback=self._add_lookups, auto_delete=True)
        self.add_endpoint(self.lookup_monitor)
        self.connection_id = ''
        self.connection_index = None
        self.start_listener()
    def on_quit(self): #pragma no cover
        """Shut down: close the publisher, stop the listener, close coverages."""
        self.event_publisher.close()
        if self.subscriber_thread:
            self.stop_listener()
        # NOTE(review): stop_listener already closes and clears the coverage
        # cache, so this loop normally sees an empty dict; it remains as a
        # safety net for the path where no listener thread existed.
        for stream, coverage in self._coverages.iteritems():
            try:
                coverage.close(timeout=5)
            except:
                log.exception('Problems closing the coverage')
        self._coverages.clear()
        TransformStreamListener.on_quit(self)
        BaseIngestionWorker.on_quit(self)
def start_listener(self):
# We use a lock here to prevent possible race conditions from starting multiple listeners and coverage clobbering
with self.thread_lock:
self.subscriber_thread = self._process.thread_manager.spawn(self.subscriber.listen, thread_name='%s-subscriber' % self.id)
def stop_listener(self):
# Avoid race conditions with coverage operations (Don't start a listener at the same time as closing one)
with self.thread_lock:
self.subscriber.close()
self.subscriber_thread.join(timeout=10)
for stream, coverage in self._coverages.iteritems():
try:
coverage.close(timeout=5)
except:
log.exception('Problems closing the coverage')
self._coverages.clear()
self.subscriber_thread = None
def pause(self):
if self.subscriber_thread is not None:
self.stop_listener()
def resume(self):
if self.subscriber_thread is None:
self.start_listener()
def _add_lookups(self, event, *args, **kwargs):
if event.origin == self.input_product:
if isinstance(event.reference_keys, list):
self.new_lookups.put(event.reference_keys)
def _new_dataset(self, stream_id):
'''
Adds a new dataset to the internal cache of the ingestion worker
'''
rr_client = self.container.resource_registry
datasets, _ = rr_client.find_subjects(subject_type=RT.Dataset,predicate=PRED.hasStream,object=stream_id,id_only=True)
if datasets:
return datasets[0]
return None
def _get_data_products(self, dataset_id):
rr_client = self.container.resource_registry
data_products, _ = rr_client.find_subjects(object=dataset_id, predicate=PRED.hasDataset, subject_type=RT.DataProduct, id_only=False)
return data_products
#--------------------------------------------------------------------------------
# Metadata Handlers
#--------------------------------------------------------------------------------
def initialize_metadata(self, dataset_id, rdt):
'''
Initializes a metadata document in the object store. The document
contains information about the bounds and extents of the dataset as
well other metadata to improve performance.
'''
object_store = self.container.object_store
key = dataset_id
bounds = {}
extents = {}
last_values = {}
rough_size = 0
for k,v in rdt.iteritems():
v = v[:].flatten()
if v.dtype.char not in ('S', 'O', 'U', 'V'):
bounds[k] = (np.min(v), np.max(v))
last_values[k] = v[-1]
extents[k] = len(rdt)
rough_size += len(rdt) * 4
doc = {'bounds':bounds, 'extents':extents, 'last_values':last_values, 'size': rough_size}
doc = numpy_walk(doc)
object_store.create_doc(doc, object_id=key)
return
    def update_metadata(self, dataset_id, rdt):
        '''
        Updates the metadata document with the latest information available
        '''
        self.update_data_product_metadata(dataset_id, rdt)
        # Grab the document
        object_store = self.container.object_store
        key = dataset_id
        try:
            doc = object_store.read_doc(key)
        except NotFound:
            # No document yet -- build it from scratch instead of updating.
            return self.initialize_metadata(dataset_id, rdt)
        # These are the fields we're interested in
        bounds = doc['bounds']
        extents = doc['extents']
        last_values = doc['last_values']
        rough_size = doc['size']
        for k,v in rdt.iteritems():
            # Only parameters already tracked in 'bounds' (i.e. numeric ones
            # recorded by initialize_metadata) are updated.
            if k not in bounds:
                continue
            v = v[:].flatten() # Get the numpy representation (dense array).
            if v.dtype.char not in ('S', 'O', 'U', 'V'):
                l_min = np.min(v)
                l_max = np.max(v)
                o_min, o_max = bounds[k]
                bounds[k] = (min(l_min, o_min), max(l_max, o_max))
                last_values[k] = v[-1]
                # Update the bounds
                # Increase the extents
                extents[k] = extents[k] + len(rdt)
                # How about the last value?
                rough_size += len(rdt) * 4
        doc['size'] = rough_size
        # Sanitize it
        doc = numpy_walk(doc)
        object_store.update_doc(doc)
def update_data_product_metadata(self, dataset_id, rdt):
data_products = self._get_data_products(dataset_id)
for data_product in data_products:
self.update_time(data_product, rdt[rdt.temporal_parameter][:])
self.update_geo(data_product, rdt)
try:
self.container.resource_registry.update(data_product)
except: # TODO: figure out WHICH Exception gets raised here when the bounds are off
log.error("Problem updating the data product metadata", exc_info=True)
# Carry on :(
def update_time(self, data_product, t):
'''
Sets the nominal_datetime for a data product correctly
Accounts for things like NTP and out of order data
'''
t0, t1 = self.get_datetime_bounds(data_product)
#TODO: Account for non NTP-based timestamps
min_t = np.min(t) - 2208988800
max_t = np.max(t) - 2208988800
if t0:
t0 = min(t0, min_t)
else:
t0 = min_t
if t1:
t1 = max(t1, max_t)
else:
t1 = max_t
if t0 > t1:
log.error("This should never happen but t0 > t1")
data_product.nominal_datetime.start_datetime = float(t0)
data_product.nominal_datetime.end_datetime = float(t1)
def get_datetime(self, nominal_datetime):
'''
Returns a floating point value for the datetime or None if it's an
empty string
'''
t = None
# So normally this is a string
if isinstance(nominal_datetime, (float, int)):
t = nominal_datetime # simple enough
elif isinstance(nominal_datetime, basestring):
if nominal_datetime: # not an empty string
# Try to convert it to a float
try:
t = float(nominal_datetime)
except ValueError:
pass
return t
def get_datetime_bounds(self, data_product):
'''Returns the min and max for the bounds in the nominal_datetime
attr
'''
t0 = self.get_datetime(data_product.nominal_datetime.start_datetime)
t1 = self.get_datetime(data_product.nominal_datetime.end_datetime)
return (t0, t1)
def update_geo(self, data_product, rdt):
'''
Finds the maximum bounding box
'''
lat = None
lon = None
for p in rdt:
if rdt._rd[p] is None:
continue
# TODO: Not an all encompassing list of acceptable names for lat and lon
if p.lower() in ('lat', 'latitude', 'y_axis'):
lat = np.asscalar(rdt[p][-1])
elif p.lower() in ('lon', 'longitude', 'x_axis'):
lon = np.asscalar(rdt[p][-1])
if lat and lon:
break
if lat and lon:
data_product.geospatial_bounds.geospatial_latitude_limit_north = lat
data_product.geospatial_bounds.geospatial_latitude_limit_south = lat
data_product.geospatial_bounds.geospatial_longitude_limit_east = lon
data_product.geospatial_bounds.geospatial_longitude_limit_west = lon
#--------------------------------------------------------------------------------
# Cache managemnt
#--------------------------------------------------------------------------------
def get_dataset(self,stream_id):
'''
Memoization (LRU) of _new_dataset
'''
try:
result = self._datasets.pop(stream_id)
except KeyError:
result = self._new_dataset(stream_id)
if result is None:
return None
if len(self._datasets) >= self.CACHE_LIMIT:
self._datasets.popitem(0)
self._datasets[stream_id] = result
return result
    def get_coverage(self, stream_id):
        '''
        Memoization (LRU) of _get_coverage

        Returns an open coverage for the stream, or None when no dataset or
        coverage exists.  The pop/re-insert pattern keeps the most recently
        used key at the end of the OrderedDict; when the cache is full the
        oldest coverage is closed before being evicted.
        '''
        try:
            result = self._coverages.pop(stream_id)
        except KeyError:
            dataset_id = self.get_dataset(stream_id)
            if dataset_id is None:
                return None
            # Open in append mode so new granule data can be written.
            result = DatasetManagementService._get_simplex_coverage(dataset_id, mode='a')
            if result is None:
                return None
            if len(self._coverages) >= self.CACHE_LIMIT:
                k, coverage = self._coverages.popitem(0)
                coverage.close(timeout=5)
        self._coverages[stream_id] = result
        return result
#--------------------------------------------------------------------------------
# Granule Parsing and Handling
#--------------------------------------------------------------------------------
@handle_stream_exception()
def recv_packet(self, msg, stream_route, stream_id):
'''
The consumer callback to parse and manage the granule.
The message is ACK'd once the function returns
'''
log.trace('received granule for stream %s', stream_id)
if msg == {}:
log.error('Received empty message from stream: %s', stream_id)
return
# Message validation
if not isinstance(msg, Granule):
log.error('Ingestion received a message that is not a granule: %s', msg)
return
rdt = RecordDictionaryTool.load_from_granule(msg)
if rdt is None:
log.error('Invalid granule (no RDT) for stream %s', stream_id)
return
if not len(rdt):
log.debug('Empty granule for stream %s', stream_id)
return
self.persist_or_timeout(stream_id, rdt)
def persist_or_timeout(self, stream_id, rdt):
'''
A loop that tries to parse and store a granule for up to five minutes,
and waits an increasing amount of time each iteration.
'''
done = False
timeout = 2
start = time.time()
while not done:
if self.parse_granule(stream_id, rdt, start, done):
return # We're all done, everything worked
if (time.time() - start) > MAX_RETRY_TIME: # After a while, give up
dataset_id = self.get_dataset(stream_id)
log.error("We're giving up, the coverage needs to be inspected %s", DatasetManagementService._get_coverage_path(dataset_id))
raise
if stream_id in self._coverages:
log.info('Popping coverage for stream %s', stream_id)
self._coverages.pop(stream_id)
gevent.sleep(timeout)
timeout = min(60 * 5, timeout * 2)
def parse_granule(self, stream_id, rdt, start, done):
try:
self.add_granule(stream_id, rdt)
return True
except Exception as e:
log.exception('An issue with coverage, retrying after a bit')
return False
return True # never reaches here, Added for clarity
def dataset_changed(self, dataset_id, window):
self.event_publisher.publish_event(origin=dataset_id, author=self.id, window=window)
def build_data_dict(self, rdt):
np_dict = {}
time_array = rdt[rdt.temporal_parameter]
| |
<reponame>ManchesterBioinference/burstInfer
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 17 18:00:10 2020
@author: Jon
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 17 01:33:52 2020
@author: jbowl
"""
import scipy
import numpy as np
import datetime
print(datetime.datetime.now().time())
print('start program')
from numba import jit
from burstInfer.ms2_loading_coeff import ms2_loading_coeff
#from calcObservationLikelihood import calcObservationLikelihood
from burstInfer.v_log_solve import v_log_solve
from burstInfer.log_sum_exp import log_sum_exp
#%%
@jit(nopython=True)
def get_adjusted(state, K, W, ms2_coeff):
    """Split the MS2 loading coefficients of a W-bit promoter state.

    Walks the W low bits of ``state`` (least significant first) and adds
    ms2_coeff[0, i] to the "one" total when bit i is set, otherwise to the
    "zero" total.  Returns [one_total, zero_total].  K is unused but kept
    for interface compatibility.
    """
    ones_total = 0
    zeros_total = 0
    bits = state
    for idx in np.arange(0, W):
        if bits & 1 == 1:
            ones_total = ones_total + ms2_coeff[0, idx]
        else:
            zeros_total = zeros_total + ms2_coeff[0, idx]
        # Shift to examine the next bit.
        bits = bits >> 1
    result = []
    result.append(ones_total)
    result.append(zeros_total)
    return result
@jit(nopython=True)
def logsumexp_numba(X):
    """Numerically stable log-sum-exp: log(sum(exp(x) for x in X)).

    The original summed raw exp(x) values, which overflows to inf for
    x greater than ~709; subtracting the maximum before exponentiating
    gives the same mathematical result without overflow.
    """
    if len(X) == 0:
        return -np.inf
    m = X[0]
    for x in X:
        if x > m:
            m = x
    if m == -np.inf:
        # All inputs are -inf: sum of zeros, log(0) = -inf.
        return -np.inf
    r = 0.0
    for x in X:
        r += np.exp(x - m)
    return m + np.log(r)
def get_posterior_long_v2(initialised_parameters, n_steps, n_traces, signal_struct, compound_states, K, PERMITTED_MEMORY,
W, eps, seed_setter, kappa):
A_log = np.log(initialised_parameters['A'])
noise_temp = initialised_parameters['noise']
lambda_log = -2 * np.log(noise_temp)
pi0_log = np.log(initialised_parameters['pi0'])
v = initialised_parameters['v']
v_logs = np.log(v)
# MS2 coefficient calculation
ms2_coeff = ms2_loading_coeff(kappa, W)
ms2_coeff_flipped = np.flip(ms2_coeff, 1)
count_reduction_manual = np.zeros((1,W-1))
for t in np.arange(0,W-1):
count_reduction_manual[0,t] = np.sum(ms2_coeff[0,t+1:])
count_reduction_manual = np.reshape(count_reduction_manual, (W-1,1))
logL_tot = np.full((1, n_steps), np.NINF)
fluo_length_total = 0
for gh in signal_struct:
fluo_length_total = fluo_length_total + len(np.transpose(gh))
one_more = 0
log_likelihoods = np.full((1, n_traces), np.NINF)
for i_tr in np.arange(0, n_traces):
log_likelihoods[0, i_tr] = np.NINF
logL_tot = np.full((1, n_steps), np.NINF)
@jit(nopython=True)
def calcObservationLikelihood(lambda_logF, noise_tempF, dataF, veef,
INPUT_STATE, K, W, ms2_coeff_flipped):
adjusted_list = get_adjusted(INPUT_STATE, K, W, ms2_coeff)
eta = 0.5 * (lambda_logF - np.log(2*np.pi)) - 0.5 * \
(1 / noise_tempF**2) * (dataF - (adjusted_list[1] * veef[0, 0] \
+ adjusted_list[0] * veef[1, 0]))**2
#print(eta)
return eta
@jit(nopython=True)
def compute_dynamic_F(state, length, W, K, ms2_coeff_flipped, count_reduction_manual):
#print(datetime.datetime.now().time())
trace_length = length
state_flipped = K**W - state - 1
adjust = get_adjusted(state_flipped, K, W, ms2_coeff)
adjust_ones = adjust[0]
adjust_zeros = adjust[1]
F1_log = np.log(adjust_ones)
F0_log = np.log(adjust_zeros)
log_f0_terms = np.zeros((1, trace_length))
for i in np.arange(0, trace_length):
log_f0_terms[0,i] = F0_log
log_f1_terms_saved = np.zeros((1, trace_length))
for i in np.arange(0, trace_length):
log_f1_terms_saved[0,i] = F1_log
#log_f1_terms_saved2 = log_f1_terms_saved
for t in np.arange(0,W-1):
#print('top')
#print(np.exp(log_f1_terms_saved[0,t]))
#print('bottom')
#print(count_reduction_manual[t,])
#print(abs(float(np.exp(log_f1_terms_saved[0,t])) - count_reduction_manual[t,]))
inter = float(np.exp(log_f1_terms_saved[0,t])) - count_reduction_manual[t,]
log_f1_terms_saved[0,t] = np.log(abs(inter[0,]))
log_F_terms = []
log_F_terms.append(log_f1_terms_saved)
log_F_terms.append(log_f0_terms)
#print(datetime.datetime.now().time())
return log_F_terms
p_z_log_soft = {}
for baum_welch in range(n_steps):
print('baum_welch: ')
print(baum_welch)
logL_tot[0, baum_welch] = 0
# Declare EM terms
pi0_terms = np.full((1, K), np.NINF)
A_terms = np.full((K, K), np.NINF)
lambda_terms = np.NINF
v_M_terms = np.full((K, K), np.NINF)
v_b_terms_log = np.full((1, K), np.NINF)
v_b_terms_sign = np.ones((1, K))
#trace_adder = 0
for i_tr in range(n_traces):
print(i_tr)
print(datetime.datetime.now())
print('start trace')
data = signal_struct[i_tr]
trace_length = len(np.transpose(data))
states_container = []
off_off = A_log[0, 0]
off_on = A_log[1, 0]
on_off = A_log[0, 1]
on_on = A_log[1, 1]
pi0_log = np.reshape(pi0_log, (2,1))
v = np.reshape(v, (2,1))
fluo_logs_abs = np.log(np.abs(data))
x_term_logs = fluo_logs_abs
xsign = np.sign(data)
x_term_signs = xsign
#compound_states_vector = np.arange(0, compound_states)
#compound_states_vector = np.int32(compound_states_vector)
# Complete first two 'anomalous' steps manually
# Step One
t = 0
expansion_counter = 0
RAM = 2
updater = tuple([[], [0, \
1], [pi0_log[0, 0], \
pi0_log[1, 0]], \
[pi0_log[0, 0] + \
calcObservationLikelihood(lambda_log, noise_temp, \
data[0, 0], v, 0, K, W, ms2_coeff_flipped), \
pi0_log[1, 0] + \
calcObservationLikelihood(lambda_log, noise_temp,
data[0, 0], v, 1, K, W, ms2_coeff_flipped)],
[]])
states_container.append(updater)
# Step Two
t = 1
expansion_counter = 1
RAM = 4
new_alphas = [states_container[0][3][0] + off_off + \
calcObservationLikelihood(lambda_log, noise_temp, data[0, 1], v, 0, K, W, ms2_coeff_flipped),
states_container[0][3][0] + off_on + \
calcObservationLikelihood(lambda_log, noise_temp, data[0, 1], v, 1, K, W, ms2_coeff_flipped),
states_container[0][3][1] + on_off + \
calcObservationLikelihood(lambda_log, noise_temp, data[0, 1], v, 2, K, W, ms2_coeff_flipped),
states_container[0][3][1] + on_on + calcObservationLikelihood(lambda_log,
noise_temp, data[0, 1], v, 3, K, W, ms2_coeff_flipped)]
updater = tuple([[0, 1],
[0, 1,
2, 3],
[off_off, off_on, on_off, on_on], new_alphas, [0, 0, 1, 1]])
states_container.append(updater)
#%%
# Expansion Phase
print(datetime.datetime.now().time())
print('start forward')
while RAM < PERMITTED_MEMORY:
t = t + 1
expansion_counter = expansion_counter + 1
RAM = 2 * len(states_container[t-1][1])
previous_states = states_container[t-1][1]
previous_states2 = np.asarray(previous_states)
allowed_states = np.zeros((len(previous_states2), 2))
for i in range(len(previous_states2)):
allowed_states[i, 0] = previous_states2[i] << 1
allowed_states[i, 1] = (previous_states2[i] << 1) + 1
allowed_states = allowed_states.astype(int)
expanded_alphas = []
previous_alphas = states_container[t-1][3]
involved_transitions = []
for k in range(len(previous_states2)):
for i in np.arange(0, 2):
input_state = previous_states2[k,]
target_state = allowed_states[k, i]
for_counting = np.int64(target_state)
if input_state % 2 == 0 and target_state % 2 == 0:
expanded_alphas.append(previous_alphas[k] + off_off + \
calcObservationLikelihood(lambda_log, noise_temp, data[0, t], v,
for_counting, K, W, ms2_coeff_flipped))
involved_transitions.append(off_off)
elif input_state % 2 == 0 and target_state % 2 != 0:
expanded_alphas.append(previous_alphas[k] + off_on + \
calcObservationLikelihood(lambda_log, noise_temp, data[0, t], v,
for_counting, K, W, ms2_coeff_flipped))
involved_transitions.append(off_on)
elif input_state % 2 != 0 and target_state % 2 == 0:
expanded_alphas.append(previous_alphas[k] + on_off + \
calcObservationLikelihood(lambda_log, noise_temp, data[0, t], v,
for_counting, K, W, ms2_coeff_flipped))
involved_transitions.append(on_off)
elif input_state % 2 != 0 and target_state % 2 != 0:
expanded_alphas.append(previous_alphas[k] + on_on + \
calcObservationLikelihood(lambda_log, noise_temp, data[0, t], v,
for_counting, K, W, ms2_coeff_flipped))
involved_transitions.append(on_on)
old_states = list(previous_states2)
present_states = np.reshape(allowed_states, (2*len(previous_states2), ))
present_states_list = list(present_states)
path_variable = []
for i in range(len(previous_states2)):
path_variable.append(i)
path_variable.append(i)
states_container.append(tuple([old_states, present_states_list, involved_transitions,
expanded_alphas, path_variable]))
#%%
# First Expansion and Contraction
mask = np.int64((2**W)-1)
t = t + 1
previous_states = states_container[t-1][1]
previous_states2 = np.asarray(previous_states)
previous_states2 = np.reshape(previous_states2, (len(previous_states2), 1))
allowed_states = np.zeros((len(previous_states2), 2))
for i in range(len(previous_states2)):
allowed_states[i, 0] = previous_states2[i] << 1
allowed_states[i, 1] = (previous_states2[i] << 1) + 1
unique_states = np.unique(allowed_states)
integrated_states = np.concatenate((previous_states2, allowed_states), axis=1)
saved_integrated_states1 = integrated_states.copy()
rowfind_list = []
for u in unique_states:
selector = (integrated_states[:, 1:3] == u)
rowfind, colfind = np.where(selector == True)
rowfind_list.append(rowfind)
expanded_alphas = []
previous_alphas = states_container[t-1][3]
involved_transitions = []
previous_alphas_matrix = np.zeros((len(previous_alphas), 2))
for r in range(len(previous_alphas)):
previous_alphas_matrix[r, 0] = r
previous_alphas_matrix[r, 1] = previous_alphas[r]
for s in range(len(unique_states)):
lookup = rowfind_list[s]
if len(lookup) == 1:
target_state = unique_states[s]
input_state = previous_states2[int(lookup)]
for_counting = np.int64(target_state)
selector2 = (previous_alphas_matrix[:, 0:1] == input_state)
rowfind2, colfind2 = np.where(selector2 == True)
rowfind2 = int(rowfind2)
if input_state % 2 == 0 and target_state % 2 == 0:
expanded_alphas.append(previous_alphas_matrix[rowfind2, 1] + off_off + \
calcObservationLikelihood(lambda_log, noise_temp, data[0, t], v,
for_counting, K, W, ms2_coeff_flipped))
involved_transitions.append(off_off)
elif input_state % 2 == 0 and target_state % 2 != 0:
expanded_alphas.append(previous_alphas_matrix[rowfind2, 1] + off_on + \
calcObservationLikelihood(lambda_log, noise_temp, data[0, t], v,
for_counting, K, W, ms2_coeff_flipped))
involved_transitions.append(off_on)
elif input_state % 2 != 0 and target_state % 2 == 0:
expanded_alphas.append(previous_alphas_matrix[rowfind2, 1] + on_off + \
calcObservationLikelihood(lambda_log, noise_temp, data[0, t], v,
for_counting, K, W, ms2_coeff_flipped))
involved_transitions.append(on_off)
elif input_state % 2 != 0 and target_state % 2 != 0:
expanded_alphas.append(previous_alphas_matrix[rowfind2, 1] + on_on + \
calcObservationLikelihood(lambda_log, noise_temp, data[0, t], v,
for_counting, K, W, ms2_coeff_flipped))
involved_transitions.append(on_on)
accumulator = np.concatenate((np.asarray(rowfind_list), np.reshape(unique_states,
(len(unique_states), 1)),
np.reshape(np.asarray(involved_transitions),
(len(unique_states), 1)),
np.reshape(np.asarray(expanded_alphas),
(len(unique_states), 1))), axis = 1)
accumulator2 = accumulator[accumulator[:,3].argsort()[::-1]]
accumulator3 = accumulator2[0:PERMITTED_MEMORY, :]
addition_tuple = tuple([list(previous_states), list(accumulator3[:, 1].astype(int)),
list(accumulator3[:, 2]), list(accumulator3[:, 3]),
list(accumulator3[:, 0].astype(int))])
states_container.append(addition_tuple)
#%%
# First vanilla expansion and contraction
t = t + 1
mask = np.int64((2**W)-1)
previous_states = states_container[t-1][1]
previous_states2 = np.asarray(previous_states)
previous_states2 = np.reshape(previous_states2, (len(previous_states2), 1))
allowed_states = np.zeros((len(previous_states2), 2))
for i in range(len(previous_states2)):
allowed_states[i, 0] = np.bitwise_and(previous_states2[i] << 1, mask)
allowed_states[i, 1] = np.bitwise_and((previous_states2[i] << 1) + 1, mask)
unique_states = np.unique(allowed_states)
integrated_states = np.concatenate((previous_states2, allowed_states), axis=1)
rowfind_list = []
for u in unique_states:
selector = (integrated_states[:, 1:3] == u)
rowfind, colfind = np.where(selector == True)
rowfind_list.append(rowfind)
expanded_alphas = []
previous_alphas = states_container[t-1][3]
involved_transitions = []
previous_alphas_matrix = np.zeros((len(previous_alphas), 2))
for r in range(len(previous_alphas)):
previous_alphas_matrix[r, 0] = previous_states2[r]
previous_alphas_matrix[r, 1] = previous_alphas[r]
| |
from collections import OrderedDict, Iterable
from copy import deepcopy
from xml.etree import ElementTree as ET
from six import string_types
import openmc
from openmc.clean_xml import clean_xml_indentation
from openmc.checkvalue import check_type
class Geometry(object):
"""Geometry representing a collection of surfaces, cells, and universes.
Parameters
----------
root_universe : openmc.Universe, optional
Root universe which contains all others
Attributes
----------
root_universe : openmc.Universe
Root universe which contains all others
bounding_box : 2-tuple of numpy.array
Lower-left and upper-right coordinates of an axis-aligned bounding box
of the universe.
"""
def __init__(self, root_universe=None):
self._root_universe = None
self._offsets = {}
if root_universe is not None:
self.root_universe = root_universe
    @property
    def root_universe(self):
        """openmc.Universe: Root universe containing all others, or None if
        one has not been assigned yet."""
        return self._root_universe
    @property
    def bounding_box(self):
        """2-tuple of numpy.array: Lower-left and upper-right coordinates of
        an axis-aligned bounding box of the root universe.  Raises
        AttributeError if no root universe has been assigned."""
        return self.root_universe.bounding_box
    @root_universe.setter
    def root_universe(self, root_universe):
        # check_type raises TypeError unless the value is an openmc.Universe.
        check_type('root universe', root_universe, openmc.Universe)
        self._root_universe = root_universe
def add_volume_information(self, volume_calc):
"""Add volume information from a stochastic volume calculation.
Parameters
----------
volume_calc : openmc.VolumeCalculation
Results from a stochastic volume calculation
"""
if volume_calc.domain_type == 'cell':
for cell in self.get_all_cells().values():
if cell.id in volume_calc.volumes:
cell.add_volume_information(volume_calc)
elif volume_calc.domain_type == 'material':
for material in self.get_all_materials().values():
if material.id in volume_calc.volumes:
material.add_volume_information(volume_calc)
elif volume_calc.domain_type == 'universe':
for universe in self.get_all_universes().values():
if universe.id in volume_calc.volumes:
universe.add_volume_information(volume_calc)
def export_to_xml(self, path='geometry.xml'):
"""Export geometry to an XML file.
Parameters
----------
path : str
Path to file to write. Defaults to 'geometry.xml'.
"""
# Create XML representation
root_element = ET.Element("geometry")
self.root_universe.create_xml_subelement(root_element)
# Sort the elements in the file
root_element[:] = sorted(root_element, key=lambda x: (
x.tag, int(x.get('id'))))
# Clean the indentation in the file to be user-readable
clean_xml_indentation(root_element)
# Write the XML Tree to the geometry.xml file
tree = ET.ElementTree(root_element)
tree.write(path, xml_declaration=True, encoding='utf-8')
    def find(self, point):
        """Find cells/universes/lattices which contain a given point.

        Delegates directly to the root universe's traversal.

        Parameters
        ----------
        point : 3-tuple of float
            Cartesian coordinates of the point

        Returns
        -------
        list
            Sequence of universes, cells, and lattices which are traversed to
            find the given point

        """
        return self.root_universe.find(point)
def get_instances(self, paths):
"""Return the instance number(s) for a cell/material in a geometry path.
The instance numbers are used as indices into distributed
material/temperature arrays and tally distribcell filter arrays.
Parameters
----------
paths : str or iterable of str
The path traversed through the CSG tree to reach a cell or material
instance. For example, 'u0->c10->l20(2,2,1)->u5->c5' would indicate
the cell instance whose first level is universe 0 and cell 10,
second level is lattice 20 position (2,2,1), and third level is
universe 5 and cell 5.
Returns
-------
int or list of int
Instance number(s) for the given path(s)
"""
# Make sure we are working with an iterable
return_list = (isinstance(paths, Iterable) and
not isinstance(paths, string_types))
path_list = paths if return_list else [paths]
indices = []
for p in path_list:
# Extract the cell id from the path
last_index = p.rfind('>')
last_path = p[last_index+1:]
uid = int(last_path[1:])
# Get corresponding cell/material
if last_path[0] == 'c':
obj = self.get_all_cells()[uid]
elif last_path[0] == 'm':
obj = self.get_all_materials()[uid]
# Determine index in paths array
try:
indices.append(obj.paths.index(p))
except ValueError:
indices.append(None)
return indices if return_list else indices[0]
    def get_all_cells(self):
        """Return all cells in the geometry.

        Delegates to the root universe's recursive cell collection.

        Returns
        -------
        collections.OrderedDict
            Dictionary mapping cell IDs to :class:`openmc.Cell` instances

        """
        return self.root_universe.get_all_cells()
def get_all_universes(self):
"""Return all universes in the geometry.
Returns
-------
collections.OrderedDict
Dictionary mapping universe IDs to :class:`openmc.Universe`
instances
"""
universes = OrderedDict()
universes[self.root_universe.id] = self.root_universe
universes.update(self.root_universe.get_all_universes())
return universes
    def get_all_materials(self):
        """Return all materials within the geometry.

        Delegates to the root universe's recursive material collection.

        Returns
        -------
        collections.OrderedDict
            Dictionary mapping material IDs to :class:`openmc.Material`
            instances

        """
        return self.root_universe.get_all_materials()
def get_all_material_cells(self):
"""Return all cells filled by a material
Returns
-------
collections.OrderedDict
Dictionary mapping cell IDs to :class:`openmc.Cell` instances that
are filled with materials or distributed materials.
"""
material_cells = OrderedDict()
for cell in self.get_all_cells().values():
if cell.fill_type in ('material', 'distribmat'):
if cell not in material_cells:
material_cells[cell.id] = cell
return material_cells
def get_all_material_universes(self):
"""Return all universes having at least one material-filled cell.
This method can be used to find universes that have at least one cell
that is filled with a material or is void.
Returns
-------
collections.OrderedDict
Dictionary mapping universe IDs to :class:`openmc.Universe`
instances with at least one material-filled cell
"""
material_universes = OrderedDict()
for universe in self.get_all_universes().values():
for cell in universe.cells.values():
if cell.fill_type in ('material', 'distribmat', 'void'):
if universe not in material_universes:
material_universes[universe.id] = universe
return material_universes
def get_all_lattices(self):
"""Return all lattices defined
Returns
-------
collections.OrderedDict
Dictionary mapping lattice IDs to :class:`openmc.Lattice` instances
"""
lattices = OrderedDict()
for cell in self.get_all_cells().values():
if cell.fill_type == 'lattice':
if cell.fill not in lattices:
lattices[cell.fill.id] = cell.fill
return lattices
def get_all_surfaces(self):
"""
Return all surfaces used in the geometry
Returns
-------
collections.OrderedDict
Dictionary mapping surface IDs to :class:`openmc.Surface` instances
"""
surfaces = OrderedDict()
for cell in self.get_all_cells().values():
surfaces = cell.region.get_surfaces(surfaces)
return surfaces
def get_materials_by_name(self, name, case_sensitive=False, matching=False):
"""Return a list of materials with matching names.
Parameters
----------
name : str
The name to match
case_sensitive : bool
Whether to distinguish upper and lower case letters in each
material's name (default is False)
matching : bool
Whether the names must match completely (default is False)
Returns
-------
list of openmc.Material
Materials matching the queried name
"""
if not case_sensitive:
name = name.lower()
all_materials = self.get_all_materials().values()
materials = set()
for material in all_materials:
material_name = material.name
if not case_sensitive:
material_name = material_name.lower()
if material_name == name:
materials.add(material)
elif not matching and name in material_name:
materials.add(material)
materials = list(materials)
materials.sort(key=lambda x: x.id)
return materials
def get_cells_by_name(self, name, case_sensitive=False, matching=False):
"""Return a list of cells with matching names.
Parameters
----------
name : str
The name to search match
case_sensitive : bool
Whether to distinguish upper and lower case letters in each
cell's name (default is False)
matching : bool
Whether the names must match completely (default is False)
Returns
-------
list of openmc.Cell
Cells matching the queried name
"""
if not case_sensitive:
name = name.lower()
all_cells = self.get_all_cells().values()
cells = set()
for cell in all_cells:
cell_name = cell.name
if not case_sensitive:
cell_name = cell_name.lower()
if cell_name == name:
cells.add(cell)
elif not matching and name in cell_name:
cells.add(cell)
cells = list(cells)
cells.sort(key=lambda x: x.id)
return cells
def get_cells_by_fill_name(self, name, case_sensitive=False, matching=False):
"""Return a list of cells with fills with matching names.
Parameters
----------
name : str
The name to match
case_sensitive : bool
Whether to distinguish upper and lower case letters in each
cell's name (default is False)
matching : bool
Whether the names must match completely (default is False)
Returns
-------
list of openmc.Cell
Cells with fills matching the queried name
"""
if not case_sensitive:
name = name.lower()
cells = set()
for cell in self.get_all_cells().values():
names = []
if cell.fill_type in ('material', 'universe', 'lattice'):
names.append(cell.fill.name)
elif cell.fill_type == 'distribmat':
for mat in cell.fill:
if mat is not None:
names.append(mat.name)
for fill_name in names:
if not case_sensitive:
fill_name = fill_name.lower()
if fill_name == name:
cells.add(cell)
elif not matching and name in fill_name:
cells.add(cell)
cells = list(cells)
cells.sort(key=lambda x: x.id)
return cells
def get_universes_by_name(self, name, case_sensitive=False, matching=False):
"""Return a list of universes with matching names.
Parameters
----------
name : str
The name to match
case_sensitive : bool
Whether to distinguish upper and lower case letters in each
universe's name (default is False)
matching : bool
Whether the names must match completely (default is False)
Returns
-------
list of openmc.Universe
Universes matching the queried name
"""
if not case_sensitive:
name = name.lower()
all_universes = self.get_all_universes().values()
universes = set()
for universe in all_universes:
universe_name = universe.name
if not case_sensitive:
universe_name = universe_name.lower()
if universe_name == name:
universes.add(universe)
elif not matching and name in universe_name:
universes.add(universe)
universes = list(universes)
universes.sort(key=lambda x: x.id)
return universes
def get_lattices_by_name(self, name, case_sensitive=False, matching=False):
"""Return a list of lattices with matching names.
Parameters
----------
name : str
The name to match
case_sensitive : bool
Whether to distinguish upper and lower case letters in each
lattice's name (default is False)
matching : bool
Whether the names must match completely (default is False)
Returns
-------
list of openmc.Lattice
Lattices matching the queried name
"""
if not case_sensitive:
name = name.lower()
all_lattices = self.get_all_lattices().values()
lattices = set()
for lattice | |
[], # Beverages, lemonade, frozen concentrate, pink, prepared with water
14544: [], # Beverages, tea, black, brewed, prepared with distilled water
14545: [], # Beverages, tea, herb, brewed, chamomile
14548: [], # Beverages, tea, instant, lemon, with added ascorbic acid
14550: [], # Alcoholic beverage, distilled, all (gin, rum, vodka, whiskey) 86 proof
14551: [], # Alcoholic beverage, distilled, all (gin, rum, vodka, whiskey) 90 proof
14552: [], # Carbonated beverage, chocolate-flavored soda
14553: [], # Beverages, Wine, non-alcoholic
14555: ["Water", "", "Water"], # Water, bottled, generic
14557: [], # Beverages, chocolate-flavor beverage mix for milk, powder, with added nutrients
14558: [], # Beverages, chocolate-flavor beverage mix for milk, powder, with added nutrients, prepared with whole milk
14559: [], # Beverages, water, bottled, non-carbonated, EVIAN
14599: [], # Beverages, Powerade Zero Ion4, calorie-free, assorted flavors
14601: [], # Beverages, WENDY'S, tea, ready-to-drink, unsweetened
14602: [], # Alcoholic Beverage, wine, table, red, Merlot
14604: [], # Water, non-carbonated, bottles, natural fruit flavors, sweetened with low calorie sweetener
14605: [], # Beverages, Water with added vitamins and minerals, bottles, sweetened, assorted fruit flavors
14607: [], # Beverages, V8 SPLASH Juice Drinks, Diet Berry Blend
14608: [], # Beverages, V8 SPLASH Juice Drinks, Diet Fruit Medley
14609: [], # Beverages, V8 SPLASH Juice Drinks, Diet Strawberry Kiwi
14610: [], # Beverages, V8 SPLASH Juice Drinks, Diet Tropical Blend
14611: [], # Beverages, V8 SPLASH Juice Drinks, Berry Blend
14612: [], # Beverages, V8 SPLASH Juice Drinks, Fruit Medley
14613: [], # Beverages, V8 SPLASH Juice Drinks, Guava Passion Fruit
14614: [], # Beverages, V8 SPLASH Juice Drinks, Mango Peach
14615: [], # Beverages, V8 SPLASH Juice Drinks, Orange Pineapple
14616: [], # Beverages, V8 SPLASH Juice Drinks, Orchard Blend
14617: [], # Beverages, V8 SPLASH Juice Drinks, Strawberry Banana
14618: [], # Beverages, V8 SPLASH Juice Drinks, Strawberry Kiwi
14619: [], # Beverages, V8 SPLASH Juice Drinks, Tropical Blend
14620: [], # Beverages, V8 V-FUSION Juices, Peach Mango
14621: [], # Beverages, V8 V-FUSION Juices, Strawberry Banana
14622: [], # Beverages, V8 V-FUSION Juices, Tropical
14623: [], # Beverages, V8 V- FUSION Juices, Acai Berry
14625: [], # Beverages, Energy drink, AMP
14626: [], # Beverages, Energy drink, FULL THROTTLE
14627: [], # Beverages, Energy Drink, Monster, fortified with vitamins C, B2, B3, B6, B12
14628: [], # Beverages, Energy drink, AMP, sugar free
14629: [], # Beverages, Energy drink, ROCKSTAR
14630: [], # Beverages, Energy drink, ROCKSTAR, sugar free
14632: [], # Beverages, Meal supplement drink, canned, peanut flavor
14633: [], # Beverages, Vegetable and fruit juice drink, reduced calorie, with low-calorie sweetener, added vitamin C
14634: [], # Beverages, milk beverage, reduced fat, flavored and sweetened, Ready-to-drink, added calcium, vitamin A and vitamin D
14635: [], # Beverages, vegetable and fruit juice blend, 100% juice, with added vitamins A, C, E
14636: [], # Beverages, fruit juice drink, reduced sugar, with vitamin E added
14637: [], # Water, with corn syrup and/or sugar and low calorie sweetener, fruit flavored
14638: [], # Beverages, Horchata, as served in restaurant
14639: [], # Beverages, rice milk, unsweetened
14640: [], # Beverages, Energy drink, VAULT, citrus flavor
14641: [], # Beverages, Energy drink, VAULT Zero, sugar-free, citrus flavor
14644: [], # Beverages, PEPSICO QUAKER, Gatorade G2, low calorie
14645: [], # Beverages, Fruit flavored drink, less than 3% juice, not fortified with vitamin C
14646: [], # Beverages, Fruit flavored drink containing less than 3% fruit juice, with high vitamin C
14647: [], # Beverages, Fruit flavored drink, reduced sugar, greater than 3% fruit juice, high vitamin C, added calcium
14648: [], # Beverages, fruit juice drink, greater than 3% fruit juice, high vitamin C and added thiamin
14649: [], # Beverages, tea, hibiscus, brewed
14651: [], # Beverages, fruit juice drink, greater than 3% juice, high vitamin C
14654: [], # Beverages, nutritional shake mix, high protein, powder
15001: ["Anchovy fish"], # Fish, anchovy, european, raw
15002: [], # Fish, anchovy, european, canned in oil, drained solids
15003: ["Bass fish"], # Fish, bass, fresh water, mixed species, raw
15004: [], # Fish, bass, striped, raw
15005: ["Bluefish fish"], # Fish, bluefish, raw
15006: ["Burbot fish"], # Fish, burbot, raw
15007: ["Butterfish fish"], # Fish, butterfish, raw
15008: ["Carp fish"], # Fish, carp, raw
15009: [], # Fish, carp, cooked, dry heat
15010: ["Catfish fish", "wild channel"], # Fish, catfish, channel, wild, raw
15011: [], # Fish, catfish, channel, cooked, breaded and fried
15012: [], # Fish, caviar, black and red, granular
15013: ["Cisco fish"], # Fish, cisco, raw
15014: [], # Fish, cisco, smoked
15015: ["Cod fish"], # Fish, cod, Atlantic, raw
15016: [], # Fish, cod, Atlantic, cooked, dry heat
15017: [], # Fish, cod, Atlantic, canned, solids and liquid
15018: [], # Fish, cod, Atlantic, dried and salted
15019: [], # Fish, cod, Pacific, raw (may have been previously frozen)
15020: ["Croaker fish"], # Fish, croaker, Atlantic, raw
15021: [], # Fish, croaker, Atlantic, cooked, breaded and fried
15022: ["Cusk fish"], # Fish, cusk, raw
15023: ["Mahimahi fish"], # Fish, mahimahi, raw
15024: ["Drum fish"], # Fish, drum, freshwater, raw
15025: ["Eel fish"], # Fish, eel, mixed species, raw
15026: [], # Fish, eel, mixed species, cooked, dry heat
15027: [], # Fish, fish sticks, frozen, prepared
15028: ["Flatfish fish"], # Fish, flatfish (flounder and sole species), raw
15029: [], # Fish, flatfish (flounder and sole species), cooked, dry heat
15030: [], # Fish, gefiltefish, commercial, sweet recipe
15031: ["Grouper fish"], # Fish, grouper, mixed species, raw
15032: [], # Fish, grouper, mixed species, cooked, dry heat
15033: ["Haddock fish"], # Fish, haddock, raw
15034: [], # Fish, haddock, cooked, dry heat
15035: [], # Fish, haddock, smoked
15036: ["Halibut fish"], # Fish, halibut, Atlantic and Pacific, raw
15037: [], # Fish, halibut, Atlantic and Pacific, cooked, dry heat
15038: [], # Fish, halibut, Greenland, raw
15039: ["Herring fish"], # Fish, herring, Atlantic, raw
15040: [], # Fish, herring, Atlantic, cooked, dry heat
15041: [], # Fish, herring, Atlantic, pickled
15042: [], # Fish, herring, Atlantic, kippered
15043: [], # Fish, herring, Pacific, raw
15044: ["Ling fish"], # Fish, ling, raw
15045: ["Lingcod fish"], # Fish, lingcod, raw
15046: ["Mackerel fish"], # Fish, mackerel, Atlantic, raw
15047: [], # Fish, mackerel, Atlantic, cooked, dry heat
15048: [], # Fish, mackerel, jack, canned, drained solids
15049: [], # Fish, mackerel, king, raw
15050: [], # Fish, mackerel, Pacific and jack, mixed species, raw
15051: [], # Fish, mackerel, spanish, raw
15052: [], # Fish, mackerel, spanish, cooked, dry heat
15053: ["Milkfish fish"], # Fish, milkfish, raw
15054: ["Monkfish fish"], # Fish, monkfish, raw
15055: ["Mullet fish"], # Fish, mullet, striped, raw
15056: [], # Fish, mullet, striped, cooked, dry heat
15057: ["Ocean perch fish"], # Fish, ocean perch, Atlantic, raw
15058: [], # Fish, ocean perch, Atlantic, cooked, dry heat
15059: ["Pout fish"], # Fish, pout, ocean, raw
15060: ["Perch fish"], # Fish, perch, mixed species, raw
15061: [], # Fish, perch, mixed species, cooked, dry heat
15062: ["Pike fish"], # Fish, pike, northern, raw
15063: [], # Fish, pike, northern, cooked, dry heat
15064: [], # Fish, pike, walleye, raw
15065: ["Pollock fish"], # Fish, pollock, Atlantic, raw
15066: [], # Fish, pollock, Alaska, raw (may contain additives to retain moisture)
15067: [], # Fish, pollock, Alaska, cooked, dry heat (may contain additives to retain moisture)
15068: ["Pompano fish"], # Fish, pompano, florida, raw
15069: [], # Fish, pompano, florida, cooked, dry heat
15070: ["Rockfish fish"], # Fish, rockfish, Pacific, mixed species, raw
15071: [], # Fish, rockfish, Pacific, mixed species, cooked, dry heat
15072: ["Roe fish"], # Fish, roe, mixed species, raw
15073: ["Roughy fish"], # Fish, roughy, orange, raw
| |
<gh_stars>0
# Copyright (c) 2016-2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from six.moves import range
import time
import math
import mock
import pytest
from jaeger_client.sampler import (
Sampler,
ConstSampler,
ProbabilisticSampler,
RateLimitingSampler,
RemoteControlledSampler,
GuaranteedThroughputProbabilisticSampler,
AdaptiveSampler,
DEFAULT_MAX_OPERATIONS,
DEFAULT_SAMPLING_PROBABILITY,
get_sampling_probability,
get_rate_limit,
)
MAX_INT = 1 << 63
def get_tags(type, param):
    """Return the span tags a sampler is expected to report."""
    tags = {}
    tags['sampler.type'] = type
    tags['sampler.param'] = param
    return tags
def test_abstract_sampler_errors():
    """The abstract Sampler base class must reject direct use."""
    base_sampler = Sampler()
    with pytest.raises(NotImplementedError):
        base_sampler.is_sampled(trace_id=123)
    with pytest.raises(NotImplementedError):
        base_sampler.close()
def test_probabilistic_sampler_errors():
    """Sampling rates outside [0, 1] are rejected at construction."""
    for bad_rate in (-0.1, 1.1):
        with pytest.raises(AssertionError):
            ProbabilisticSampler(bad_rate)
def test_probabilistic_sampler():
    """A rate-0.5 sampler keeps ids below the MAX_INT threshold and drops
    ids above it, tagging sampled spans with its type and rate."""
    sampler = ProbabilisticSampler(0.5)
    assert MAX_INT == 0x8000000000000000
    decision, tags = sampler.is_sampled(MAX_INT - 10)
    assert decision
    assert tags == get_tags('probabilistic', 0.5)
    decision, _ = sampler.is_sampled(MAX_INT + 10)
    assert not decision
    sampler.close()
    assert '%s' % sampler == 'ProbabilisticSampler(0.5)'
def test_const_sampler():
    """ConstSampler(True) samples everything; ConstSampler(False) nothing."""
    always = ConstSampler(True)
    for trace_id in (1, MAX_INT):
        decision, _ = always.is_sampled(trace_id)
        assert decision

    never = ConstSampler(False)
    for trace_id in (1, MAX_INT):
        decision, tags = never.is_sampled(trace_id)
        assert not decision
    # tags from the last decision still carry the sampler's identity.
    assert tags == get_tags('const', False)
    assert '%s' % never == 'ConstSampler(False)'
def test_rate_limiting_sampler():
    """Exercise RateLimitingSampler's credit balance under a mocked clock.

    Three scenarios: a 2-per-second limiter, a sub-1-per-second (0.1) limiter,
    and update() semantics (no-op on same rate, rescaled balance on change).
    """
    sampler = RateLimitingSampler(2)
    sampler.rate_limiter.balance = 2.0
    # stop time by overwriting timestamp() function to always return
    # the same time
    ts = time.time()
    sampler.rate_limiter.last_tick = ts
    with mock.patch('jaeger_client.rate_limiter.RateLimiter.timestamp') \
            as mock_time:
        mock_time.side_effect = lambda: ts  # always return same time
        assert sampler.rate_limiter.timestamp() == ts
        sampled, _ = sampler.is_sampled(0)
        assert sampled, 'initial balance allows first item'
        sampled, _ = sampler.is_sampled(0)
        assert sampled, 'initial balance allows second item'
        sampled, _ = sampler.is_sampled(0)
        assert not sampled, 'initial balance exhausted'
        # move time 250ms forward, not enough credits to pay for one sample
        mock_time.side_effect = lambda: ts + 0.25
        sampled, _ = sampler.is_sampled(0)
        assert not sampled, 'not enough time passed for full item'
        # move time 500ms forward, now enough credits to pay for one sample
        mock_time.side_effect = lambda: ts + 0.5
        sampled, _ = sampler.is_sampled(0)
        assert sampled, 'enough time for new item'
        sampled, _ = sampler.is_sampled(0)
        assert not sampled, 'no more balance'
        # move time 5s forward, enough to accumulate credits for 10 samples,
        # but it should still be capped at 2
        # NOTE(review): this sets an attribute on the sampler, not on
        # sampler.rate_limiter.last_tick as done above -- likely a no-op;
        # confirm the intended target.
        sampler.last_tick = ts  # reset the timer
        mock_time.side_effect = lambda: ts + 5
        sampled, _ = sampler.is_sampled(0)
        assert sampled, 'enough time for new item'
        sampled, _ = sampler.is_sampled(0)
        assert sampled, 'enough time for second new item'
        for i in range(0, 8):
            sampled, tags = sampler.is_sampled(0)
            assert not sampled, 'but no further, since time is stopped'
        assert tags == get_tags('ratelimiting', 2)
    sampler.close()
    assert '%s' % sampler == 'RateLimitingSampler(2)'
    # Test with rate limit of greater than 1 second
    sampler = RateLimitingSampler(0.1)
    sampler.rate_limiter.balance = 1.0
    ts = time.time()
    sampler.rate_limiter.last_tick = ts
    with mock.patch('jaeger_client.rate_limiter.RateLimiter.timestamp') \
            as mock_time:
        mock_time.side_effect = lambda: ts  # always return same time
        assert sampler.rate_limiter.timestamp() == ts
        sampled, _ = sampler.is_sampled(0)
        assert sampled, 'initial balance allows first item'
        sampled, _ = sampler.is_sampled(0)
        assert not sampled, 'initial balance exhausted'
        # move time 11s forward, enough credits to pay for one sample
        mock_time.side_effect = lambda: ts + 11
        sampled, _ = sampler.is_sampled(0)
        assert sampled
    sampler.close()
    assert '%s' % sampler == 'RateLimitingSampler(0.1)'
    # Test update
    sampler = RateLimitingSampler(3.0)
    sampler.rate_limiter.balance = 3.0
    ts = time.time()
    sampler.rate_limiter.last_tick = ts
    with mock.patch('jaeger_client.rate_limiter.RateLimiter.timestamp') \
            as mock_time:
        mock_time.side_effect = lambda: ts  # always return same time
        assert sampler.rate_limiter.timestamp() == ts
        sampled, _ = sampler.is_sampled(0)
        assert sampled
        assert sampler.rate_limiter.balance == 2.0
        assert '%s' % sampler == 'RateLimitingSampler(3.0)'
        sampler.update(3.0)
        assert '%s' % sampler == \
            'RateLimitingSampler(3.0)', 'should short cirtcuit if rate is the same'
        # update(2.0) rescales the remaining balance: 2.0 * (2/3) == 4/3.
        sampler.update(2.0)
        assert sampler.rate_limiter.balance == 4.0 / 3.0
        assert '%s' % sampler == 'RateLimitingSampler(2.0)'
    sampler.close()
def test_guaranteed_throughput_probabilistic_sampler():
    """Exercise tag selection in GuaranteedThroughputProbabilisticSampler.

    Trace ids below MAX_INT take the probabilistic branch; ids above it fall
    back to the rate-limited lower bound until its credit balance runs out.
    """
    sampler = GuaranteedThroughputProbabilisticSampler('op', 2, 0.5)
    # Pre-load the lower-bound limiter with two credits.
    sampler.lower_bound_sampler.rate_limiter.balance = 2.0

    decision, tag_set = sampler.is_sampled(MAX_INT - 10)
    assert decision
    assert tag_set == get_tags('probabilistic', 0.5)

    decision, tag_set = sampler.is_sampled(MAX_INT + 10)
    assert decision
    assert tag_set == get_tags('lowerbound', 0.5)

    # Second lower-bound hit exhausts the two credits.
    decision, _ = sampler.is_sampled(MAX_INT + 10)
    assert not decision
    assert str(sampler) == 'GuaranteedThroughputProbabilisticSampler(op, 0.500000, 2.000000)'

    # update() installs a new lower bound and probability.
    sampler.update(3, 0.51)
    sampler.lower_bound_sampler.rate_limiter.balance = 3.0

    decision, tag_set = sampler.is_sampled(MAX_INT - 10)
    assert decision
    assert tag_set == get_tags('probabilistic', 0.51)

    decision, tag_set = sampler.is_sampled(MAX_INT + (MAX_INT / 4))
    assert decision
    assert tag_set == get_tags('lowerbound', 0.51)

    assert str(sampler) == 'GuaranteedThroughputProbabilisticSampler(op, 0.510000, 3.000000)'
    sampler.close()
def test_adaptive_sampler():
    """AdaptiveSampler picks per-operation samplers and honors max_operations.

    NOTE(review): this test depends on the exact sequence of is_sampled()
    calls, since each call mutates per-operation sampler state (rate-limiter
    balances); do not reorder.
    """
    # Per-operation strategy for 'op'; all other operations use the defaults.
    strategies = {
        'defaultSamplingProbability': 0.51,
        'defaultLowerBoundTracesPerSecond': 3,
        'perOperationStrategies':
            [
                {
                    'operation': 'op',
                    'probabilisticSampling': {
                        'samplingRate': 0.5
                    }
                }
            ]
    }
    sampler = AdaptiveSampler(strategies, 2)
    sampled, tags = sampler.is_sampled(MAX_INT - 10, 'op')
    assert sampled
    assert tags == get_tags('probabilistic', 0.5)
    # This operation is seen for the first time by the sampler
    sampled, tags = sampler.is_sampled(MAX_INT - 10, 'new_op')
    assert sampled
    assert tags == get_tags('probabilistic', 0.51)
    ts = time.time()
    with mock.patch('jaeger_client.rate_limiter.RateLimiter.timestamp') \
            as mock_time:
        # Move time forward by a second to guarantee the rate limiter has enough credits
        mock_time.side_effect = lambda: ts + 1
        sampled, tags = sampler.is_sampled(MAX_INT + (MAX_INT / 4), 'new_op')
        assert sampled
        assert tags == get_tags('lowerbound', 0.51)
    # This operation is seen for the first time by the sampler but surpasses
    # max_operations of 2. The default probabilistic sampler will be used
    sampled, tags = sampler.is_sampled(MAX_INT - 10, 'new_op_2')
    assert sampled
    assert tags == get_tags('probabilistic', 0.51)
    sampled, _ = sampler.is_sampled(MAX_INT + (MAX_INT / 4), 'new_op_2')
    assert not sampled
    assert '%s' % sampler == 'AdaptiveSampler(0.510000, 3.000000, 2)'
    # Update the strategies
    strategies = {
        'defaultSamplingProbability': 0.52,
        'defaultLowerBoundTracesPerSecond': 4,
        'perOperationStrategies':
            [
                {
                    'operation': 'op',
                    'probabilisticSampling': {
                        'samplingRate': 0.52
                    }
                },
                {
                    'operation': 'new_op_3',
                    'probabilisticSampling': {
                        'samplingRate': 0.53
                    }
                }
            ]
    }
    sampler.update(strategies)
    # The probability for op has been updated
    sampled, tags = sampler.is_sampled(MAX_INT - 10, 'op')
    assert sampled
    assert tags == get_tags('probabilistic', 0.52)
    # A new operation has been added
    sampled, tags = sampler.is_sampled(MAX_INT - 10, 'new_op_3')
    assert sampled
    assert tags == get_tags('probabilistic', 0.53)
    assert '%s' % sampler == 'AdaptiveSampler(0.520000, 4.000000, 2)'
    sampler.close()
def test_adaptive_sampler_default_values():
    """AdaptiveSampler built from empty strategies uses library defaults."""
    adaptive_sampler = AdaptiveSampler({}, 2)
    assert '%s' % adaptive_sampler == \
        'AdaptiveSampler(0.001000, 0.001667, 2)', 'sampler should use default values'
    sampled, tags = adaptive_sampler.is_sampled(0, 'op')
    assert sampled
    assert tags == \
        get_tags('probabilistic', 0.001), 'should use default probability'
    assert '%s' % adaptive_sampler.samplers['op'] == \
        'GuaranteedThroughputProbabilisticSampler(op, 0.001000, 0.001667)'
    # Partial update: only the default lower bound and one new operation given.
    adaptive_sampler.update(strategies={
        'defaultLowerBoundTracesPerSecond': 4,
        'perOperationStrategies':
            [
                {
                    'operation': 'new_op',
                    'probabilisticSampling': {
                        'samplingRate': 0.002
                    }
                }
            ]
    })
    assert '%s' % adaptive_sampler == 'AdaptiveSampler(0.001000, 4.000000, 2)'
    sampled, tags = adaptive_sampler.is_sampled(0, 'new_op')
    assert sampled
    assert tags == get_tags('probabilistic', 0.002)
    assert '%s' % adaptive_sampler.samplers['new_op'] == \
        'GuaranteedThroughputProbabilisticSampler(new_op, 0.002000, 4.000000)'
    sampled, tags = adaptive_sampler.is_sampled(0, 'op')
    assert sampled
    assert tags == get_tags('probabilistic', 0.001)
    # TODO ruh roh, the lowerbound isn't changed
    # if the operation isn't included in perOperationStrategies
    assert '%s' % adaptive_sampler.samplers['op'] == \
        'GuaranteedThroughputProbabilisticSampler(op, 0.001000, 0.001667)'
def test_sampler_equality():
    """Samplers compare equal only when both type and configuration match."""
    # Same type, same configuration -> equal; same type, different config -> not.
    const_a = ConstSampler(True)
    const_b = ConstSampler(True)
    const_off = ConstSampler(False)
    assert const_a == const_b
    assert const_a != const_off

    prob_a = ProbabilisticSampler(rate=0.01)
    prob_b = ProbabilisticSampler(rate=0.01)
    prob_c = ProbabilisticSampler(rate=0.02)
    assert prob_a == prob_b
    assert prob_a != prob_c
    assert const_a != prob_a

    rate_a = RateLimitingSampler(max_traces_per_second=0.01)
    rate_b = RateLimitingSampler(max_traces_per_second=0.01)
    rate_c = RateLimitingSampler(max_traces_per_second=0.02)
    assert rate_a == rate_b
    assert rate_a != rate_c
    # Cross-type comparisons are never equal.
    assert rate_a != const_a
    assert rate_a != prob_a
def test_remotely_controlled_sampler():
    """RemoteControlledSampler delegates decisions, polls, and closes cleanly."""
    # With no init_sampler, the default probabilistic sampler answers.
    sampler = RemoteControlledSampler(
        channel=mock.MagicMock(),
        service_name='x'
    )
    sampled, tags = sampler.is_sampled(1)
    assert sampled
    assert tags == get_tags('probabilistic', DEFAULT_SAMPLING_PROBABILITY)
    # An explicit init_sampler is invoked once during construction ...
    init_sampler = mock.MagicMock()
    init_sampler.is_sampled = mock.MagicMock()
    channel = mock.MagicMock()
    channel.io_loop = None
    sampler = RemoteControlledSampler(
        channel=channel,
        service_name='x',
        init_sampler=init_sampler,
        logger=mock.MagicMock(),
    )
    assert init_sampler.is_sampled.call_count == 1
    # ... and once more per delegated sampling decision.
    sampler.is_sampled(1)
    assert init_sampler.is_sampled.call_count == 2
    sampler.io_loop = mock.MagicMock()
    # _init_polling schedules one call on the io_loop.
    # noinspection PyProtectedMember
    sampler._init_polling()
    assert sampler.io_loop.call_later.call_count == 1
    sampler._create_periodic_callback = mock.MagicMock()
    # noinspection PyProtectedMember
    sampler._delayed_polling()
    sampler.close()
    # max_operations=None falls back to the library default.
    sampler = RemoteControlledSampler(
        channel=mock.MagicMock(),
        service_name='x',
        max_operations=None,
    )
    assert sampler.max_operations == DEFAULT_MAX_OPERATIONS
    sampler.close()
    # After close(), polling entry points must not restart the sampler.
    assert not sampler.running
    sampler._init_polling()
    assert not sampler.running
    sampler._delayed_polling()
    assert not sampler.running
# noinspection PyProtectedMember
def test_sampling_request_callback():
channel = mock.MagicMock()
channel.io_loop = mock.MagicMock()
error_reporter = mock.MagicMock()
error_reporter.error = mock.MagicMock()
sampler = RemoteControlledSampler(
channel=channel,
service_name='x',
error_reporter=error_reporter,
max_operations=10,
| |
# repository: LMNS3d/sharpy
"""
@modified <NAME>
"""
import ctypes as ct
import numpy as np
import scipy as sc
import os
import itertools
import warnings
import sharpy.structure.utils.xbeamlib as xbeamlib
from sharpy.utils.solver_interface import solver, BaseSolver
import sharpy.utils.settings as settings
import sharpy.utils.algebra as algebra
import sharpy.utils.cout_utils as cout
import sharpy.structure.utils.modalutils as modalutils
@solver
class Modal(BaseSolver):
"""
``Modal`` solver class, inherited from ``BaseSolver``
Extracts the ``M``, ``K`` and ``C`` matrices from the ``Fortran`` library for the beam. Depending on the choice of
modal projection, these may or may not be transformed to a state-space form to compute the eigenvalues and mode shapes
of the structure.
"""
solver_id = 'Modal'
solver_classification = 'Linear'
settings_types = dict()
settings_default = dict()
settings_description = dict()
settings_types['print_info'] = 'bool'
settings_default['print_info'] = True
settings_description['print_info'] = 'Write status to screen'
settings_types['folder'] = 'str'
settings_default['folder'] = './output'
settings_description['folder'] = 'Output folder'
# solution options
settings_types['rigid_body_modes'] = 'bool'
settings_default['rigid_body_modes'] = False
settings_description['rigid_body_modes'] = 'Write modes with rigid body mode shapes'
settings_types['use_undamped_modes'] = 'bool' # basis for modal projection
settings_default['use_undamped_modes'] = True
settings_description['use_undamped_modes'] = 'Project the modes onto undamped mode shapes'
settings_types['NumLambda'] = 'int' # no. of different modes to retain
settings_default['NumLambda'] = 20 # doubles if use_undamped_modes is False
settings_description['NumLambda'] = 'Number of modes to retain'
settings_types['keep_linear_matrices'] = 'bool' # attach linear M,C,K matrices to output dictionary
settings_default['keep_linear_matrices'] = True
settings_description['keep_linear_matrices'] = 'Save M, C and K matrices to output dictionary'
# output options
settings_types['write_modes_vtk'] = 'bool' # write displacements mode shapes in vtk file
settings_default['write_modes_vtk'] = True
settings_description['write_modes_vtk'] = 'Write Paraview files with mode shapes'
settings_types['print_matrices'] = 'bool' # print M,C,K matrices to dat file
settings_default['print_matrices'] = False
settings_description['print_matrices'] = 'Write M, C and K matrices to file'
settings_types['write_dat'] = 'bool' # write modes shapes/freq./damp. to dat file
settings_default['write_dat'] = True
settings_description['write_dat'] = 'Write mode shapes, frequencies and damping to file'
settings_types['continuous_eigenvalues'] = 'bool'
settings_default['continuous_eigenvalues'] = False
settings_description['continuous_eigenvalues'] = 'Use continuous time eigenvalues'
settings_types['dt'] = 'float'
settings_default['dt'] = 0
settings_description['dt'] = 'Time step to compute discrete time eigenvalues'
settings_types['delta_curved'] = 'float'
settings_default['delta_curved'] = 1e-2
settings_description['delta_curved'] = 'Threshold for linear expressions in rotation formulas'
settings_types['plot_eigenvalues'] = 'bool'
settings_default['plot_eigenvalues'] = False
settings_description['plot_eigenvalues'] = 'Plot to screen root locus diagram'
settings_types['max_rotation_deg'] = 'float'
settings_default['max_rotation_deg'] = 15.
settings_description['max_rotation_deg'] = 'Scale mode shape to have specified maximum rotation'
settings_types['max_displacement'] = 'float'
settings_default['max_displacement'] = 0.15
settings_description['max_displacement'] = 'Scale mode shape to have specified maximum displacement'
settings_types['use_custom_timestep'] = 'int'
settings_default['use_custom_timestep'] = -1
settings_description['use_custom_timestep'] = 'If > -1, it will use that time step geometry for calculating the modes'
settings_types['rigid_modes_cg'] = 'bool'
settings_default['rigid_modes_cg'] = False
settings_description['rigid_modes_cg'] = 'Modify the ridid body modes such that they are defined wrt to the CG'
settings_table = settings.SettingsTable()
__doc__ += settings_table.generate(settings_types, settings_default, settings_description)
def __init__(self):
self.data = None
self.settings = None
self.folder = None
self.eigenvalue_table = None
self.filename_freq = None
self.filename_damp = None
self.filename_shapes = None
self.rigid_body_motion = None
def initialise(self, data, custom_settings=None):
self.data = data
if custom_settings is None:
self.settings = data.settings[self.solver_id]
else:
self.settings = custom_settings
settings.to_custom_types(self.settings,
self.settings_types,
self.settings_default)
self.rigid_body_motion = self.settings['rigid_body_modes'].value
self.data.ts = len(self.data.structure.timestep_info) - 1
if self.settings['use_custom_timestep'].value > -1:
self.data.ts = self.settings['use_custom_timestep'].value
# load info from dyn dictionary
self.data.structure.add_unsteady_information(
self.data.structure.dyn_dict,
self.data.ts)
# create folder for containing files if necessary
if not os.path.exists(self.settings['folder']):
os.makedirs(self.settings['folder'])
self.folder = (self.settings['folder'] + '/' +
self.data.settings['SHARPy']['case'] +
'/beam_modal_analysis/')
if not os.path.exists(self.folder):
os.makedirs(self.folder)
self.filename_freq = (self.folder +
'tstep' + ("%06d" % self.data.ts) +
'_ModalFrequencies.dat')
self.filename_damp = (self.folder +
'tstep' + ("%06d" % self.data.ts) +
'_ModalDamping.dat')
self.filename_shapes = (self.folder +
'tstep' + ("%06d" % self.data.ts) +
'_ModalShape')
if self.settings['print_info']:
cout.cout_wrap('Structural eigenvalues')
# self.eigenvalue_table = cout.TablePrinter(7, 12, ['d', 'f', 'f', 'f', 'f', 'f', 'f'])
# self.eigenvalue_table.print_header(['mode', 'eval_real', 'eval_imag', 'freq_n (Hz)', 'freq_d (Hz)',
# 'damping', 'period (s)'])
self.eigenvalue_table = modalutils.EigenvalueTable()
self.eigenvalue_table.print_header(self.eigenvalue_table.headers)
def run(self):
r"""
Extracts the eigenvalues and eigenvectors of the clamped structure.
If ``use_undamped_modes == True`` then the free vibration modes of the clamped structure are found solving:
.. math:: \mathbf{M\,\ddot{\eta}} + \mathbf{K\,\eta} = 0
that flows down to solving the non-trivial solutions to:
.. math:: (-\omega_n^2\,\mathbf{M} + \mathbf{K})\mathbf{\Phi} = 0
On the other hand, if the damped modes are chosen because the system has damping, the free vibration
modes are found solving the equation of motion of the form:
.. math:: \mathbf{M\,\ddot{\eta}} + \mathbf{C\,\dot{\eta}} + \mathbf{K\,\eta} = 0
which can be written in state space form, with the state vector :math:`\mathbf{x} = [\eta^T,\,\dot{\eta}^T]^T`
as
.. math:: \mathbf{\dot{x}} = \begin{bmatrix} 0 & \mathbf{I} \\ -\mathbf{M^{-1}K} & -\mathbf{M^{-1}C}
\end{bmatrix} \mathbf{x}
and therefore the mode shapes and frequencies correspond to the solution of the eigenvalue problem
.. math:: \mathbf{A\,\Phi} = \mathbf{\Lambda\,\Phi}.
From the eigenvalues, the following system characteristics are provided:
* Natural Frequency: :math:`\omega_n = |\lambda|`
* Damped natural frequency: :math:`\omega_d = \text{Im}(\lambda) = \omega_n \sqrt{1-\zeta^2}`
* Damping ratio: :math:`\zeta = -\frac{\text{Re}(\lambda)}{\omega_n}`
In addition to the above, the modal output dictionary includes the following:
* ``M``: Tangent mass matrix
* ``C``: Tangent damping matrix
* ``K``: Tangent stiffness matrix
* ``Ccut``: Modal damping matrix :math:`\mathbf{C}_m = \mathbf{\Phi}^T\mathbf{C}\mathbf{\Phi}`
* ``Kin_damp``: Forces gain matrix (when damped): :math:`K_{in} = \mathbf{\Phi}_L^T \mathbf{M}^{-1}`
* ``eigenvectors``: Right eigenvectors
* ``eigenvectors_left``: Left eigenvectors given when the system is damped
Returns:
PreSharpy: updated data object with modal analysis as part of the last structural time step.
"""
# Number of degrees of freedom
num_str_dof = self.data.structure.num_dof.value
if self.rigid_body_motion:
num_rigid_dof = 10
else:
num_rigid_dof = 0
num_dof = num_str_dof + num_rigid_dof
# if NumLambda
# Initialize matrices
FullMglobal = np.zeros((num_dof, num_dof),
dtype=ct.c_double, order='F')
FullKglobal = np.zeros((num_dof, num_dof),
dtype=ct.c_double, order='F')
FullCglobal = np.zeros((num_dof, num_dof),
dtype=ct.c_double, order='F')
if self.rigid_body_motion:
# Settings for the assembly of the matrices
# try:
# full_matrix_settings = self.data.settings['StaticCoupled']['structural_solver_settings']
# full_matrix_settings['dt'] = ct.c_double(0.01) # Dummy: required but not used
# full_matrix_settings['newmark_damp'] = ct.c_double(1e-2) # Dummy: required but not used
# except KeyError:
# full_matrix_settings = self.data.settings['DynamicCoupled']['structural_solver_settings']
import sharpy.solvers._basestructural as basestructuralsolver
full_matrix_settings = basestructuralsolver._BaseStructural().settings_default
settings.to_custom_types(full_matrix_settings, basestructuralsolver._BaseStructural().settings_types, full_matrix_settings)
# Obtain the tangent mass, damping and stiffness matrices
FullMglobal, FullCglobal, FullKglobal, FullQ = xbeamlib.xbeam3_asbly_dynamic(self.data.structure,
self.data.structure.timestep_info[self.data.ts],
full_matrix_settings)
else:
xbeamlib.cbeam3_solv_modal(self.data.structure,
self.settings, self.data.ts,
FullMglobal, FullCglobal, FullKglobal)
# Print matrices
if self.settings['print_matrices'].value:
np.savetxt(self.folder + "Mglobal.dat", FullMglobal, fmt='%.12f',
delimiter='\t', newline='\n')
np.savetxt(self.folder + "Cglobal.dat", FullCglobal, fmt='%.12f',
delimiter='\t', newline='\n')
np.savetxt(self.folder + "Kglobal.dat", FullKglobal, fmt='%.12f',
delimiter='\t', newline='\n')
# Check if the damping matrix is zero (issue working)
if self.settings['use_undamped_modes'].value:
zero_FullCglobal = True
for i,j in itertools.product(range(num_dof),range(num_dof)):
if np.absolute(FullCglobal[i, j]) > np.finfo(float).eps:
zero_FullCglobal = False
warnings.warn('Projecting a system with damping on undamped modal shapes')
break
# Check if the damping matrix is skew-symmetric
# skewsymmetric_FullCglobal = True
# for i in range(num_dof):
# for j in range(i:num_dof):
# if((i==j) and (np.absolute(FullCglobal[i, j]) > np.finfo(float).eps)):
# skewsymmetric_FullCglobal = False
# elif(np.absolute(FullCglobal[i, j] + FullCglobal[j, i]) > np.finfo(float).eps):
# skewsymmetric_FullCglobal = False
NumLambda = min(num_dof, self.settings['NumLambda'].value)
if self.settings['use_undamped_modes'].value:
# Solve for eigenvalues (with unit eigenvectors)
eigenvalues,eigenvectors=np.linalg.eig(
np.linalg.solve(FullMglobal,FullKglobal))
eigenvectors_left=None
# Define vibration frequencies and damping
freq_natural = np.sqrt(eigenvalues)
order = np.argsort(freq_natural)[:NumLambda]
freq_natural = freq_natural[order]
#freq_damped = freq_natural
eigenvalues = eigenvalues[order]
eigenvectors = eigenvectors[:,order]
damping = np.zeros((NumLambda,))
else:
# State-space model
Minv_neg = -np.linalg.inv(FullMglobal)
A = np.zeros((2*num_dof, 2*num_dof), dtype=ct.c_double, order='F')
A[:num_dof, num_dof:] = np.eye(num_dof)
A[num_dof:, :num_dof] = np.dot(Minv_neg, FullKglobal)
A[num_dof:, num_dof:] = np.dot(Minv_neg, FullCglobal)
# Solve the eigenvalues problem
eigenvalues, eigenvectors_left, eigenvectors = \
sc.linalg.eig(A,left=True,right=True)
freq_natural = np.abs(eigenvalues)
damping = np.zeros_like(freq_natural)
iiflex = freq_natural > 1e-16*np.mean(freq_natural) # Pick only structural modes
damping[iiflex] = -eigenvalues[iiflex].real/freq_natural[iiflex]
freq_damped = freq_natural * np.sqrt(1-damping**2)
# Order & downselect complex conj:
# this algorithm assumes that complex conj eigenvalues appear consecutively
# in eigenvalues. For symmetrical systems, this relies on the fact that:
# - complex conj eigenvalues have the same absolute value (to machine
# precision)
# - couples of eigenvalues with multiplicity higher than 1, show larger
# numerical difference
order = np.argsort(freq_damped)[:2*NumLambda]
freq_damped = freq_damped[order]
freq_natural = freq_natural[order]
eigenvalues = eigenvalues[order]
include = np.ones((2*NumLambda,), dtype=np.bool)
ii = 0
tol_rel = np.finfo(float).eps * freq_damped[ii]
while ii < 2*NumLambda:
# check complex
if np.abs(eigenvalues[ii].imag) > 0.:
if np.abs(eigenvalues[ii+1].real-eigenvalues[ii].real) > tol_rel or\
np.abs(eigenvalues[ii+1].imag+eigenvalues[ii].imag) > tol_rel:
raise NameError('Complex conjugate expected but not found!')
ii += 1
| |
import geopandas
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import cm
import matplotlib.ticker as mtick
from matplotlib.ticker import MaxNLocator
import datetime
import datadotworld as dw
def import_geo_data(filename, index_col = "Date", rename_FIPS = "FIPS"):
    """Load the county-level shapefile and index it by integer FIPS code.

    The state and county FIPS fragments are concatenated into one integer
    code, and each county's area in square kilometres is derived from an
    equal-area ("cea") projection of the geometries.
    """
    gdf = geopandas.read_file(filename = filename, index_col = index_col)
    gdf.rename(columns = {"COUNTYFP": rename_FIPS, "State": "state"},
               inplace = True)
    combined_code = gdf["STATEFP"].astype(str) + gdf[rename_FIPS].astype(str)
    gdf[rename_FIPS] = combined_code.astype(np.int64)
    gdf.set_index("fips_code", inplace = True)
    # Equal-area projection yields areas in m^2; convert to km^2.
    gdf["area (sq. km)"] = gdf.to_crs({"proj": "cea"}).area / 10 ** 6
    return gdf
def import_covid_data(filename, fips_name):
    """Fetch the AP / Johns Hopkins county time series via the data.world API.

    NOTE(review): ``filename`` is accepted but unused -- the data comes from
    the data.world dataset, not a local file; confirm the parameter is kept
    for interface compatibility.
    """
    # Load COVID19 county data using datadotworld API
    # Data provided by Johns Hopkins, file provided by Associated Press
    dataset = dw.load_dataset("associatedpress/johns-hopkins-coronavirus-case-tracker")
    covid_data = dataset.dataframes["2_cases_and_deaths_by_county_timeseries"]
    # Drop rows whose FIPS code is >= 57000 (presumably non-state territories).
    covid_data = covid_data[covid_data[fips_name] < 57000]
    covid_data[fips_name] = covid_data[fips_name].astype(int)
    covid_data.set_index([fips_name, "date"], inplace = True)
    # Attach two-letter abbreviations (state_dict is module-level).
    covid_data.loc[:, "state_abr"] = ""
    for full_name, abbreviation in state_dict.items():
        covid_data.loc[covid_data["state"] == full_name, "state_abr"] = abbreviation
    return covid_data
def create_covid_geo_dataframe(covid_data, map_data):
    """Build a GeoDataFrame indexed by (fips_code, date).

    The geometry frame has no time dimension, so one copy of the county
    geometries is stacked per date in the module-level ``dates`` iterable,
    then the COVID measure columns are joined on.

    Fix: ``DataFrame.append`` was removed in pandas 2.0; the per-date frames
    are now collected in a list and concatenated once, which is also O(n)
    instead of O(n^2) in the number of dates.
    """
    per_date_frames = []
    for date in dates:
        df = covid_data[covid_data.index.get_level_values("date") == date]
        counties = df.index.get_level_values("fips_code")
        # .copy() so adding the "date" column never mutates map_data slices.
        agg_df = map_data.loc[counties].copy()
        agg_df["date"] = df.index.get_level_values("date")[0]
        per_date_frames.append(agg_df)
    matching_gpd = geopandas.GeoDataFrame(pd.concat(per_date_frames),
                                          crs = map_data.crs)
    matching_gpd.reset_index(inplace=True)
    matching_gpd.set_index(["fips_code", "date"], inplace = True)
    # Copy each COVID measure onto the stacked geometry frame (index-aligned).
    for key, val in covid_data.items():
        matching_gpd[key] = val
    matching_gpd["Location"] = matching_gpd["NAME"] + ", " + \
        matching_gpd["state_abr"]
    return matching_gpd
def create_state_dataframe(covid_data):
    """Aggregate the county-level data into state-level sums.

    Per-capita and rolling-average columns are dropped because a plain sum
    of county values is not meaningful for them; they are recalculated
    elsewhere.

    Fix: removed ``ignore_index=False`` from ``GroupBy.sum`` -- it is not a
    valid keyword for the grouped sum and raises ``TypeError`` on current
    pandas.
    """
    states = list(state_dict.keys())
    states.remove("District of Columbia")
    state_data = (covid_data.reset_index()
                  .set_index(["date", "state", "fips_code"])
                  .groupby(["state", "date"])
                  .sum(numeric_only = True))
    drop_cols = ["uid", "location_name", "cumulative_cases_per_100_000",
                 "cumulative_deaths_per_100_000", "new_cases_per_100_000",
                 "new_deaths_per_100_000", 'new_cases_rolling_7_day_avg',
                 'new_deaths_rolling_7_day_avg']
    # These values will be recalculated since the sum of the county values
    # would need to be weighted to be meaningful
    state_data.drop(drop_cols, axis = 1, inplace = True)
    state_data["location_type"] = "state"
    for state in states:
        state_mask = state_data.index.get_level_values("state") == state
        state_data.loc[state_mask, "Location"] = state
        state_data.loc[state_mask, "state_abr"] = state_dict[state]
    return state_data
def create_new_vars(covid_data, moving_average_days):
    """Add per-million cumulative, daily, and moving-average columns in place.

    For both cases and deaths: the cumulative count is scaled by population,
    the daily series is the within-entity diff of the cumulative count, and
    the MA column is a rolling mean over ``moving_average_days`` rows.

    NOTE(review): the rolling mean runs over raw row order and can straddle
    the boundary between two entities -- confirm this is intended.
    """
    per_million = 10 ** 6
    group_level = covid_data.index.names[0]
    for measure in ("cases", "deaths"):
        cumulative = covid_data["cumulative_" + measure]
        population = covid_data["total_population"]
        base = measure.title() + " per Million"
        covid_data[base] = cumulative.div(population).mul(per_million)
        covid_data["Daily " + base] = (cumulative.groupby(group_level)
                                       .diff(1).div(population).mul(per_million))
        covid_data["Daily " + base + " MA"] = \
            covid_data["Daily " + base].rolling(moving_average_days).mean()
def create_zero_day_dict(covid_data, start_date):
    """Build per-entity series re-based to each entity's "day zero".

    For each tracked column, an entity's series keeps only the rows (after
    ``start_date``) where the matching cumulative threshold column exceeds
    the day-zero value: 10 for cases columns, 2 for deaths columns.

    Returns:
        tuple: ``(zero_day_dict, day_zero_val)``. ``zero_day_dict`` maps
        column name -> entity -> trimmed Series; ``day_zero_val`` maps
        column name -> the threshold used.

    Fix: removed a leftover ``print(entity)`` debug statement that wrote one
    line per entity per column to stdout.
    """
    zero_day_dict = {}
    for key in ["Cases", "Deaths"]:
        zero_day_dict[key + " per Million"] = {}
        zero_day_dict["Daily " + key + " per Million MA"] = {}
    day_zero_val = {}
    for key in zero_day_dict:
        day_zero_val[key] = 2 if "Deaths" in key else 10
    entities = sorted(set(covid_data.index.get_level_values(0)))
    for key in zero_day_dict.keys():
        vals = covid_data[key]
        # Thresholds always come from the cumulative per-million columns.
        thresh_vals = covid_data["Deaths per Million"] if "Deaths" in key else \
            covid_data["Cases per Million"]
        dz_val = day_zero_val[key]
        for entity in entities:
            dpc = vals[vals.index.get_level_values(0) == entity][thresh_vals > dz_val]
            dpc = dpc[dpc.index.get_level_values("date") > start_date]
            zero_day_dict[key][entity] = dpc.copy()
    return zero_day_dict, day_zero_val
def plot_zero_day_data(state_name, state, covid_data, zero_day_dict,
                       day_zero_val, keys, entity_type, entities, pp,
                       n_largest = 10, bold_entities = None, daily = False):
    """Plot "days since day zero" series for every entity, save to PDF + PNG.

    Two stacked axes are used: cases on top, deaths below. Cumulative plots
    use a log y-scale; daily plots use a linear one.

    NOTE(review): reads module-level globals ``end_date``, ``state_dict`` and
    ``colors_dict`` (and, via identify_plot_locs, ``n_largest``).
    NOTE(review): if ``entities`` is empty, ``max_y``/``ax`` are never bound
    and the attribute-setting code below would raise NameError -- confirm
    callers always pass a non-empty list.
    """
    max_x = 0
    fig, a = plt.subplots(2,1, figsize = (48, 32))
    for key in keys:
        # Daily plots use the moving-average column of the same measure.
        val_key = "Daily " + key + " MA" if daily else key
        if len(entities) > 0:
            i = 0
            j = 0
            ax = a[0] if "Cases" in key else a[1]
            max_x, max_y = plot_double_lines(ax, zero_day_dict, day_zero_val, val_key, entities, daily)
            locs, top_locs = identify_plot_locs(state_name, covid_data, bold_entities)
            for entity in entities:
                vals = zero_day_dict[val_key][entity]
                if len(vals) > 0 and entity != "District of Columbia":
                    loc = locs[locs.index.get_level_values(entity_type) == entity]["Location"][0]
                    i, j = plot_lines_and_text(ax, vals, state, state_dict, loc,
                                               top_locs, colors_dict, i, j)
        # set plot attributes
        if daily:
            ax.set_ylim(bottom = 0, top = max_y * 1.08)
        else:
            ax.set_yscale('log')
            # NOTE(review): ``max_y is not np.nan`` is an identity test and is
            # effectively always true, even for NaN values -- confirm intent.
            if max_y is not np.nan:
                ax.set_ylim(bottom = np.e ** (np.log(day_zero_val[key])), top = np.e ** (np.log(max_y * 4) ))
        vals = ax.get_yticks()
        ax.set_yticklabels([int(y) if y >= 1 else round(y,1) for y in vals])
        ax.set_ylabel(val_key)
        ax.set_xlim(right = max_x + 10)
        ax.set_xlabel("Days Since " + key + " Exceeded " + str(day_zero_val[key]))
    title = str(end_date)[:10] + "\n7 Day Moving Average" + "\nCOVID-19 in " + state_name if daily else str(end_date)[:10] + "\nCOVID-19 in " + state_name
    y_pos = .987 if daily else .95
    fig.suptitle(title , y=y_pos, fontsize = 75)
    # Save into the multi-page PDF and as a standalone PNG.
    pp.savefig(fig, bbox_inches = "tight")
    plt.savefig("statePlots/" + state + " " + val_key + ".png", bbox_inches = "tight")
    plt.show()
    plt.close()
def plot_double_lines(ax, zero_day_dict, day_zero_val, key, entities, daily):
    """Draw doubling-time guide lines and return the plot extents.

    Returns ``(max_x, max_y)``: the longest series length and largest value
    among ``entities`` for ``key`` (raised to the tallest guide line when
    guides are drawn). Guides are drawn only for cumulative plots (``daily``
    false): one faint black line per doubling period of 2, 3 and 5 days,
    each labelled at its right-hand end.
    """
    series = [zero_day_dict[key][entity] for entity in entities]
    max_x = max(len(s) for s in series)
    max_y = max(s.max() for s in series)
    if not daily:
        guides = {}
        floor = day_zero_val[key]
        for period in (2, 3, 5):
            guide = [floor * 2 ** (step / period) for step in range(9 * period)]
            guides[period] = guide
            ax.plot(guide, label = None,
                    alpha = .2, color = "k", linewidth = 5)
            ax.text(len(guide), guide[-1],
                    "X2 every \n" + str(period) + " days", alpha = .2)
        tallest = max(g[-1] for g in guides.values())
        if tallest > max_y:
            max_y = tallest
    return max_x, max_y
def identify_plot_locs(state_name, covid_data, bold_entities):
    """Select label metadata (``locs``) and the subset to draw in bold (``top_locs``).

    NOTE(review): ``n_largest`` is read from module scope here even though
    ``plot_zero_day_data`` accepts it as a parameter -- confirm this is
    intended.
    """
    if state_name == "United States":
        # National view: bold exactly the requested state abbreviations.
        locs = covid_data
        top_locs = covid_data[covid_data["state_abr"].isin(bold_entities)]
    else:
        # State view: bold the n_largest counties by total population,
        # using the first date's snapshot to pick them.
        locs = covid_data[covid_data["state"] == state_name][["Location", "state_abr", "total_population"]]
        top_locs = locs[locs.index.get_level_values("date")==locs.index.get_level_values("date")[0]]
        top_locs = top_locs[top_locs["total_population"] >= top_locs["total_population"].nlargest(n_largest).min()]["Location"]
    return locs, top_locs
def plot_lines_and_text(ax, vals, state, state_dict, loc, top_locs, colors_dict,
                        i, j):
    """Plot one entity's series, label its right-hand end, and return updated
    color counters ``(i, j)``.

    Entities in ``top_locs`` get thick solid lines and large labels; the rest
    are thin, dashed and faded.
    """
    def select_color(loc, top_locs, colors_dict, colors, i, j):
        # Cache one color per location so an entity keeps its color across
        # plots; i counts bold entities, j the others.
        val = i if loc in top_locs.values else j
        if loc not in colors_dict.keys():
            colors_dict[loc] = colors[val % 10]
        color = colors_dict[loc]
        if loc in top_locs.values: i += 1
        else: j += 1
        return color, i, j
    # NOTE(review): ``colors`` is read from module scope -- confirm it exists.
    color, i, j = select_color(loc, top_locs, colors_dict, colors, i, j)
    # NOTE(review): ``state in "U.S.A."`` is a substring test (true for e.g.
    # "A" or "U.S"); possibly meant ``state == "U.S.A."`` -- confirm before
    # changing.
    label = state_dict[loc] if state in "U.S.A." else loc[:-4].replace(" ", "\n")
    linewidth, ls, fontsize, alpha = (6, "-", 34, 1) if loc in top_locs.values else (2, "--", 24, .6)
    ax.plot(vals.values, label = label,
            ls = ls, linewidth = linewidth, alpha = alpha, color = color)
    ax.text(x = len(vals.values) - 1, y = vals.values[-1], s = label,
            fontsize = fontsize, color = color, alpha = alpha)
    return i, j
def select_data_within_bounds(data, minx, miny, maxx, maxy):
    """Keep only rows whose geometry bounding boxes lie fully inside the window.

    ``data.bounds`` supplies each geometry's envelope (minx/miny/maxx/maxy);
    a row survives only when its envelope fits within
    [minx, maxx] x [miny, maxy].
    """
    envelopes = data.bounds
    inside = ((envelopes["maxx"] <= maxx) & (envelopes["maxy"] <= maxy) &
              (envelopes["minx"] >= minx) & (envelopes["miny"] >= miny))
    return data[inside]
def plot_map(i, *fargs):
    """FuncAnimation frame callback: redraw the choropleth for ``dates[i]``.

    NOTE(review): ``ax``, ``dates``, ``key``, ``val`` and ``vmax`` are all
    read from module scope; this only works when invoked from the animation
    setup that defines them.
    """
    ax.clear()
    date = dates[i]
#    cmap = cm.get_cmap('YlOrBr', 8)
    cmap = cm.get_cmap('Reds', 4)
    # Deaths use a lower color floor than cases.
    vmin = 1 if "Deaths" in key else 10
    print(key, date)
    plt.cm.ScalarMappable(cmap=cmap, norm=cm.colors.LogNorm(vmin=vmin,
                                                            vmax =vmax))#round(vmax, len(str(vmax))-1)))
    # Only the rows for this frame's date are plotted.
    plot_df = val[val.index.get_level_values("date")==date]
    plot_df.plot(ax=ax, cax = ax, column=key, vmin=vmin ,vmax = vmax,
        cmap = cmap, legend=False, linewidth=.5, edgecolor='lightgrey',
        norm = mpl.colors.LogNorm(vmin=vmin, vmax=vmax))
    ax.set_title(str(date)[:10] + "\n" + "COVID-19 in the U.S.", fontsize = 30)
    ax.axis("off")
def init():
    """FuncAnimation initializer: draw the log-scale colorbar legend once.

    NOTE(review): ``key``, ``vmax``, ``ax`` and ``fig`` come from module
    scope; this only works when invoked from the animation setup that
    defines them.
    """
    # Create colorbar as a legend
    cmap = cm.get_cmap('Reds', 4)
    # Same color floor convention as plot_map: deaths start lower than cases.
    vmin = 1 if "Deaths" in key else 10
    print(vmin, vmax)
    size = "5%"
    sm = plt.cm.ScalarMappable(cmap=cmap, norm=cm.colors.LogNorm(vmin=vmin,
                                                            vmax =vmax))#round(vmax, len(str(vmax))-1)))
    # empty array for the data range
    sm._A = []
    # add the colorbar to the figure
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size = size, pad = 0.1)
    cbar = fig.colorbar(sm, cax=cax, cmap = cmap)
    cbar.ax.tick_params(labelsize=18)
    vals = list(cbar.ax.get_yticks())
    vals.append(vmax)
    print(vals)
    cbar.ax.yaxis.set_major_formatter(mtick.LogFormatter())
    cbar.ax.set_yticklabels([int(x) for x in vals])
    cbar.ax.set_ylabel(key, fontsize = 20)
# I maintained this dictionary to use the state abbreviations in the names of
# saved files.
state_dict = {
'Alabama': 'AL', 'Alaska': 'AK', 'Arizona': 'AZ',
'Arkansas': 'AR', 'California': 'CA', 'Colorado': 'CO', 'Connecticut': 'CT',
'Delaware': 'DE', 'District of Columbia': 'DC', 'Florida': 'FL',
'Georgia': 'GA', 'Hawaii': 'HI', 'Idaho': 'ID', 'Illinois': 'IL',
'Indiana': 'IN', 'Iowa': 'IA','Kansas': 'KS', 'Kentucky': 'KY',
'Louisiana': 'LA', 'Maine': 'ME', 'Maryland': 'MD', 'Massachusetts': 'MA',
'Michigan': 'MI', 'Minnesota': 'MN', 'Mississippi': 'MS', 'Missouri': 'MO',
'Montana': 'MT', 'Nebraska': 'NE', 'Nevada': 'NV', 'New Hampshire': 'NH',
'New Jersey': 'NJ', 'New Mexico': 'NM', 'New | |
tag: database.IgnoreThis = database.IgnoreThis.create(channelID = channel.id, authorID = VCMember.id)
tag.save()
print(f"Added: {VCMember.id}")
await channel.delete()
q.delete_instance()
embed = discord.Embed(title = f"{Emoji.archive} Force Ended Session", description = "Session has been forcefully removed.", color = discord.Colour.blue())
embed.add_field(name = "Time Spent", value = f"<@{q.authorID}> you have spent a total of {Emoji.calender} `{day} minutes` in voice channel, **{q.name}**.")
embed.set_footer(text = "WARNING: Time displayed may not be accurate.")
await ctx.send(embed = embed)
else:
await channel.delete()
embed = discord.Embed(title = f"{Emoji.warn} Partial Completion", description = "The database indicates there is no owner or data related to this voice channel but I have still deleted the channel!", color = discord.Colour.gold())
await ctx.send(embed = embed)
print(query.authorID)
else:
embed = discord.Embed(title = f"{Emoji.warn} Unknown Channel", description = "You are not the owner of this voice channel nor is this a valid channel. Please execute the command under a valid voice channel!", color = discord.Colour.red())
await ctx.send(embed = embed)
database.db.close()
@commands.command()
@commands.cooldown(1, 15, commands.BucketType.user)
async def lock(self, ctx):
    """Lock the author's private voice channel so only staff and explicitly
    permitted users can connect.

    Requirements (checked in order):
      * the author must be in a voice channel,
      * the channel must not be one of the static preset channels,
      * the channel must be under the managed category and owned by the author
        (row in VCChannelInfo keyed by (authorID, ChannelID)).
    Side effects: sets lockStatus = "1" on the DB record and rewrites the
    channel permission overwrites.
    """
    database.db.connect(reuse_if_open=True)
    member = ctx.guild.get_member(ctx.author.id)
    BOT = ctx.guild.get_member(self.botID)
    OWNER = ctx.guild.get_member(self.ownerID)
    TMOD = discord.utils.get(ctx.guild.roles, name=self.TMOD)
    MOD = discord.utils.get(ctx.guild.roles, name=self.MOD)
    SMOD = discord.utils.get(ctx.guild.roles, name=self.SMOD)
    # NOTE(review): CO is looked up but never granted access below — possibly
    # a missing set_permissions call; kept as-is to preserve behavior.
    CO = discord.utils.get(ctx.guild.roles, name=self.CO)
    VP = discord.utils.get(ctx.guild.roles, name=self.VP)
    ST = discord.utils.get(ctx.guild.roles, name=self.ST)
    voice_state = member.voice
    # `is None` instead of `== None` (idiomatic identity test).
    if voice_state is None:
        embed = discord.Embed(title = f"{Emoji.warn} Unknown Voice Channel", description = "You have to be in a voice channel you own in order to use this!", color = discord.Colour.dark_red())
        return await ctx.send(embed = embed)
    if voice_state.channel.id in self.presetChannels:
        embed = discord.Embed(title = f"{Emoji.deny} UnAuthorized Channel Modification", description = "You are not allowed to modify these channels!\n\n**Error Detection:**\n**1)** Detected Static Channels", color = discord.Colour.dark_red())
        return await ctx.send(embed = embed)
    if member.voice.channel.category_id == self.categoryID:
        query = database.VCChannelInfo.select().where((database.VCChannelInfo.authorID == ctx.author.id) & (database.VCChannelInfo.ChannelID == voice_state.channel.id))
        if query.exists():
            # Reuse the query we already built instead of rebuilding the
            # identical SELECT a second time (original issued the query twice).
            record: database.VCChannelInfo = query.get()
            record.lockStatus = "1"
            record.save()
            await member.voice.channel.set_permissions(BOT, connect = True, manage_channels = True, manage_permissions = True)
            await member.voice.channel.set_permissions(OWNER, connect = True, manage_channels = True, manage_permissions = True)
            await member.voice.channel.set_permissions(member, connect = True)
            await member.voice.channel.set_permissions(ctx.guild.default_role, connect = False)
            await member.voice.channel.set_permissions(TMOD, connect = True)
            await member.voice.channel.set_permissions(MOD, connect = True)
            await member.voice.channel.set_permissions(SMOD, connect = True)
            await member.voice.channel.set_permissions(ST, connect = True)
            await member.voice.channel.set_permissions(VP, connect = True, manage_channels = True, manage_permissions = True)
            embed = discord.Embed(title = f"{Emoji.lock} Locked Voice Channel", description = "Your voice channel has been locked and now only authorized users can join it!\n\n**NOTE:** Moderators and other Administrators will always be allowed into your voice channels!", color = discord.Colour.green())
            await ctx.send(embed = embed)
        else:
            # EAFP: the channel row may exist under a different owner, or not
            # at all — distinguish the two cases by trying to fetch it.
            try:
                q = database.VCChannelInfo.select().where(database.VCChannelInfo.ChannelID == voice_state.channel.id).get()
            except Exception:
                # narrowed from a bare except: (still covers peewee DoesNotExist)
                embed = discord.Embed(title = f"{Emoji.deny} Ownership Check Failed", description = "This isn't a valid voice channel! Please use the command on an actual voice channel thats under the correct category!", color = discord.Colour.red())
            else:
                embed = discord.Embed(title = f"{Emoji.deny} Ownership Check Failed", description = f"You are not the owner of this voice channel, please ask the original owner <@{q.authorID}>, to end it!", color = discord.Colour.red())
            finally:
                await ctx.send(embed = embed)
    else:
        embed = discord.Embed(title = f"{Emoji.warn} Unknown Channel", description = "You are not the owner of this voice channel nor is this a valid channel. Please execute the command under a channel you own!", color = discord.Colour.red())
        await ctx.send(embed = embed)
    database.db.close()
@commands.command()
async def settutor(self, ctx, tutorcode):
    """Convert the author's active private voice session into a tutor session.

    Only members holding the tutor role may use this. The session identified
    by `tutorcode` must exist in TutorBot_Sessions, and the author must be in
    a voice channel they own under the managed category; on success the
    channel's VCChannelInfo row is tagged with the tutor session ID.
    """
    TR = discord.utils.get(ctx.guild.roles, name=self.TutorRole)
    if TR not in ctx.author.roles:
        return await ctx.message.add_reaction("❌")
    member = ctx.guild.get_member(ctx.author.id)
    voice_state = member.voice
    if voice_state is None:
        embed = discord.Embed(title = f"{Emoji.warn} Unknown Voice Channel", description = "You have to be in a voice channel you own in order to use this!", color = discord.Colour.dark_red())
        return await ctx.send(embed = embed)
    tutorSession = database.TutorBot_Sessions.select().where(database.TutorBot_Sessions.SessionID == tutorcode)
    if tutorSession.exists():
        tutorSession = tutorSession.get()
        if member.voice.channel.category_id == self.categoryID:
            query = database.VCChannelInfo.select().where((database.VCChannelInfo.authorID == ctx.author.id) & (database.VCChannelInfo.ChannelID == voice_state.channel.id))
            if query.exists():
                query = query.get()
                student = await self.bot.fetch_user(tutorSession.StudentID)
                tutor = await self.bot.fetch_user(tutorSession.TutorID)
                # BUGFIX: the original called strftime on the model instance
                # itself (`tutorSession.strftime(...)`), which raises
                # AttributeError. Format the session's date field instead.
                # NOTE(review): assumes the date column is named `Date` —
                # confirm against the TutorBot_Sessions schema.
                date = tutorSession.Date.strftime("%m/%d/%Y")
                query.TutorBotSessionID = tutorcode
                query.save()
                hourlog = discord.Embed(title = "Tutor Session Convert Complete", description = f"I have successfully converted this voice session into a tutor session, when you end this session I will log this session for you.", color = discord.Colour.blue())
                # BUGFIX: closed the unbalanced "**Student:" bold marker.
                hourlog.add_field(name = "Information", value = f"**Tutor:** {tutor.mention}\n**Student:** {student.mention}\n**Date:** {date}")
                await ctx.send(embed = hourlog)
            else:
                embed = discord.Embed(title = f"{Emoji.warn} Unknown Voice Channel", description = "You have to be in a voice channel you own in order to use this!", color = discord.Colour.dark_red())
                return await ctx.send(embed = embed)
        else:
            embed = discord.Embed(title = f"{Emoji.warn} Unknown Voice Channel", description = "You have to be in a voice channel you own in order to use this!", color = discord.Colour.dark_red())
            return await ctx.send(embed = embed)
    else:
        embed = discord.Embed(title = "Invalid Session", description = "This session does not exist, please check the ID you've provided!", color = discord.Color.red())
        await ctx.send(embed = embed)
@commands.command()
@commands.cooldown(1, 15, commands.BucketType.user)
async def unlock(self, ctx):
    """Unlock the author's private voice channel (re-sync permissions with
    the category so anyone may join).

    Mirrors lock(): requires the author to be in a non-preset channel under
    the managed category that they own. Sets lockStatus = "0" on the record.
    """
    database.db.connect(reuse_if_open=True)
    member = ctx.guild.get_member(ctx.author.id)
    voice_state = member.voice
    # `is None` instead of `== None`; also dropped the unused
    # `timestamp2 = datetime.now()` local from the original.
    if voice_state is None:
        embed = discord.Embed(title = f"{Emoji.warn} Unknown Voice Channel", description = "You have to be in a voice channel you own in order to use this!", color = discord.Colour.dark_red())
        return await ctx.send(embed = embed)
    if voice_state.channel.id in self.presetChannels:
        embed = discord.Embed(title = f"{Emoji.deny} UnAuthorized Channel Modification", description = "You are not allowed to modify these channels!\n\n**Error Detection:**\n**1)** Detected Static Channels", color = discord.Colour.dark_red())
        return await ctx.send(embed = embed)
    if member.voice.channel.category_id == self.categoryID:
        query = database.VCChannelInfo.select().where((database.VCChannelInfo.authorID == ctx.author.id) & (database.VCChannelInfo.ChannelID == voice_state.channel.id))
        if query.exists():
            # Fetch once and reuse; the original re-ran the identical SELECT
            # a second time just to print lockStatus.
            record: database.VCChannelInfo = query.get()
            record.lockStatus = "0"
            record.save()
            print(record.lockStatus)
            await member.voice.channel.edit(sync_permissions=True)
            embed = discord.Embed(title = f"{Emoji.unlock} Unlocked Voice Channel", description = "Your voice channel has been unlocked and now anyone can join it!", color = discord.Colour.green())
            await ctx.send(embed = embed)
        else:
            # Distinguish "owned by someone else" from "no record at all".
            try:
                q = database.VCChannelInfo.select().where(database.VCChannelInfo.ChannelID == voice_state.channel.id).get()
            except Exception:
                # narrowed from a bare except: (still covers peewee DoesNotExist)
                embed = discord.Embed(title = f"{Emoji.deny} Ownership Check Failed", description = "This isn't a valid voice channel! Please use the command on an actual voice channel thats under the correct category!", color = discord.Colour.red())
            else:
                embed = discord.Embed(title = f"{Emoji.deny} Ownership Check Failed", description = f"You are not the owner of this voice channel, please ask the original owner <@{q.authorID}>, to end it!", color = discord.Colour.red())
            finally:
                await ctx.send(embed = embed)
    else:
        embed = discord.Embed(title = f"{Emoji.warn} Unknown Channel", description = "You are not the owner of this voice channel nor is this a valid channel. Please execute the command under a channel you own!", color = discord.Colour.red())
        await ctx.send(embed = embed)
    database.db.close()
@commands.command()
@commands.cooldown(1, 5, commands.BucketType.user)
async def permit(self, ctx, typeAction, user: discord.Member = None):
database.db.connect(reuse_if_open=True)
member = ctx.guild.get_member(ctx.author.id)
timestamp2 = datetime.now()
voice_state = member.voice
if voice_state == None:
embed = discord.Embed(title = f"{Emoji.warn} Unknown Voice Channel", description = "You have to be in a voice channel you own in order to use this!", color = discord.Colour.dark_red())
return await ctx.send(embed = embed)
else:
if voice_state.channel.id in self.presetChannels:
embed = discord.Embed(title = f"{Emoji.deny} UnAuthorized Channel Modification", description = "You are not allowed to modify these channels!\n\n**Error Detection:**\n**1)** Detected Static Channels", color = discord.Colour.dark_red())
return await ctx.send(embed = embed)
if member.voice.channel.category_id == self.categoryID:
query = database.VCChannelInfo.select().where((database.VCChannelInfo.authorID == ctx.author.id) & (database.VCChannelInfo.ChannelID == voice_state.channel.id))
if query.exists():
query = database.VCChannelInfo.select().where((database.VCChannelInfo.authorID == ctx.author.id) & (database.VCChannelInfo.ChannelID == voice_state.channel.id)).get()
print(query.lockStatus)
if query.lockStatus == "0":
embed = discord.Embed(title = f"{Emoji.deny} Invalid Setup", description = "Hey there! This voice channel is already open to the public, if you want to limit its access to certain people. Then consider using `+lock` and then come back this command!", color = discord.Colour.blurple())
return await ctx.send(embed = embed)
else:
if typeAction == "+" or typeAction.lower() == "add":
if user == None:
return await ctx.send(f"{Emoji.deny} Invalid User Provided...")
await member.voice.channel.set_permissions(user, connect = True)
embed = discord.Embed(title = f"{Emoji.addgear} Permit Setup", description = f"{user.mention} now has access to this | |
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - AttachFile action
This action lets a page have multiple attachment files.
It creates a folder <data>/pages/<pagename>/attachments
and keeps everything in there.
Form values: action=Attachment
1. with no 'do' key: returns file upload form
2. do=attach: accept file upload and saves the file in
../attachment/pagename/
3. /pagename/fname?action=Attachment&do=get[&mimetype=type]:
return contents of the attachment file with the name fname.
4. /pathname/fname, do=view[&mimetype=type]:create a page
to view the content of the file
To link to an attachment, use [[attachment:file.txt]],
to embed an attachment, use {{attachment:file.png}}.
@copyright: 2001 by <NAME> (<EMAIL>),
2001-2004 by <NAME> <<EMAIL>>,
2005 MoinMoin:AlexanderSchremmer,
2005 DiegoOngaro at ETSZONE (<EMAIL>),
2005-2013 MoinMoin:ReimarBauer,
2007-2008 MoinMoin:ThomasWaldmann
@license: GNU GPL, see COPYING for details.
"""
import os, time, zipfile, errno, datetime
from StringIO import StringIO
import tarfile
from werkzeug import http_date
from MoinMoin import log
logging = log.getLogger(__name__)
# keep both imports below as they are, order is important:
from MoinMoin import wikiutil
import mimetypes
from MoinMoin import config, packages
from MoinMoin.Page import Page
from MoinMoin.util import filesys, timefuncs
from MoinMoin.security.textcha import TextCha
from MoinMoin.events import FileAttachedEvent, FileRemovedEvent, send_event
action_name = __name__.split('.')[-1]
#############################################################################
### External interface - these are called from the core code
#############################################################################
class AttachmentAlreadyExists(Exception):
    """ raised by add_attachment() when the target attachment file already
        exists and overwrite was not requested
    """
    pass
def getBasePath(request):
    """ Get base path where page dirs for attachments are stored. """
    root = request.rootpage
    return root.getPagePath('pages')
def getAttachDir(request, pagename, create=0):
    """ Get directory where attachments for page `pagename` are stored. """
    # Reuse the page object already attached to the request when it is the
    # one being asked about — avoids constructing a fresh Page (faster).
    current = request.page
    if current and pagename == current.page_name:
        page = current
    else:
        page = Page(request, pagename)
    return page.getPagePath("attachments", check_create=create)
def absoluteName(url, pagename):
    """ Get (pagename, filename) of an attachment: link
    @param url: PageName/filename.ext or filename.ext (unicode)
    @param pagename: name of the currently processed page (unicode)
    @rtype: tuple of unicode
    @return: PageName, filename.ext
    """
    url = wikiutil.AbsPageName(pagename, url)
    # No path separator: a bare filename attached to the current page.
    if u'/' not in url:
        return pagename, url
    # Split off only the last component; everything before it is the page.
    parent, fname = url.rsplit(u'/', 1)
    return parent, fname
def get_action(request, filename, do):
    """ Map a `do` value to the action name implementing it.

    First consults the per-extension mapping configured in
    request.cfg.extensions_mapping (keyed by the file's extension); if the
    extension has no special handler for `do`, falls back to the generic
    mapping below.

    @return: action name (string) or None if `do` is unsupported
    """
    generic_do_mapping = {
        # do -> action
        'get': action_name,
        'view': action_name,
        'move': action_name,
        'del': action_name,
        'unzip': action_name,
        'install': action_name,
        'upload_form': action_name,
    }
    # Only the extension matters here; the original also bound the unused
    # `basename` part of splitext.
    ext = os.path.splitext(filename)[1]
    do_mapping = request.cfg.extensions_mapping.get(ext, {})
    action = do_mapping.get(do, None)
    if action is None:
        # we have no special support for this,
        # look up whether we have generic support:
        action = generic_do_mapping.get(do, None)
    return action
def getAttachUrl(pagename, filename, request, addts=0, do='get'):
    """ Get URL that points to attachment `filename` of page `pagename`.
    For upload url, call with do='upload_form'.
    Returns the URL to do the specified "do" action or None,
    if this action is not supported.

    NOTE(review): the `addts` parameter is never read in this function —
    presumably kept for call-site compatibility; confirm before removing.
    """
    action = get_action(request, filename, do)
    if action:
        args = dict(action=action, do=do, target=filename)
        if do not in ['get', 'view', # harmless
                      'modify', # just renders the applet html, which has own ticket
                      'move', # renders rename form, which has own ticket
                      ]:
            # create a ticket for the not so harmless operations
            # we need action= here because the current action (e.g. "show" page
            # with a macro AttachList) may not be the linked-to action, e.g.
            # "AttachFile". Also, AttachList can list attachments of another page,
            # thus we need to give pagename= also.
            args['ticket'] = wikiutil.createTicket(request,
                                                   pagename=pagename, action=action_name)
        url = request.href(pagename, **args)
        return url
    # falls through: implicit None return when `do` is not supported
def getIndicator(request, pagename):
    """ Get an attachment indicator for a page (linked clip image) or
        an empty string if no attachments exist.
    """
    _ = request.getText
    attach_dir = getAttachDir(request, pagename)
    # Bail out early when there is nothing to indicate.
    if not os.path.exists(attach_dir):
        return ''
    files = os.listdir(attach_dir)
    if not files:
        return ''
    fmt = request.formatter
    label = _('[%d attachments]') % len(files)
    icon = request.theme.make_icon('attach', vars={'attach_count': label})
    link_open = fmt.url(1, request.href(pagename, action=action_name), rel='nofollow')
    link_close = fmt.url(0)
    return link_open + icon + link_close
def getFilename(request, pagename, filename):
    """ make complete pathfilename of file "name" attached to some page "pagename"
    @param request: request object
    @param pagename: name of page where the file is attached to (unicode)
    @param filename: filename of attached file (unicode)
    @rtype: string (in config.charset encoding)
    @return: complete path/filename of attached file
    """
    name = filename
    # Encode unicode names down to the configured byte charset (Python 2).
    if isinstance(name, unicode):
        name = name.encode(config.charset)
    attach_dir = getAttachDir(request, pagename, create=1)
    return os.path.join(attach_dir, name)
def exists(request, pagename, filename):
    """ check if page <pagename> has a file <filename> attached """
    return os.path.exists(getFilename(request, pagename, filename))
def size(request, pagename, filename):
    """ return file size of file attachment """
    return os.path.getsize(getFilename(request, pagename, filename))
def info(pagename, request):
    """ Generate snippet with info on the attachment for page `pagename`. """
    _ = request.getText
    attach_dir = getAttachDir(request, pagename)
    # Count attachments; a page without an attachment dir simply has none.
    num_files = len(os.listdir(attach_dir)) if os.path.isdir(attach_dir) else 0
    page = Page(request, pagename)
    link = page.url(request, {'action': action_name})
    attach_info = _('There are <a href="%(link)s">%(count)s attachment(s)</a> stored for this page.') % {
        'count': num_files,
        'link': wikiutil.escape(link)
    }
    return "\n<p>\n%s\n</p>\n" % attach_info
def _write_stream(content, stream, bufsize=8192):
if hasattr(content, 'read'): # looks file-like
import shutil
shutil.copyfileobj(content, stream, bufsize)
elif isinstance(content, str):
stream.write(content)
else:
logging.error("unsupported content object: %r" % content)
raise
def add_attachment(request, pagename, target, filecontent, overwrite=0):
    """ save <filecontent> to an attachment <target> of page <pagename>
    filecontent can be either a str (in memory file content),
    or an open file object (file content in e.g. a tempfile).

    @param overwrite: if true, an existing attachment of the same name is
                      removed first; otherwise AttachmentAlreadyExists is raised
    @raise AttachmentAlreadyExists: target exists and overwrite is false
    @return: tuple (target, filesize) of the stored attachment
    """
    # replace illegal chars
    target = wikiutil.taintfilename(target)
    # get directory, and possibly create it
    attach_dir = getAttachDir(request, pagename, create=1)
    fpath = os.path.join(attach_dir, target).encode(config.charset)
    # NOTE: this local shadows the module-level exists() helper (harmless here,
    # but worth knowing when reading the rest of the module).
    exists = os.path.exists(fpath)
    if exists:
        if overwrite:
            remove_attachment(request, pagename, target)
        else:
            raise AttachmentAlreadyExists
    # save file; try/finally guarantees the stream is closed even if
    # _write_stream raises
    stream = open(fpath, 'wb')
    try:
        _write_stream(filecontent, stream)
    finally:
        stream.close()
    _addLogEntry(request, 'ATTNEW', pagename, target)
    filesize = os.path.getsize(fpath)
    # notify listeners (e.g. notification subsystem) about the new attachment
    event = FileAttachedEvent(request, pagename, target, filesize)
    send_event(event)
    return target, filesize
def remove_attachment(request, pagename, target):
    """ remove attachment <target> of page <pagename>

    Best-effort: if the file is already gone or cannot be removed,
    filesize 0 is returned and nothing is logged.

    @return: tuple (target, filesize) — filesize is 0 on failure
    """
    # replace illegal chars
    target = wikiutil.taintfilename(target)
    # get directory, do not create it
    attach_dir = getAttachDir(request, pagename, create=0)
    # remove file
    fpath = os.path.join(attach_dir, target).encode(config.charset)
    try:
        filesize = os.path.getsize(fpath)
        os.remove(fpath)
    except OSError:
        # BUGFIX: narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit. os.path.getsize/os.remove raise
        # OSError when the file is gone already or we have no rights —
        # not much we can do about it.
        filesize = 0
    else:
        # only log + notify when the file was actually removed
        _addLogEntry(request, 'ATTDEL', pagename, target)
        event = FileRemovedEvent(request, pagename, target, filesize)
        send_event(event)
    return target, filesize
class SamePath(Exception):
    """
    raised if an attachment move is attempted to same target path
    (source and destination resolve to the identical file path)
    """
class DestPathExists(Exception):
    """
    raised if an attachment move is attempted to an existing target path
    (and overwriting was not requested)
    """
def move_attachment(request, pagename, dest_pagename, target, dest_target,
                    overwrite=False):
    """ move attachment <target> of page <pagename>
    to attachment <dest_target> of page <dest_pagename>

    note: this is lowlevel code, acl permissions need to be checked before
    and also the target page should somehow exist (can be "deleted",
    but the pagedir should be there)

    @raise DestPathExists: destination exists and overwrite is False
    @raise SamePath: source and destination are the same path
    @return: tuple (dest_target, filesize)
    """
    # replace illegal chars
    target = wikiutil.taintfilename(target)
    dest_target = wikiutil.taintfilename(dest_target)
    attachment_path = os.path.join(getAttachDir(request, pagename),
                                   target).encode(config.charset)
    dest_attachment_path = os.path.join(getAttachDir(request, dest_pagename, create=1),
                                        dest_target).encode(config.charset)
    if not overwrite and os.path.exists(dest_attachment_path):
        raise DestPathExists
    if dest_attachment_path == attachment_path:
        raise SamePath
    filesize = os.path.getsize(attachment_path)
    # Removed the original's `try: ... except Exception: raise` wrapper —
    # it was a no-op; any failure here still propagates and skips the
    # logging/notification below.
    filesys.rename(attachment_path, dest_attachment_path)
    # log + notify: the file left the source page ...
    _addLogEntry(request, 'ATTDEL', pagename, target)
    event = FileRemovedEvent(request, pagename, target, filesize)
    send_event(event)
    # ... and arrived at the destination page
    _addLogEntry(request, 'ATTNEW', dest_pagename, dest_target)
    event = FileAttachedEvent(request, dest_pagename, dest_target, filesize)
    send_event(event)
    return dest_target, filesize
def copy_attachment(request, pagename, dest_pagename, target, dest_target,
                    overwrite=False):
    """ copy attachment <target> of page <pagename>
    to attachment <dest_target> of page <dest_pagename>

    note: this is lowlevel code, acl permissions need to be checked before
    and also the target page should somehow exist (can be "deleted",
    but the pagedir should be there)

    @raise DestPathExists: destination exists and overwrite is False
    @raise SamePath: source and destination are the same path
    @return: tuple (dest_target, filesize)
    """
    # replace illegal chars
    target = wikiutil.taintfilename(target)
    dest_target = wikiutil.taintfilename(dest_target)
    attachment_path = os.path.join(getAttachDir(request, pagename),
                                   target).encode(config.charset)
    dest_attachment_path = os.path.join(getAttachDir(request, dest_pagename, create=1),
                                        dest_target).encode(config.charset)
    if not overwrite and os.path.exists(dest_attachment_path):
        raise DestPathExists
    if dest_attachment_path == attachment_path:
        raise SamePath
    filesize = os.path.getsize(attachment_path)
    # Removed the original's `try: ... except Exception: raise` wrapper —
    # it was a no-op; a failed copy still propagates and skips the
    # logging/notification below.
    filesys.copy(attachment_path, dest_attachment_path)
    # log + notify only for the destination: the source is untouched by a copy
    _addLogEntry(request, 'ATTNEW', dest_pagename, dest_target)
    event = FileAttachedEvent(request, dest_pagename, dest_target, filesize)
    send_event(event)
    return dest_target, filesize
#############################################################################
### Internal helpers
#############################################################################
def _addLogEntry(request, action, pagename, filename):
    """ Add an entry to the edit log on uploads and deletes.

        `action` should be "ATTNEW" or "ATTDEL"
    """
    from MoinMoin.logfile import editlog
    t = wikiutil.timestamp2version(time.time())
    fname = wikiutil.url_quote(filename)
    # Record the change twice: once in the global edit log and once in the
    # page-local one (same entry payload, different log files).
    for log_kwargs in ({}, {'rootpagename': pagename}):
        log = editlog.EditLog(request, **log_kwargs)
        log.add(request, t, 99999999, action, pagename, request.remote_addr, fname)
def _access_file(pagename, request):
""" Check form parameter `target` and return a tuple of
`(pagename, filename, filepath)` for an existing attachment.
Return `(pagename, None, None)` if an error occurs.
"""
_ = request.getText
error = None
| |
+ 2.0 * qz * qw, 1.0 - 2.0 * qx * qx - 2.0 * qz * qz, 2.0 * qy * qz - 2.0 * qx * qw],
[2.0 * qx * qz - 2.0 * qy * qw,2.0 * qy * qz + 2.0 * qx * qw, 1.0 - 2.0 * qy * qy - 2.0 * qx * qx]])
def add(self, q2):
    """Component-wise sum of this quaternion and another.

    Arguments:
    q2 -- A quaternion, to be added to self

    Returns:
    A Quaternion, with the values corresponding to self + q2
    """
    sums = [self.v[i] + q2.v[i] for i in range(4)]
    return Quaternion(sums[0], sums[1], sums[2], sums[3])
def invert(self):
    """Return the conjugate of the quaternion (negated vector part),
    used here as the inverse for "division".

    Returns:
    A Quaternion, with the values corresponding to self^-1
    """
    w, x, y, z = self.v[0], self.v[1], self.v[2], self.v[3]
    return Quaternion(w, -x, -y, -z)
def minus(self, q2):
    """Component-wise difference of two quaternions.

    Arguments:
    q2 -- A quaternion, to be subtracted from self

    Returns:
    A Quaternion, with the values corresponding to self - q2
    """
    diffs = [self.v[i] - q2.v[i] for i in range(4)]
    return Quaternion(diffs[0], diffs[1], diffs[2], diffs[3])
def multiply(self, q2):
    """Hamilton product of two quaternions (order matters).

    Arguments:
    q2 -- A quaternion, to be multiplied with self

    Returns:
    A Quaternion, with the values corresponding to self * q2
    """
    # Unpack components once for readability: (w, x, y, z) layout.
    w1, x1, y1, z1 = self.v[0], self.v[1], self.v[2], self.v[3]
    w2, x2, y2, z2 = q2.v[0], q2.v[1], q2.v[2], q2.v[3]
    return Quaternion(w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
                      x1 * w2 + w1 * x2 + y1 * z2 - z1 * y2,
                      w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
                      w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2)
def normalize(self):
    """Return a unit-length copy of the quaternion.

    Returns:
    A normalized Quaternion
    """
    w, x, y, z = self.v[0], self.v[1], self.v[2], self.v[3]
    # Euclidean norm of the four components.
    n = math.sqrt(w * w + x * x + y * y + z * z)
    return Quaternion(w / n, x / n, y / n, z / n)
def scale(self, scalar):
    """Multiply every component of the quaternion by a scalar.

    Arguments:
    scalar -- the value to scale the quaternion by

    Returns:
    A Quaternion, with the values corresponding to self * scalar
    """
    scaled = [self.v[i] * scalar for i in range(4)]
    return Quaternion(scaled[0], scaled[1], scaled[2], scaled[3])
def get_numpy_slice(numpy_array, indices):
    """A replacement for numpy's numpy_array[indices] functionality, where numpy_array and indices are both matrices.
    Unlike numpy's default behavior, this function returns an empty array if indices is empty, rather than throwing an error.

    Arguments:
    numpy_array -- A numpy array
    indices -- A numpy array (or list), the indices of the elements of numpy_array to return.

    Returns:
    A numpy array, numpy_array[indices] if indices is not empty, an empty numpy array otherwise.
    """
    # BUGFIX: the original wrapped the indexing in a bare `except:` that, for
    # non-empty indices, printed "Error!" and fell through to return None —
    # silently propagating None to callers. Handle the one documented special
    # case (empty indices) explicitly and let genuine errors raise.
    if len(indices) == 0:
        return numpy.array([])
    return numpy_array[indices]
################## MODIFYING AND MANIPULATING MOLECULAR MODELS ##################
class Molecule:
"""Loads, saves, and manupulates molecuar models."""
def __init__ (self):
    """Initializes the variables of the Molecule class."""
    # Flags read/written by callers to track whether this molecule currently
    # lies inside a triangle's margin / submargin region.
    self.in_triangle_margin = True
    self.in_triangle_submargin = False
    # Cached index of this molecule's headgroup atom; None until
    # get_headgroup_index() computes it lazily.
    self.headgroup_index = None
def get_headgroup_index(self, lipid_headgroup_marker):
    """Get the index of the current molecule's headgroup atom (cached).

    Arguments:
    lipid_headgroup_marker -- A tuple of the form (chain, resname, resid, atomname) specifying the headgroup

    Returns:
    An integer, the index of this molecule's headgroup
    """
    # Lazy computation: only resolve the mask match the first time.
    # Uses `is None` (identity) rather than `== None` for the sentinel test.
    if self.headgroup_index is None:
        self.headgroup_index = self.get_indices_of_mask_match(lipid_headgroup_marker)[0]
    return self.headgroup_index
def load_pdb(self, filename):
    """Loads a PDB file into the current Molecule object from a file.

    Arguments:
    filename -- a string, the name of the file to load
    """
    # `with` guarantees the handle is closed even if reading fails; the
    # original also shadowed the `file` builtin with its handle variable.
    with open(filename, "r") as pdb_file:
        lines = pdb_file.readlines()
    # load the molecule from the list of lines
    self.load_pdb_from_lines(lines)
def load_pdb_from_lines(self, lines):
    """Loads a PDB file into the current Molecule object from a list of PDB lines.

    Arguments:
    lines -- a list, containing the PDB lines to load into the current object
    """
    self.__init__()
    gc.disable() # because assigning many objects slows down code if garbage collection turned on
    # Pre-allocate for the worst case (every line is an atom record); the
    # arrays are trimmed to the actual count below.
    self.atom_inf_string_vals = numpy.empty((len(lines), 4), dtype='|S9') # chain, resname, atomname, id_keys
    self.atom_inf_resids = numpy.empty(len(lines), dtype='|S4')
    self.all_atoms_numpy = numpy.empty((len(lines), 3))
    count = 0
    for line in lines:
        if len(line) >= 7 and (line[0:4] == "ATOM" or line[0:6] == "HETATM"):
            # BUGFIX: the original wrote each record at the raw line index `t`
            # but truncated the arrays to [:count] afterwards — any non-atom
            # line (header, TER, ...) misaligned the data and dropped real
            # atoms. Write at `count` (atoms seen so far) instead.
            # NOTE(review): these column offsets (resname 16:21, atomname
            # 11:16) are as in the original — confirm they match the files
            # this code consumes.
            self.all_atoms_numpy[count][0] = float(line[30:38])
            self.all_atoms_numpy[count][1] = float(line[38:46])
            self.all_atoms_numpy[count][2] = float(line[46:54])
            resname = line[16:21].strip()
            atomname = line[11:16].strip()
            # slicing a string never raises, so no try/except is needed here
            resid = line[22:26].strip()
            self.atom_inf_string_vals[count][0] = line[21:22].strip() # chain
            self.atom_inf_string_vals[count][1] = resname # resname
            self.atom_inf_string_vals[count][2] = atomname # atomname
            self.atom_inf_string_vals[count][3] = resname + "_" + atomname # id_keys
            self.atom_inf_resids[count] = resid
            count = count + 1
    gc.enable()
    # now resize the arrays, cutting out bottom parts that were never populated
    self.atom_inf_string_vals = self.atom_inf_string_vals[:count]
    self.atom_inf_resids = self.atom_inf_resids[:count]
    self.all_atoms_numpy = self.all_atoms_numpy[:count]
def save_pdb(self, filename):
    """Saves data to a PDB file.

    Arguments:
    filename -- A string, the filename to be written.
    """
    # `with` closes the file even if formatting raises; the original also
    # shadowed the `file` builtin and bound an unused `toprint` local.
    with open(filename, "w") as outfile:
        for index in range(len(self.all_atoms_numpy)):
            outfile.write(self.create_pdb_line(index) + "\n")
def set_undo_point(self):
    """Snapshot the current atomic coordinates; a later undo() call restores
    this exact configuration."""
    self.all_atoms_numpy_undo = self.all_atoms_numpy.copy()
def undo(self):
    """Restore all atomic coordinates to the snapshot taken by set_undo_point()."""
    self.all_atoms_numpy = self.all_atoms_numpy_undo.copy()
def rotate_mol_quat(self, rot_quat):
    """Rotate all atoms of this molecule by the given rotation quaternion.

    Arguments:
    rot_quat -- a Quaternion; its matrix form is applied to every coordinate
    """
    rotation_matrix = rot_quat.to_matrix()
    self.all_atoms_numpy = numpy.dot(self.all_atoms_numpy, rotation_matrix)
def baseN(self, num, b, numerals="0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"):
"""Return the value of a number in another base
Arguments:
num -- An integer, the number in base 10.
b -- An integer, the number of the new base
numerals -- An optional string, containing the numerals to use in the new base
Returns:
A string, the representation of the original integer, now in the specified base
"""
return ((num == 0) and numerals[0]) or (self.baseN(num // b, b, numerals).lstrip(numerals[0]) + numerals[num % b])
def create_pdb_line(self, index, output_index=None, output_resid=None):
"""Create a string formatted according to the PDB standard from the atomic information contained in this atom class.
Arguments:
index -- An integer, the index of the atom in the Molecule object.
output_index -- An optional integer, the index to use in the PDB-line output. If not specified, index is used.
output_resid -- An optional integer, the resid to use in the PDB-line output. If not specified, the existing resid is used.
Returns:
A string, formatted according to the PDB standard.
"""
# use user-specified index if provided
if output_index is None: output_index = str(index)
else: output_index = str(output_index)
# PDB format is fixed column, so if the index is too big just turn it into stars
if len(output_index) >= 7: output_index = "******"
# use the user-specified resid if provided
if output_resid is None: output_resid = self.atom_inf_resids[index]
else: output_resid = str(output_resid)
# PDB format is fixed column, so if the resid is too big, switch over to a string identifier that is unique to each residue
if len(output_resid) >= 5: # you need to start using letters in the resid after 9999
# 2383280 is "a001" in base 62.
# so map 10000 to 2383280 and convert to base 62.
output_resid = self.baseN(int(output_resid) + 2373280, 62)
# max using this method is 35999 residues
# create the PDB line
output = "ATOM "
output = output + str(output_index).rjust(6) + self.atom_inf_string_vals[index][2].rjust(5) + self.atom_inf_string_vals[index][1].rjust(5) + self.atom_inf_string_vals[index][0].rjust(1) + | |
for all trajectories.
>>> trajectories.get_duration()
"""
# ------------------------------------
# Return duration with get_duration().
# ------------------------------------
t_total = get_duration(self)
# ---------------------------
# Adding t_total to DataSet.
# ---------------------------
# Append t_total DataArray to original DataSet.
self.data['t_total'] = xr.DataArray(t_total, dims=["traj"])
# Adding attributes to t_total DataArray.
self.data.t_total.attrs = {
'long_name': "duration",
'standard_name': "t_total",
'units': "ns"
}
# Return trajectories object with updated DataSet.
return trajectories(self.data)
##############################################################################
# Define get_value() function.
def get_value(self, variable, time_level):
    """
    Return the value of a specified variable at a specified time
    level for each trajectory.

    Parameters
    ----------
    self : trajectories object
        Trajectories object passed from trajectories class method.
    variable : string
        Name of the variable in the trajectories object.
    time_level : string
        Time level in the format 'YYYY-MM-DD'.

    Returns
    -------
    DataSet.
        Original DataSet is returned with an appended attribute
        variable {variable}_i DataArray containing the value of the
        variable at the given time level, with dimension (traj).

    Examples
    --------
    Get the value of temperature for each trajectory at time
    level 2000-01-31. Note that we must convert time to datetime64
    format before using .get_value().

    >>> trajectories.use_datetime(start_time='2000-01-01').get_value('temp', '2000-01-31')
    """
    # Validate arguments (same order and messages as elsewhere in the class).
    if not isinstance(variable, str):
        raise TypeError("variable must be specified as a string")
    if variable not in list(self.data.variables):
        raise ValueError("variable: \'" + variable + "\' not found in Dataset")
    if not isinstance(time_level, str):
        raise TypeError("time_level must be specified as a string in the format YYYY-MM-DD")

    # Extract per-trajectory values at the requested time level.
    values = get_val(self=self, variable=variable, time_level=time_level)

    # Standard and long names derived from the requested variable.
    std_name = variable + "_i"
    long_name = variable + " at " + time_level

    # Store the new DataArray on the DataSet with matching attributes.
    self.data[std_name] = xr.DataArray(values, dims=["traj"])
    self.data[std_name].attrs = {
        'long_name': long_name,
        'standard_name': std_name,
        'units': self.data[variable].attrs['units']
    }

    # Hand back a trajectories object wrapping the updated DataSet.
    return trajectories(self.data)
##############################################################################
# Define get_max() function.
def get_max(self, variable):
    """
    Return the maximum value of a specified variable for each trajectory.

    The maximum value of the variable is returned for all trajectories
    as a new DataArray.

    Parameters
    ----------
    self : trajectories object
        Trajectories object passed from trajectories class method.
    variable : string
        Name of the variable in the trajectories object.

    Returns
    -------
    DataSet.
        Original DataSet is returned with appended attribute
        variable {variable}_max DataArray containing the max
        values along each trajectory, with dimension (traj).

    Examples
    --------
    Get the maximum temperature along each trajectory.

    >>> trajectories.get_max('temp')
    """
    # Validate the variable argument against the DataSet contents.
    if not isinstance(variable, str):
        raise TypeError("variable must be specified as a string")
    if variable not in list(self.data.variables):
        raise ValueError("variable: \'" + variable + "\' not found in Dataset")

    # Per-trajectory maxima via the shared min/max helper.
    max_values = get_minmax(self=self, variable=variable, get_max=True)

    # Names derived from the requested variable.
    std_name = variable + "_max"
    long_name = "maximum " + variable

    # Attach the new DataArray and its attributes to the DataSet.
    self.data[std_name] = xr.DataArray(max_values, dims=["traj"])
    self.data[std_name].attrs = {
        'long_name': long_name,
        'standard_name': std_name,
        'units': self.data[variable].attrs['units']
    }

    # Hand back a trajectories object wrapping the updated DataSet.
    return trajectories(self.data)
##############################################################################
# Define get_min() function.
def get_min(self, variable):
    """
    Return the minimum value of a specified variable for each trajectory.

    The minimum value of the variable is returned for all trajectories
    as a new DataArray.

    Parameters
    ----------
    self : trajectories object
        Trajectories object passed from trajectories class method.
    variable : string
        Name of the variable in the trajectories object.

    Returns
    -------
    DataSet.
        Original DataSet is returned with appended attribute
        variable {variable}_min DataArray containing the min
        values along each trajectory, with dimension (traj).

    Examples
    --------
    Get the minimum temperature along each trajectory.

    >>> trajectories.get_min('temp')
    """
    # Validate the variable argument against the DataSet contents.
    if not isinstance(variable, str):
        raise TypeError("variable must be specified as a string")
    if variable not in list(self.data.variables):
        raise ValueError("variable: \'" + variable + "\' not found in Dataset")

    # Per-trajectory minima via the shared min/max helper.
    min_values = get_minmax(self=self, variable=variable, get_max=False)

    # Names derived from the requested variable.
    std_name = variable + "_min"
    long_name = "minimum " + variable

    # Attach the new DataArray and its attributes to the DataSet.
    self.data[std_name] = xr.DataArray(min_values, dims=["traj"])
    self.data[std_name].attrs = {
        'long_name': long_name,
        'standard_name': std_name,
        'units': self.data[variable].attrs['units']
    }

    # Hand back a trajectories object wrapping the updated DataSet.
    return trajectories(self.data)
##############################################################################
# Define add_seed() method.
def add_seed(self):
    """
    Add the seeding level at which particles are released (start of
    trajectory) as a new attribute variable.

    The seeding level, an integer between 1 and the total no. of seeding
    levels, marks when a particle is released into the system and is
    returned for all trajectories as a new DataArray.

    Parameters
    ----------
    self : trajectories object
        Trajectories object passed from trajectories class method.

    Returns
    -------
    DataSet.
        Original DataSet is returned with appended attribute
        variable seed_level DataArray containing the seed
        level for each particle released, with dimension (traj).

    Examples
    --------
    Get seed levels for all trajectories.

    >>> trajectories.add_seed()
    """
    # Compute seed levels via the module-level add_seed() helper
    # (shadowed here by the method name, but resolved globally).
    levels = add_seed(self)

    # Attach the new DataArray and its attributes to the DataSet.
    self.data['seed_level'] = xr.DataArray(levels, dims=["traj"])
    self.data['seed_level'].attrs = {
        'long_name': "seeding level",
        'standard_name': "seed_level",
        'units': "none"
    }

    # Hand back a trajectories object wrapping the updated DataSet.
    return trajectories(self.data)
##############################################################################
# Define add_id() method.
def add_id(self):
    """
    Add a unique identifier (integer) for each trajectory.

    The trajectory id, an integer between 1 and the total no. of
    trajectories, identifies every particle released into the system
    and is returned for all trajectories as a new DataArray.

    Parameters
    ----------
    self : trajectories object
        Trajectories object passed from trajectories class method.

    Returns
    -------
    DataSet.
        Original DataSet is returned with appended attribute
        variable id DataArray containing the id for each
        particle released, with dimension (traj).

    Examples
    --------
    Get trajectory id for all trajectories.

    >>> trajectories.add_id()
    """
    # Compute ids via the module-level add_id() helper
    # (shadowed here by the method name, but resolved globally).
    identifiers = add_id(self)

    # Attach the new DataArray and its attributes to the DataSet.
    self.data['id'] = xr.DataArray(identifiers, dims=["traj"])
    self.data['id'].attrs = {
        'long_name': "trajectory id",
        'standard_name': "id",
        'units': "none"
    }

    # Hand back a trajectories object wrapping the updated DataSet.
    return trajectories(self.data)
##############################################################################
# Define add_variable() method.
def add_variable(self, data, attributes):
"""
Adds a new variable to the existing trajectories object.
The variable data must be provided as an ndarray with dimensions
(traj) / (obs) / (traj x obs) and the attributes provided as a
dictionary.
Parameters
----------
self : trajectories object
Trajectories object passed from trajectories class method.
data : ndarray
values of new variable to be added to the trajectories object
DataSet.
attributes : dict
the attributes of the new variable, at a minimum -'long_name',
'standard_name' and 'units' should be included. The standard
name will be assigned as the attribute variable name.
Returns
-------
trajectories object
Original trajectories object is returned with new attribute
variable DataArray appended, dimensions are either (traj) /
(obs) / (traj x obs).
"""
# -------------------
# Raising exceptions.
# -------------------
if isinstance(data, np.ndarray) is False:
raise TypeError("data must be provided as an ndarray")
if isinstance(attributes, dict) is False:
raise TypeError("variable attributes must be provided as a dictionary")
# -----------------------------------------
# Returning updated | |
"""Module containing multiple classes to interact with openmediavault
based on StaticCube https://github.com/StaticCube/python-synology"""
# -*- coding:utf-8 -*-
import requests
import urllib3
class FormatHelper(object):
    """Static helpers that convert raw byte counts to other units."""

    @staticmethod
    def bytes_to_readable(num):
        """Convert a byte count to a short human-readable string.

        Values under 512 bytes collapse to "0 Kb" and values under
        1024 to "1 Kb"; larger values scale through binary unit
        prefixes up to 'Yb'.
        """
        if num < 512:
            return "0 Kb"
        if num < 1024:
            return "1 Kb"
        value = num
        for suffix in ['', 'Kb', 'Mb', 'Gb', 'Tb', 'Pb', 'Eb', 'Zb']:
            if abs(value) < 1024.0:
                return "%3.1f%s" % (value, suffix)
            value /= 1024.0
        return "%.1f%s" % (value, 'Yb')

    @staticmethod
    def bytes_to_megabytes(num):
        """Convert bytes to megabytes, rounded to one decimal place."""
        return round(num / 1024.0 / 1024.0, 1)

    @staticmethod
    def bytes_to_gigabytes(num):
        """Convert bytes to gigabytes, rounded to one decimal place."""
        return round(num / 1024.0 / 1024.0 / 1024.0, 1)

    @staticmethod
    def bytes_to_terrabytes(num):
        """Convert bytes to terabytes, rounded to one decimal place.

        NOTE: the misspelt name is kept for API compatibility.
        """
        return round(num / 1024.0 / 1024.0 / 1024.0 / 1024.0, 1)
class OmvUtilization(object):
    """Utilisation (system status) data reported by openmediavault.

    The raw payload is a list of ``{"index": int, "value": ...}`` entries;
    ``update()`` flattens it into a plain dict keyed by field name.
    All accessors return None until data has been loaded.
    """

    # Raw entries whose "value" is stored directly, keyed by their index.
    _SIMPLE_FIELDS = {
        0: "hostname",
        1: "version",
        2: "processor",
        3: "kernel",
        4: "systemtime",
        5: "uptime",
        6: "load_average",
    }
    # Raw entries whose "value" is itself a {"value": ...} mapping.
    _NESTED_FIELDS = {
        7: "cpu_load",
        8: "mem_usage",
    }

    def __init__(self, raw_input):
        # _data stays None until the first successful update().
        self._data = None
        self.update(raw_input)

    def update(self, raw_input):
        """Replace the stored data with *raw_input* (ignored if None)."""
        if raw_input is not None:
            self._data = {}
            for val in raw_input:
                index = val["index"]
                if index in self._SIMPLE_FIELDS:
                    self._data[self._SIMPLE_FIELDS[index]] = val["value"]
                elif index in self._NESTED_FIELDS:
                    self._data[self._NESTED_FIELDS[index]] = val["value"]["value"]

    @property
    def hostname(self):
        """Hostname of openmediavault."""
        if self._data is not None:
            return self._data["hostname"]

    @property
    def up_time(self):
        """Uptime string reported by openmediavault."""
        if self._data is not None:
            return self._data["uptime"]

    @property
    def cpu_total_load(self):
        """Total CPU load for openmediavault."""
        if self._data is not None:
            return self._data["cpu_load"]

    def _get_cpu_avg_load(self):
        """Split the 'load_average' string into its three components."""
        if self._data is not None:
            return self._data["load_average"].split(', ')

    @property
    def cpu_1min_load(self):
        """Average CPU load past minute (None when no data)."""
        loads = self._get_cpu_avg_load()
        # Bug fix: the original subscripted None (TypeError) before any
        # data had been loaded; every other accessor guards for None.
        if loads is not None:
            return loads[0]

    @property
    def cpu_5min_load(self):
        """Average CPU load past 5 minutes (None when no data)."""
        loads = self._get_cpu_avg_load()
        if loads is not None:
            return loads[1]

    @property
    def cpu_15min_load(self):
        """Average CPU load past 15 minutes (None when no data)."""
        loads = self._get_cpu_avg_load()
        if loads is not None:
            return loads[2]

    @property
    def memory_real_usage(self):
        """Memory usage reported by openmediavault."""
        if self._data is not None:
            return self._data["mem_usage"]
class OmvStorage(object):
"""Class containing Storage data"""
def __init__(self, raw_input):
    """Store storage data; *raw_input* may be None (no data yet)."""
    # _data holds the raw response dict ("volumes", "raid", "smart" lists).
    self._data = None
    self.update(raw_input)
def update(self, raw_input):
    """Replace the stored storage data with *raw_input* (ignored if None)."""
    # Keep any previously stored data when raw_input is None.
    if raw_input is not None:
        self._data = raw_input
@property
def volumes(self):
"""Returns all available volumes"""
if self._data is not None:
volumes = []
for volume in self._data["volumes"]:
volumes.append(volume["devicefile"])
return volumes
def _get_volume(self, volume_devicefile):
"""Returns a specific volume"""
if self._data is not None:
for volume in self._data["volumes"]:
if volume["devicefile"] == volume_devicefile:
return volume
def volume_status(self, volume):
"""Status of the volume (clean etc.)"""
volume = self._get_volume(volume)
raid = self._get_raid(volume["devicefile"])
if volume is not None and raid is not None:
return raid["state"]
def volume_device_type(self, volume):
"""Returns the volume type (RAID1, RAID2, etc)"""
volume = self._get_volume(volume)
try:
raid = self._get_raid(volume["devicefile"])
if volume is not None and raid is not None:
return raid["level"]
except KeyError:
pass
return None
def _volume_mounted(self, volume):
"""Returns boolean if mounted"""
volume = self._get_volume(volume)
if volume is not None:
return volume["mounted"]
return False
def volume_size_total(self, volume, human_readable=True):
"""Total size of volume"""
volume = self._get_volume(volume)
if volume is not None and self._volume_mounted(volume["devicefile"]):
return_data = int(volume["size"])
if human_readable:
return FormatHelper.bytes_to_readable(
return_data)
else:
return return_data
def volume_size_used(self, volume, human_readable=True):
"""Total used size in volume"""
volume = self._get_volume(volume)
if volume is not None and self._volume_mounted(volume["devicefile"]):
return_data = int(int(volume["size"])-int(volume["available"]))
if human_readable:
return FormatHelper.bytes_to_readable(
return_data)
else:
return return_data
def volume_percentage_used(self, volume):
"""Total used size in percentage for volume"""
volume = self._get_volume(volume)
if volume is not None:
total = int(volume["size"])
used = int(int(volume["size"])-int(volume["available"]))
if used is not None and used > 0 and \
total is not None and total > 0:
return round((float(used) / float(total)) * 100.0, 1)
def volume_disk_temp_avg(self, volume):
    """Average temperature of all disks making up the volume"""
    volume = self._get_volume(volume)
    if volume is not None:
        # Non-raid volume: report the parent device's own temperature.
        if self.volume_device_type(volume["devicefile"]) is None:
            return self.disk_temp(volume["parentdevicefile"])
        vol_disks = self._get_raid(volume["devicefile"])
        if vol_disks is not None:
            total_temp = 0
            total_disks = 0
            for vol_raid in vol_disks["devices"]:
                # Raid members look like partitions ("sda1"); strip the
                # trailing character to address the underlying disk.
                disk_temp = self.disk_temp(vol_raid[0:-1])
                if disk_temp is not None:
                    total_disks += 1
                    total_temp += disk_temp
            # Only average over disks that actually reported a temperature;
            # returns None when none did.
            if total_temp > 0 and total_disks > 0:
                return int(round(total_temp / total_disks, 0))
def volume_disk_temp_max(self, volume):
    """Maximum temperature of all disks making up the volume"""
    volume = self._get_volume(volume)
    if volume is not None:
        # Non-raid volume: report the parent device's own temperature.
        if self.volume_device_type(volume["devicefile"]) is None:
            return self.disk_temp(volume["parentdevicefile"])
        vol_disks = self._get_raid(volume["devicefile"])
        if vol_disks is not None:
            max_temp = 0
            for vol_raid in vol_disks["devices"]:
                # Raid members look like partitions ("sda1"); strip the
                # trailing character to address the underlying disk.
                disk_temp = self.disk_temp(vol_raid[0:-1])
                if disk_temp is not None and disk_temp > max_temp:
                    max_temp = disk_temp
            # NOTE(review): returns 0 (not None) when no disk reported a
            # temperature — confirm callers expect that.
            return max_temp
@property
def raids(self):
"""Returns all available raids"""
if self._data is not None:
raids = []
for raid in self._data["raid"]:
raids.append(raid["devicefile"])
return raids
def _get_raid(self, raid_devicefile):
"""Returns a specific raid"""
if self._data is not None:
for raid in self._data["raid"]:
if raid["devicefile"] == raid_devicefile:
return raid
def raid_name(self, raid):
"""The name of this raid"""
raid = self._get_raid(raid)
if raid is not None:
return raid["name"]
def raid_devices(self, raid):
"""The devices of this raid"""
raid = self._get_raid(raid)
if raid is not None:
devices = []
for device in raid["devices"]:
devices.append(device)
return devices
def devicefile_from_raid(self, disk):
"""Get raid of disk"""
for raid in self.raids:
for device in self.raid_devices(raid):
if device[0:-1] == disk["devicefile"]:
print(raid)
return raid
return raid
@property
def disks(self):
"""Returns all available (internal) disks"""
if self._data is not None:
disks = []
for disk in self._data["smart"]:
disks.append(disk["devicefile"])
return disks
def _get_disk(self, disk_devicefile):
"""Returns a specific disk"""
if self._data is not None:
for disk in self._data["smart"]:
if disk["devicefile"] == disk_devicefile:
return disk
def disk_name(self, disk):
"""The name of this disk"""
disk = self._get_disk(disk)
if disk is not None:
return disk["model"]
def disk_smart_status(self, disk):
| |
"gfdl-1.3": {
"description": "GNU Free Documentation License v1.3",
"name": "GFDL-1.3",
"url": "https://www.gnu.org/licenses/fdl-1.3.txt"
},
"gfdl-1.3-only": {
"description": "GNU Free Documentation License v1.3 only",
"name": "GFDL-1.3-only",
"url": "https://www.gnu.org/licenses/fdl-1.3.txt"
},
"gfdl-1.3-or-later": {
"description": "GNU Free Documentation License v1.3 or later",
"name": "GFDL-1.3-or-later",
"url": "https://www.gnu.org/licenses/fdl-1.3.txt"
},
"giftware": {
"description": "Giftware License",
"name": "Giftware",
"url": "http://liballeg.org/license.html#allegro-4-the-giftware-license"
},
"gl2ps": {
"description": "GL2PS License",
"name": "GL2PS",
"url": "http://www.geuz.org/gl2ps/COPYING.GL2PS"
},
"glide": {
"description": "3dfx Glide License",
"name": "Glide",
"url": "http://www.users.on.net/~triforce/glidexp/COPYING.txt"
},
"glulxe": {
"description": "Glulxe License",
"name": "Glulxe",
"url": "https://fedoraproject.org/wiki/Licensing/Glulxe"
},
"gnuplot": {
"description": "gnuplot License",
"name": "gnuplot",
"url": "https://fedoraproject.org/wiki/Licensing/Gnuplot"
},
"gpl-1.0": {
"description": "GNU General Public License v1.0 only",
"name": "GPL-1.0",
"url": "https://www.gnu.org/licenses/old-licenses/gpl-1.0-standalone.html"
},
"gpl-1.0+": {
"description": "GNU General Public License v1.0 or later",
"name": "GPL-1.0+",
"url": "https://www.gnu.org/licenses/old-licenses/gpl-1.0-standalone.html"
},
"gpl-1.0-only": {
"description": "GNU General Public License v1.0 only",
"name": "GPL-1.0-only",
"url": "https://www.gnu.org/licenses/old-licenses/gpl-1.0-standalone.html"
},
"gpl-1.0-or-later": {
"description": "GNU General Public License v1.0 or later",
"name": "GPL-1.0-or-later",
"url": "https://www.gnu.org/licenses/old-licenses/gpl-1.0-standalone.html"
},
"gpl-2.0": {
"description": "GNU General Public License v2.0 only",
"name": "GPL-2.0",
"url": "https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html"
},
"gpl-2.0+": {
"description": "GNU General Public License v2.0 or later",
"name": "GPL-2.0+",
"url": "https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html"
},
"gpl-2.0-only": {
"description": "GNU General Public License v2.0 only",
"name": "GPL-2.0-only",
"url": "https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html"
},
"gpl-2.0-or-later": {
"description": "GNU General Public License v2.0 or later",
"name": "GPL-2.0-or-later",
"url": "https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html"
},
"gpl-2.0-with-autoconf-exception": {
"description": "GNU General Public License v2.0 w/Autoconf exception",
"name": "GPL-2.0-with-autoconf-exception",
"url": "http://ac-archive.sourceforge.net/doc/copyright.html"
},
"gpl-2.0-with-bison-exception": {
"description": "GNU General Public License v2.0 w/Bison exception",
"name": "GPL-2.0-with-bison-exception",
"url": "http://git.savannah.gnu.org/cgit/bison.git/tree/data/yacc.c?id=193d7c7054ba7197b0789e14965b739162319b5e#n141"
},
"gpl-2.0-with-classpath-exception": {
"description": "GNU General Public License v2.0 w/Classpath exception",
"name": "GPL-2.0-with-classpath-exception",
"url": "https://www.gnu.org/software/classpath/license.html"
},
"gpl-2.0-with-font-exception": {
"description": "GNU General Public License v2.0 w/Font exception",
"name": "GPL-2.0-with-font-exception",
"url": "https://www.gnu.org/licenses/gpl-faq.html#FontException"
},
"gpl-2.0-with-gcc-exception": {
"description": "GNU General Public License v2.0 w/GCC Runtime Library exception",
"name": "GPL-2.0-with-GCC-exception",
"url": "https://gcc.gnu.org/git/?p=gcc.git;a=blob;f=gcc/libgcc1.c;h=762f5143fc6eed57b6797c82710f3538aa52b40b;hb=cb143a3ce4fb417c68f5fa2691a1b1b1053dfba9#l10"
},
"gpl-3.0": {
"description": "GNU General Public License v3.0 only",
"name": "GPL-3.0",
"url": "https://www.gnu.org/licenses/gpl-3.0-standalone.html"
},
"gpl-3.0+": {
"description": "GNU General Public License v3.0 or later",
"name": "GPL-3.0+",
"url": "https://www.gnu.org/licenses/gpl-3.0-standalone.html"
},
"gpl-3.0-only": {
"description": "GNU General Public License v3.0 only",
"name": "GPL-3.0-only",
"url": "https://www.gnu.org/licenses/gpl-3.0-standalone.html"
},
"gpl-3.0-or-later": {
"description": "GNU General Public License v3.0 or later",
"name": "GPL-3.0-or-later",
"url": "https://www.gnu.org/licenses/gpl-3.0-standalone.html"
},
"gpl-3.0-with-autoconf-exception": {
"description": "GNU General Public License v3.0 w/Autoconf exception",
"name": "GPL-3.0-with-autoconf-exception",
"url": "https://www.gnu.org/licenses/autoconf-exception-3.0.html"
},
"gpl-3.0-with-gcc-exception": {
"description": "GNU General Public License v3.0 w/GCC Runtime Library exception",
"name": "GPL-3.0-with-GCC-exception",
"url": "https://www.gnu.org/licenses/gcc-exception-3.1.html"
},
"gsoap-1.3b": {
"description": "gSOAP Public License v1.3b",
"name": "gSOAP-1.3b",
"url": "http://www.cs.fsu.edu/~engelen/license.html"
},
"haskellreport": {
"description": "Haskell Language Report License",
"name": "HaskellReport",
"url": "https://fedoraproject.org/wiki/Licensing/Haskell_Language_Report_License"
},
"hpnd": {
"description": "Historical Permission Notice and Disclaimer",
"name": "HPND",
"url": "https://opensource.org/licenses/HPND"
},
"hpnd-sell-variant": {
"description": "Historical Permission Notice and Disclaimer - sell variant",
"name": "HPND-sell-variant",
"url": "https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/net/sunrpc/auth_gss/gss_generic_token.c?h=v4.19"
},
"ibm-pibs": {
"description": "IBM PowerPC Initialization and Boot Software",
"name": "IBM-pibs",
"url": "http://git.denx.de/?p=u-boot.git;a=blob;f=arch/powerpc/cpu/ppc4xx/miiphy.c;h=297155fdafa064b955e53e9832de93bfb0cfb85b;hb=9fab4bf4cc077c21e43941866f3f2c196f28670d"
},
"icu": {
"description": "ICU License",
"name": "ICU",
"url": "http://source.icu-project.org/repos/icu/icu/trunk/license.html"
},
"ijg": {
"description": "Independent JPEG Group License",
"name": "IJG",
"url": "http://dev.w3.org/cvsweb/Amaya/libjpeg/Attic/README?rev=1.2"
},
"imagemagick": {
"description": "ImageMagick License",
"name": "ImageMagick",
"url": "http://www.imagemagick.org/script/license.php"
},
"imatix": {
"description": "iMatix Standard Function Library Agreement",
"name": "iMatix",
"url": "http://legacy.imatix.com/html/sfl/sfl4.htm#license"
},
"imlib2": {
"description": "Imlib2 License",
"name": "Imlib2",
"url": "http://trac.enlightenment.org/e/browser/trunk/imlib2/COPYING"
},
"info-zip": {
"description": "Info-ZIP License",
"name": "Info-ZIP",
"url": "http://www.info-zip.org/license.html"
},
"intel": {
"description": "Intel Open Source License",
"name": "Intel",
"url": "https://opensource.org/licenses/Intel"
},
"intel-acpi": {
"description": "Intel ACPI Software License Agreement",
"name": "Intel-ACPI",
"url": "https://fedoraproject.org/wiki/Licensing/Intel_ACPI_Software_License_Agreement"
},
"interbase-1.0": {
"description": "Interbase Public License v1.0",
"name": "Interbase-1.0",
"url": "https://web.archive.org/web/20060319014854/http://info.borland.com/devsupport/interbase/opensource/IPL.html"
},
"ipa": {
"description": "IPA Font License",
"name": "IPA",
"url": "https://opensource.org/licenses/IPA"
},
"ipl-1.0": {
"description": "IBM Public License v1.0",
"name": "IPL-1.0",
"url": "https://opensource.org/licenses/IPL-1.0"
},
"isc": {
"description": "ISC License",
"name": "ISC",
"url": "https://www.isc.org/downloads/software-support-policy/isc-license/"
},
"jasper-2.0": {
"description": "JasPer License",
"name": "JasPer-2.0",
"url": "http://www.ece.uvic.ca/~mdadams/jasper/LICENSE"
},
"jpnic": {
"description": "Japan Network Information Center License",
"name": "JPNIC",
"url": "https://gitlab.isc.org/isc-projects/bind9/blob/master/COPYRIGHT#L366"
},
"json": {
"description": "JSON License",
"name": "JSON",
"url": "http://www.json.org/license.html"
},
"lal-1.2": {
"description": "Licence Art Libre 1.2",
"name": "LAL-1.2",
"url": "http://artlibre.org/licence/lal/licence-art-libre-12/"
},
"lal-1.3": {
"description": "Licence Art Libre 1.3",
"name": "LAL-1.3",
"url": "https://artlibre.org/"
},
"latex2e": {
"description": "Latex2e License",
"name": "Latex2e",
"url": "https://fedoraproject.org/wiki/Licensing/Latex2e"
},
"leptonica": {
"description": "Leptonica License",
"name": "Leptonica",
"url": "https://fedoraproject.org/wiki/Licensing/Leptonica"
},
"lgpl-2.0": {
"description": "GNU Library General Public License v2 only",
"name": "LGPL-2.0",
"url": "https://www.gnu.org/licenses/old-licenses/lgpl-2.0-standalone.html"
},
"lgpl-2.0+": {
"description": "GNU Library General Public License v2 or later",
"name": "LGPL-2.0+",
"url": "https://www.gnu.org/licenses/old-licenses/lgpl-2.0-standalone.html"
},
"lgpl-2.0-only": {
"description": "GNU Library General Public License v2 only",
"name": "LGPL-2.0-only",
"url": "https://www.gnu.org/licenses/old-licenses/lgpl-2.0-standalone.html"
},
"lgpl-2.0-or-later": {
"description": "GNU Library General Public License v2 or later",
"name": "LGPL-2.0-or-later",
"url": "https://www.gnu.org/licenses/old-licenses/lgpl-2.0-standalone.html"
},
"lgpl-2.1": {
"description": "GNU Lesser General Public License v2.1 only",
"name": "LGPL-2.1",
"url": "https://www.gnu.org/licenses/old-licenses/lgpl-2.1-standalone.html"
},
"lgpl-2.1+": {
"description": "GNU Library General Public License v2.1 or later",
"name": "LGPL-2.1+",
"url": "https://www.gnu.org/licenses/old-licenses/lgpl-2.1-standalone.html"
},
"lgpl-2.1-only": {
"description": "GNU Lesser General Public License v2.1 only",
"name": "LGPL-2.1-only",
"url": "https://www.gnu.org/licenses/old-licenses/lgpl-2.1-standalone.html"
},
"lgpl-2.1-or-later": {
"description": "GNU Lesser General Public License v2.1 or later",
"name": "LGPL-2.1-or-later",
"url": "https://www.gnu.org/licenses/old-licenses/lgpl-2.1-standalone.html"
},
"lgpl-3.0": {
"description": "GNU Lesser General Public License v3.0 only",
"name": "LGPL-3.0",
"url": "https://www.gnu.org/licenses/lgpl-3.0-standalone.html"
},
"lgpl-3.0+": {
"description": "GNU Lesser General Public License v3.0 or later",
"name": "LGPL-3.0+",
"url": "https://www.gnu.org/licenses/lgpl-3.0-standalone.html"
},
"lgpl-3.0-only": {
"description": "GNU Lesser General Public License v3.0 only",
"name": "LGPL-3.0-only",
"url": "https://www.gnu.org/licenses/lgpl-3.0-standalone.html"
},
"lgpl-3.0-or-later": {
"description": "GNU Lesser General Public License v3.0 or later",
"name": "LGPL-3.0-or-later",
"url": "https://www.gnu.org/licenses/lgpl-3.0-standalone.html"
},
"lgpllr": {
"description": "Lesser General Public License For Linguistic Resources",
"name": "LGPLLR",
"url": "http://www-igm.univ-mlv.fr/~unitex/lgpllr.html"
},
"libpng": {
"description": "libpng License",
"name": "Libpng",
"url": "http://www.libpng.org/pub/png/src/libpng-LICENSE.txt"
},
"libpng-2.0": {
"description": "PNG Reference Library version 2",
"name": "libpng-2.0",
"url": "http://www.libpng.org/pub/png/src/libpng-LICENSE.txt"
},
"libtiff": {
"description": "libtiff License",
"name": "libtiff",
"url": "https://fedoraproject.org/wiki/Licensing/libtiff"
},
"liliq-p-1.1": {
"description": "Licence Libre du Qu\u00e9bec \u2013 Permissive version 1.1",
"name": "LiLiQ-P-1.1",
"url": "https://forge.gouv.qc.ca/licence/fr/liliq-v1-1/"
},
"liliq-r-1.1": {
"description": "Licence Libre du Qu\u00e9bec \u2013 R\u00e9ciprocit\u00e9 version 1.1",
"name": "LiLiQ-R-1.1",
"url": "https://www.forge.gouv.qc.ca/participez/licence-logicielle/licence-libre-du-quebec-liliq-en-francais/licence-libre-du-quebec-reciprocite-liliq-r-v1-1/"
},
"liliq-rplus-1.1": {
"description": "Licence Libre du Qu\u00e9bec \u2013 R\u00e9ciprocit\u00e9 forte version 1.1",
"name": "LiLiQ-Rplus-1.1",
"url": "https://www.forge.gouv.qc.ca/participez/licence-logicielle/licence-libre-du-quebec-liliq-en-francais/licence-libre-du-quebec-reciprocite-forte-liliq-r-v1-1/"
},
"linux-openib": {
"description": "Linux Kernel Variant of OpenIB.org license",
"name": "Linux-OpenIB",
"url": "https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/infiniband/core/sa.h"
},
"lpl-1.0": {
"description": "Lucent Public License Version 1.0",
"name": "LPL-1.0",
"url": "https://opensource.org/licenses/LPL-1.0"
},
"lpl-1.02": {
"description": "Lucent Public License v1.02",
"name": "LPL-1.02",
"url": "http://plan9.bell-labs.com/plan9/license.html"
},
"lppl-1.0": {
"description": "LaTeX Project Public License v1.0",
"name": "LPPL-1.0",
"url": "http://www.latex-project.org/lppl/lppl-1-0.txt"
},
"lppl-1.1": {
"description": "LaTeX Project Public License v1.1",
"name": "LPPL-1.1",
"url": "http://www.latex-project.org/lppl/lppl-1-1.txt"
},
"lppl-1.2": {
"description": "LaTeX Project Public License v1.2",
"name": "LPPL-1.2",
"url": "http://www.latex-project.org/lppl/lppl-1-2.txt"
},
"lppl-1.3a": {
"description": "LaTeX Project Public License v1.3a",
"name": "LPPL-1.3a",
"url": "http://www.latex-project.org/lppl/lppl-1-3a.txt"
},
"lppl-1.3c": {
"description": "LaTeX Project Public License v1.3c",
"name": "LPPL-1.3c",
"url": "http://www.latex-project.org/lppl/lppl-1-3c.txt"
},
"makeindex": {
"description": | |
test_instances_are_tracked(self):
"""_Package instances are tracked"""
pkg = _Package()
self.assertIn(pkg, _Package.instances())
def test_instance_refs_are_garbage_collected(self):
    """_Package instance refs are garbage collected with old instances"""
    first = _Package()
    first_repr = repr(first)
    # rebinding the only strong reference makes the first instance
    # eligible for collection
    first = _Package()
    gc.collect()
    live_reprs = [repr(inst) for inst in _Package.instances()]
    # the collected instance must no longer appear in the registry
    assert_that(first_repr, is_not(is_in(live_reprs)))
def test_containing_returns_correct_pkg(self):
    """_Package.containing() returns right package instance"""
    # setup: two packages open on the same file; the slide belongs to
    # the second one
    older_pkg = _Package(test_pptx_path)
    older_pkg.presentation  # does nothing, just needed to fake out pep8 warning
    newer_pkg = _Package(test_pptx_path)
    slide = newer_pkg.presentation.slides[0]
    # exercise
    found_pkg = _Package.containing(slide)
    # verify: lookup must resolve to the package owning the slide
    reason = "expected %r, got %r" % (newer_pkg, found_pkg)
    self.assertEqual(newer_pkg, found_pkg, reason)
def test_containing_raises_on_no_pkg_contains_part(self):
    """_Package.containing(part) raises on no package contains part"""
    # a part no open package knows about
    pkg = _Package(test_pptx_path)
    pkg.presentation  # does nothing, just needed to fake out pep8 warning
    orphan_part = Mock(name='part')
    # lookup of an unowned part must raise
    with self.assertRaises(KeyError):
        _Package.containing(orphan_part)
def test_open_gathers_image_parts(self):
    """_Package open gathers image parts into image collection"""
    pkg = _Package(images_pptx_path)
    # the test fixture contains exactly seven distinct images
    image_count = len(pkg._Package__images)
    self.assertEqual(
        7, image_count,
        "expected image count of %d, got %d" % (7, image_count))
def test_presentation_presentation_after_open(self):
    """_Package.presentation is instance of Presentation after open()"""
    prs = _Package().presentation
    self.assertTrue(
        isinstance(prs, Presentation),
        "expected instance of '%s', got type '%s'"
        % (Presentation.__name__, type(prs).__name__))
def test_it_should_have_core_props(self):
    """_Package should provide access to core document properties"""
    assert_that(_Package().core_properties,
                is_(instance_of(_CoreProperties)))
def test_saved_file_has_plausible_contents(self):
    """_Package.save produces a .pptx with plausible contents"""
    # save a default package, then reopen the saved file and
    # sanity-check the presentation structure round-tripped
    _Package().save(self.test_pptx_path)
    reopened = _Package(self.test_pptx_path)
    prs = reopened.presentation
    assert_that(prs, is_not(None))
    masters = prs.slidemasters
    assert_that(masters, is_not(None))
    assert_that(len(masters), is_(1))
    layouts = masters[0].slidelayouts
    assert_that(layouts, is_not(None))
    assert_that(len(layouts), is_(11))
class Test_Part(TestCase):
    """Test _Part"""

    def test_constructs_presentation_for_rt_officedocument(self):
        """_Part() returns Presentation for RT_OFFICE_DOCUMENT"""
        part = _Part(RT_OFFICE_DOCUMENT, CT_PRESENTATION)
        self.assertIsInstance(part, Presentation)

    def test_constructs_slide_for_rt_slide(self):
        """_Part() returns _Slide for RT_SLIDE"""
        part = _Part(RT_SLIDE, CT_SLIDE)
        self.assertIsInstance(part, _Slide)

    def test_constructs_slidelayout_for_rt_slidelayout(self):
        """_Part() returns _SlideLayout for RT_SLIDE_LAYOUT"""
        part = _Part(RT_SLIDE_LAYOUT, CT_SLIDE_LAYOUT)
        self.assertIsInstance(part, _SlideLayout)

    def test_constructs_slidemaster_for_rt_slidemaster(self):
        """_Part() returns _SlideMaster for RT_SLIDE_MASTER"""
        part = _Part(RT_SLIDE_MASTER, CT_SLIDE_MASTER)
        self.assertIsInstance(part, _SlideMaster)

    def test_contructor_raises_on_invalid_prs_content_type(self):
        """_Part() raises on invalid presentation content type"""
        # a presentation reltype paired with a non-presentation
        # content type is an invalid combination
        with self.assertRaises(InvalidPackageError):
            _Part(RT_OFFICE_DOCUMENT, CT_SLIDE_MASTER)
class Test_PartCollection(TestCase):
    """Test _PartCollection"""

    def test__loadpart_sorts_loaded_parts(self):
        """_PartCollection._loadpart sorts loaded parts"""
        # three mock parts with partnames in known sorted order
        partnames = ['/ppt/slides/slide1.xml',
                     '/ppt/slides/slide2.xml',
                     '/ppt/slides/slide3.xml']
        mock_parts = []
        for idx, partname in enumerate(partnames, start=1):
            part = Mock(name='part%d' % idx)
            part.partname = partname
            mock_parts.append(part)
        parts = _PartCollection()
        # load deliberately out of order
        parts._loadpart(mock_parts[1])
        parts._loadpart(mock_parts[2])
        parts._loadpart(mock_parts[0])
        # collection must iterate in partname order regardless
        loaded = [part.partname for part in parts]
        self.assertEqual(
            partnames, loaded, "expected %s, got %s" % (partnames, loaded))
class Test_Presentation(TestCase):
    """Test Presentation"""

    def setUp(self):
        # fresh presentation instance for each test
        self.prs = Presentation()

    def test__blob_rewrites_sldIdLst(self):
        """Presentation._blob rewrites sldIdLst"""
        # setup: relationships where slide masters precede slides
        builder = RelationshipCollectionBuilder()
        builder = builder.with_tuple_targets(2, RT_SLIDE_MASTER)
        builder = builder.with_tuple_targets(3, RT_SLIDE)
        builder = builder.with_ordering(RT_SLIDE_MASTER, RT_SLIDE)
        prs = Presentation()
        prs._relationships = builder.build()
        prs.partname = '/ppt/presentation.xml'
        path = os.path.join(thisdir, 'test_files/presentation.xml')
        prs._element = oxml_parse(path).getroot()
        # exercise: serializing should rewrite the r:id references
        presentation = oxml_fromstring(prs._blob)
        # verify sldId elements reference rIds in relationship order
        sldIds = presentation.xpath('./p:sldIdLst/p:sldId', namespaces=nsmap)
        expected = ['rId3', 'rId4', 'rId5']
        actual = [sldId.get(qtag('r:id')) for sldId in sldIds]
        self.assertEqual(
            expected, actual,
            "expected ordering %s, got %s" % (expected, actual))

    def test_slidemasters_property_empty_on_construction(self):
        """Presentation.slidemasters property empty on construction"""
        self.assertIsSizedProperty(self.prs, 'slidemasters', 0)

    def test_slidemasters_correct_length_after_pkg_open(self):
        """Presentation.slidemasters correct length after load"""
        prs = _Package(test_pptx_path).presentation
        self.assertLength(prs.slidemasters, 1)

    def test_slides_property_empty_on_construction(self):
        """Presentation.slides property empty on construction"""
        self.assertIsSizedProperty(self.prs, 'slides', 0)

    def test_slides_correct_length_after_pkg_open(self):
        """Presentation.slides correct length after load"""
        prs = _Package(test_pptx_path).presentation
        self.assertLength(prs.slides, 1)
class Test_Relationship(TestCase):
    """Test _Relationship"""

    def setUp(self):
        # baseline relationship with a standard-form rId
        self.rel = _Relationship('rId1', RT_SLIDE, None)

    def test_constructor_raises_on_bad_rId(self):
        """_Relationship constructor raises on non-standard rId"""
        with self.assertRaises(AssertionError):
            _Relationship('Non-std14', None, None)

    def test__num_value(self):
        """_Relationship._num value is correct"""
        # _num should be the integer suffix of the rId
        rel = _Relationship('rId91', None, None)
        assert_that(rel._num, is_(equal_to(91)))

    def test__num_value_on_non_standard_rId(self):
        """_Relationship._num value is correct for non-standard rId"""
        # non-numeric suffix falls back to the sentinel 9999
        rel = _Relationship('rIdSm', None, None)
        assert_that(rel._num, is_(equal_to(9999)))

    def test__rId_setter(self):
        """Relationship._rId setter stores passed value"""
        self.rel._rId = 'rId9'
        self.assertEqual(
            'rId9', self.rel._rId,
            "expected '%s', got '%s'" % ('rId9', self.rel._rId))
class Test_RelationshipCollection(TestCase):
"""Test _RelationshipCollection"""
def setUp(self):
    # fresh, empty relationship collection for each test
    self.relationships = _RelationshipCollection()
def __reltype_ordering_mock(self):
    """
    Return RelationshipCollection instance with mocked-up contents
    suitable for testing _reltype_ordering.
    """
    partnames = ['/ppt/slides/slide4.xml',
                 '/ppt/slideLayouts/slideLayout1.xml',
                 '/ppt/slideMasters/slideMaster1.xml',
                 '/ppt/slides/slide1.xml',
                 '/ppt/presProps.xml']
    # reltype for each partname above, position for position
    reltypes = [RT_SLIDE, RT_SLIDE_LAYOUT, RT_SLIDE_MASTER,
                RT_SLIDE, RT_PRES_PROPS]
    relationships = _RelationshipCollection()
    # build a mock part per partname and add 'rId1' .. 'rId5'
    for idx, (partname, reltype) in enumerate(zip(partnames, reltypes),
                                              start=1):
        part = Mock(name='part%d' % idx)
        part.partname = partname
        relationships._additem(
            _Relationship('rId%d' % idx, reltype, part))
    return (relationships, partnames)
def test_it_can_find_related_part(self):
    """_RelationshipCollection can find related part"""
    # one relationship of the sought reltype
    core_props_part = Mock(name='part')
    relationships = _RelationshipCollection()
    relationships._additem(
        _Relationship('rId1', RT_CORE_PROPS, core_props_part))
    # lookup by reltype should hand back the exact part object
    found = relationships.related_part(RT_CORE_PROPS)
    assert_that(found, same_instance(core_props_part))
def test_it_raises_if_it_cant_find_a_related_part(self):
    """_RelationshipCollection raises if it can't find a related part"""
    empty_rels = _RelationshipCollection()
    # no relationship matches, so lookup must raise
    with self.assertRaises(KeyError):
        empty_rels.related_part('foobar')
def test__additem_raises_on_dup_rId(self):
    """_RelationshipCollection._additem raises on duplicate rId"""
    first = _Relationship('rId9', None, _BasePart())
    duplicate = _Relationship('rId9', None, _BasePart())
    self.relationships._additem(first)
    # a second relationship reusing the same rId must be rejected
    with self.assertRaises(ValueError):
        self.relationships._additem(duplicate)
def test__additem_maintains_rId_ordering(self):
    """_RelationshipCollection maintains rId ordering on additem()"""
    rels = dict((rId, _Relationship(rId, None, _BasePart()))
                for rId in ('rId1', 'rId2', 'rId3'))
    # add deliberately out of order
    self.relationships._additem(rels['rId2'])
    self.relationships._additem(rels['rId1'])
    self.relationships._additem(rels['rId3'])
    # iteration should yield rIds in ascending numeric order
    expected = ['rId1', 'rId2', 'rId3']
    actual = [rel._rId for rel in self.relationships]
    self.assertEqual(
        expected, actual,
        "expected ordering %s, got %s" % (expected, actual))
def test__additem_maintains_reltype_ordering(self):
    """_RelationshipCollection maintains reltype ordering on additem()"""
    # collection with an active reltype ordering
    relationships, partnames = self.__reltype_ordering_mock()
    relationships._reltype_ordering = (
        RT_SLIDE_MASTER, RT_SLIDE_LAYOUT, RT_SLIDE)
    # a new slide part to insert
    new_partname = '/ppt/slides/slide2.xml'
    new_part = Mock(name='new_part')
    new_part.partname = new_partname
    new_rel = _Relationship(relationships._next_rId, RT_SLIDE, new_part)
    # exercise
    relationships._additem(new_rel)
    # verify: master, layout, then slides in partname order, props last
    expected = [partnames[2], partnames[1], partnames[3],
                new_partname, partnames[0], partnames[4]]
    actual = [r._target.partname for r in relationships]
    self.assertEqual(
        expected, actual,
        "expected ordering %s, got %s" % (expected, actual))
def test_rels_of_reltype_return_value(self):
    """RelationshipCollection._rels_of_reltype returns correct rels"""
    relationships, partnames = self.__reltype_ordering_mock()
    # only the two RT_SLIDE relationships should come back
    slide_rels = relationships.rels_of_reltype(RT_SLIDE)
    expected = ['rId1', 'rId4']
    actual = [rel._rId for rel in slide_rels]
    self.assertEqual(
        expected, actual, "expected %s, got %s" % (expected, actual))
def test__reltype_ordering_sorts_rels(self):
    """RelationshipCollection._reltype_ordering sorts rels"""
    relationships, partnames = self.__reltype_ordering_mock()
    ordering = (RT_SLIDE_MASTER, RT_SLIDE_LAYOUT, RT_SLIDE)
    # assigning the ordering should re-sort the collection in place
    relationships._reltype_ordering = ordering
    assert_that(relationships._reltype_ordering, is_(equal_to(ordering)))
    expected = [partnames[2], partnames[1], partnames[3], partnames[0],
                partnames[4]]
    actual = [rel._target.partname for rel in relationships]
    self.assertEqual(
        expected, actual,
        "expected ordering %s, got %s" % (expected, actual))
def test__reltype_ordering_renumbers_rels(self):
    """RelationshipCollection._reltype_ordering renumbers rels"""
    relationships, partnames = self.__reltype_ordering_mock()
    # assigning an ordering should also renumber rIds sequentially
    relationships._reltype_ordering = (
        RT_SLIDE_MASTER, RT_SLIDE_LAYOUT, RT_SLIDE)
    expected = ['rId1', 'rId2', 'rId3', 'rId4', 'rId5']
    actual = [rel._rId for rel in relationships]
    self.assertEqual(
        expected, actual,
        "expected numbering %s, got %s" % (expected, actual))
def test__next_rId_fills_gap(self):
"""_RelationshipCollection._next_rId fills | |
extruding extrusion extrusions exuberantly exultantly eyeful
eyefuls eyeglass eyeglasses eyelet eyelets eyeliner eyeliners eyepiece
eyepieces eyestrain eyeteeth eyetooth f fa fabled fabulously
facetiously facetiousness facially facilitation facings factional
factionalism factitious factotum factotums faddish fag fagged fagging
fags fain fained fainer fainest faining fains fainthearted faintness
fairground fairgrounds fairway fairways fairyland fairylands
faithlessly faithlessness faker fakers fakir fakirs falconer falconers
fallaciously fallibility fallibly falloff falloffs fallow fallowed
fallowing fallows falseness falsifiable falteringly falterings
familial familiarly famish famished famishes famishing famously
fanatically fanaticism fanciers fancifully fancily fanciness fannies
fanny fanzine farcical farina farinaceous farmhand farmhands farmhouse
farmhouses farmyard farmyards farrow farrowed farrowing farrows
farsighted farsightedness fart farted farthing farthings farting farts
fastidiously fastidiousness fastness fastnesses fatalism fatalist
fatalists fatefully fathead fatheads fatherless fathomable fathomless
fatness fattenings fatuously fatuousness faultfinding faultily
faultiness faultlessly faun fauns fax faxed faxes faxing fealty
fearfulness fearlessness feasibly featherbedding featherweight
featherweights featureless febrile fecal feckless fecund fecundity
federally federate federated federates federating fedora fedoras
feebleness feebly feedbag feedbags feedings feelingly feistier
feistiest feisty feldspar felicities felicitous felicity fellatio
felonious femoral fems femur femurs fencer fencers fennel fens fer
feral ferociousness ferric ferrous ferrule ferrules ferryboat
ferryboats fervency fervid fervidly fest festal festals festively
fests feta fetchingly fetishism fetishist fetishistic fetishists
fetlock fetlocks fettle feudalistic fevered fey fiat fiats fibroid
fibrous fibula fibulae fiches fickleness fiddlesticks fiduciaries
fiduciary fie fief fiefs fielder fielders fieldwork fieriness fies
fife fifes figurine figurines filamentous filbert filberts filial
filibuster filibustered filibustering filibusters filigree filigreed
filigreeing filigrees filings fillers fillings fillip filliped
filliping fillips filmmaker filmmakers filmstrip filmstrips filterable
filthiness filtrate filtrated filtrates filtrating filtration finagle
finagled finagler finaglers finagles finagling finder finders fineness
finery fingerboard fingerboards fingerings finis finises finisher
finishers finitely fink finked finking finks finnier finniest finny
fireball fireballs firebomb firebombed firebombing firebombs firebrand
firebrands firebreak firebreaks firebug firebugs firefight
firefighting firefights firehouse firehouses fireplug fireplugs
firepower firestorm firestorms firetrap firetraps firewall firewalled
firewalling firewalls firewater firmament firmaments firstborn
firstborns firth firths fiscally fishbowl fishbowls fishers fishhook
fishhooks fishnet fishnets fishtail fishtailed fishtailing fishtails
fishwife fishwives fistful fistfuls fisticuffs fitfully fitly fitters
fittingly fixate fixated fixates fixating fixative fixatives fixedly
fixer fixers fixings fixity fizzier fizziest fjord fjords flab
flabbergast flabbergasted flabbergasting flabbergasts flabbiness
flaccid flack flacks flagella flagellate flagellated flagellates
flagellating flagellation flagellum flagon flagons flagstaff
flagstaffs flakiness flaks flambe flambeed flambeing flambes flamenco
flamencos flamethrower flamethrowers flamings flammability flan flange
flanges flapper flappers flashbulb flashbulbs flashers flashgun
flashguns flashily flashiness flatbed flatbeds flatboat flatboats
flatcar flatcars flatfeet flatfish flatfishes flatfoot flatfooted
flatfooting flatfoots flatiron flatirons flatteringly flattop flattops
flatulence flatulent flatware flautist flautists flax flaxen flay
flayed flaying flays fleetingly fleetness fleshlier fleshliest fleshly
flextime flibbertigibbet flibbertigibbets flightiness flimflam
flimflammed flimflamming flimflams flimsily flintier flintiest
flintlock flintlocks flinty flippancy flippantly flirtatiously floater
floaters floe floes floggings floodgate floodgates floodlit floorboard
floorboards floozies floozy flophouse flophouses floppiness floridly
florin florins flotation flotations flotsam flourier flouriest floury
flowerbed flowerbeds floweriness flowerpot flowerpots flub flubbed
flubbing flubs fluffiness fluidity fluidly flukier flukiest fluky
flume flumes flummox flummoxed flummoxes flummoxing fluoresce
fluoresced fluorescence fluoresces fluorescing fluoridate fluoridated
fluoridates fluoridating fluoridation fluoride fluorides fluorine
fluorite fluorocarbon fluorocarbons fluoroscope fluoroscopes fluttery
flyby flybys flycatcher flycatchers flyleaf flyleaves flypaper
flypapers flysheet flyspeck flyspecked flyspecking flyspecks
flyswatter flyswatters flyweight flyweights flywheel flywheels fob
fobbed fobbing fobs foetid fogbound fogginess foldaway folio folios
follicle follicles fomentation fondant fondants fondue fondues
fooleries foolery foolhardiness foolscap footballer footballers
footbridge footbridges footfall footfalls footlocker footlockers
footloose footman footmen footrest footrests footsie footsies footsore
fop fopped fopping foppish fops forager foragers forbiddingly
forcefulness forebear forebears forecaster forecasters forecastle
forecastles foreclose foreclosed forecloses foreclosing foreclosure
foreclosures forefeet forefoot forehand forehands foreknowledge
forelock forelocks foremast foremasts forename forenames forenoon
forenoons foreordain foreordained foreordaining foreordains foresail
foresails foreshorten foreshortened foreshortening foreshortens
forestation forester foresters forevermore forewoman forewomen
forfeiture forgather forgathered forgathering forgathers forgetfully
forgettable forgivable forklift forklifts forlornly formaldehyde
formalism formidably formlessly formlessness formulaic fornicate
fornicated fornicates fornicating forsooth forsythia forsythias
forthrightly forthrightness fortissimo fortnights fortuitously
forwardness foully foulness fountainhead fountainheads fourfold
fourscore foursome foursomes foursquare fourthly foxglove foxgloves
foxhole foxholes foxhound foxhounds foxtrot foxtrots foxtrotted
foxtrotting fractals fractionally fractious fractiously fragrantly
framer framers franchisee franchisees franchiser franchisers
frankincense frankness frappe frappes frat fraternally fratricide
fratricides frats fraudulence frazzle frazzled frazzles frazzling
freakier freakiest freakish freaky freebase freebased freebases
freebasing freebie freebies freebooter freebooters freedman freedmen
freehold freeholder freeholders freeholds freelanced freelancer
freelancers freelances freelancing freeload freeloaded freeloader
freeloaders freeloading freeloads freeman freemen freestanding
freestyle freestyles freethinker freethinkers freethinking freewheel
freewheeled freewheeling freewheels freewill frenetic frenetically
frenziedly fresco frescoes freshet freshets fretfulness fretwork
friable fricassee fricasseed fricasseeing fricassees fridge fridges
friendless frigidly fripperies frippery friskily friskiness
frivolously frizz frizzed frizzes frizzing frizzle frizzled frizzles
frizzling frogman frogmen frolicsome frontally frontiersman
frontiersmen frontispiece frontispieces frostily frostiness frowzier
frowziest frowzy fructified fructifies fructify fructifying fructose
fruitcake fruitcakes fruitfully fruitfulness fruitlessness frump
frumpier frumpiest frumps frumpy fryer fryers fuchsia fuchsias fuck
fucked fucker fuckers fucking fucks fuddle fuddled fuddles fuddling
fugue fugues fullback fullbacks fulminate fulminated fulminates
fulminating fulmination fulminations fulsome fumbler fumblers
fumigator fumigators functionaries functionary funereal funereally
fungal fungals fungicidal fungous funicular funiculars funk funked
funkier funkiest funking funks funky funniness furbelow furbish
furbished furbishes furbishing furriers furtherance furthermost furze
fusible fusillade fusillades fusions fussbudget fussbudgets fussily
fussiness fustian fustier fustiest fusty futilely futon futons
futuristics futurities futurity futz futzed futzes futzing fuzzily
fuzziness g gabardine gabardines gabbier gabbiest gabble gabbled
gabbles gabbling gabby gad gadabout gadabouts gadded gadding gadflies
gadfly gadgetry gads gaff gaffe gaffed gaffes gaffing gaffs gaggle
gaggles gainfully gainsaid gainsay gainsaying gainsays gaiter gaiters
galena gallantly gallbladder gallbladders galleon galleons gallium
gallstone gallstones galosh galoshed galoshes galoshing galvanic
galvanometer galvanometers gambol gambols gamecock gamecocks
gamekeeper gamekeepers gamely gameness gamesmanship gamete gametes
gamier gamiest gamin gamine gamines gamins gammas gamy gangland
ganglia ganglion gangrenous gannet gannets gantlet gantlets gantries
gantry gaoled gaoling gaols gapings garbageman garbanzo garbanzos
gargantuan garishly garishness garlicky garner garnered garnering
garners garnishee garnisheed garnisheeing garnishees garrote garroted
garrotes garroting garrulity garrulously garrulousness gaslight
gaslights gasohol gassier gassiest gassy gastritis gastrointestinal
gastronomic gastronomical gastronomy gasworks gatecrasher gatecrashers
gatepost gateposts gatherer gatherers gauche gaucher gauchest gaucho
gauchos gaudily gaudiness gauntness gauzier gauziest gauzy gavotte
gavottes gawkily gawkiness gayness gazebo gazebos gazer gazers
gazetteer gazetteered gazetteering gazetteers gazillion gazillions
gazpacho gearbox gearboxes gearshift gearshifts gearwheel gearwheels
gecko geckos geek geekier geekiest geeks geeky geezer geezers geisha
gelatinous gelid gelled gelling gels gemstone gemstones gendarme
gendarmes genealogist genealogists generalissimo generalissimos
generalities generative generically geniality genitalia genitive
genitives genome genomes genteel genteeler genteelest gentian gentians
gentlefolk gentlemanly gentlewoman gentlewomen gentrification
gentrified gentrifies gentrify gentrifying genuflect genuflected
genuflecting genuflection genuflections genuflects geocentric geode
geodes geodesic geodesics geographer geographers geologic geologically
geometer geometrical geometrically geophysical geophysics geopolitical
geopolitics geostationary geothermal geriatric geriatrics germane
germanium germicidal germinal gerontologist gerontologists gerontology
gerrymander gerrymandered gerrymandering gerrymanders gerund gerunds
gestate gestated gestates gestating gesticulation gesticulations
gesundheit getup gewgaw gewgaws ghastliness gherkin gherkins
ghostliness ghostwrite ghostwriter ghostwriters ghostwrites
ghostwriting ghostwritten ghostwrote ghoulish giantess giantesses
gibbet gibbeted gibbeting gibbets gibbon gibbons giblet giblets
giddily gigabyte gigabytes giggler gigglers gigglier giggliest giggly
gigolo gigolos gimcrack gimcracks gimlet gimleted gimleting gimlets
gimmickry gimmicky gimpier gimpiest gimpy gingersnap gingersnaps
gingivitis ginkgo ginkgoes ginseng gird girded girding girds girlishly
girt girted girting girts giveaway giveaways glacially gladiatorial
gladiola gladiolas gladioli gladiolus gladness glamorously glaringly
glassful glassfuls glaucoma glazier glaziers gleamings gleeful
gleefully glibness glimmerings glissandi glissando glitch glitches
glitterings glittery glitz glitzier glitziest glitzy gloaming
gloamings glob globed globetrotter globetrotters globing globs
glockenspiel glockenspiels gloomily gloominess glop glopped glopping
glops glossiness glottis glottises glowingly glowworm glowworms gluey
gluier gluiest glumly glumness gluten glutinous gluttonous
gluttonously glycerol glycogen glyph gnarlier gnarliest gnarly gneiss
gnomish goalpost goalposts goaltender goaltenders goatherd goatherds
goatskin goatskins gobbledygook gobbler gobblers goddamn goddaughter
goddaughters godforsaken godhood godliness godson godsons gofer gofers
goggled goggling goings goldbrick goldbricked goldbricking goldbricks
goldenrod goldfinch goldfinches gollies golly gonad gonads gondolier
gondoliers goober goobers goodbyes goodlier goodliest goodly gook
gooks goop gooseberries gooseberry gorgeously goriness gorse goshes
gossipy gotta gouger gougers gourmand gourmands goutier goutiest gouty
governable governance governorship govs gracefulness gracelessly
gracelessness grackle grackles grad graders grads grafter grafters
grail grainier grainiest grainy grammarian grammarians gramme grammes
granaries granary granddad granddads grandee grandees grandiloquence
grandiloquent grandma grandmas grandness grandpa grandpas grange
granges granularity granulate granulated granulates granulating
granulation graphologist graphologists graphology grapnel grapnels
grassland gratefulness gratis gravelly graybeard graybeards grayish
grayness greasepaint greasiness grebe grebes greengrocer greengrocers
greenish greenness greensward gregariously gregariousness grenadier
grenadiers griddlecake griddlecakes gridlock gridlocked gridlocking
gridlocks grievously griffin griffins grimness gringo gringos grippe
grippes grist gristlier gristliest gristly grog groggily grogginess
grommet grommets grosbeak grosbeaks grossness grotesquely grouchiness
groundbreaking groundbreakings grounder grounders groundhog groundhogs
groundings groundlessly groundswell groundswells groupie groupies
grout grouted grouting grouts grownup grownups grubbiness grubstake
grudgingly grudgings gruesomely gruffness grumbler grumblers grumpily
grumpiness grunge grungier grungiest grungy gs guacamole guano
guarantied guaranties guaranty guarantying guardedly guardhouse
guardhouses guardianship guardrail guardrails guardroom guardrooms
guardsman guardsmen guava guavas guesser guessers guesstimate
guesstimated guesstimates guesstimating guff guilder guilders guileful
guileless guiltiness guineas guitarists gulag gulags gullibility gumbo
gumbos gunboat gunboats gunfight gunfighting gunfights gunfought | |
computer_ids_list = self._get_endpoint_id_from_ip_hostname(action_result, value_to_search,
search_key_field)
# Something went wrong while getting computer ID based on given IP or hostname
if phantom.is_fail(get_endpoint_status):
return action_result.get_status()
# If no endpoint is found
if not computer_ids_list:
self.debug_print(consts.SEP_NO_DEVICE_FOUND)
return action_result.set_status(phantom.APP_ERROR,
consts.SEP_NO_DEVICE_FOUND)
computer_id = ",".join(list(set(computer_ids_list)))
# Executing API to quarantine specified endpoint
response_status, response_data = self._make_rest_call_abstract("{quarantine_api}".format(
quarantine_api=consts.SEP_QUARANTINE_ENDPOINT.format(
params=requests.compat.quote(computer_id)
)), action_result, method="post")
# Something went wrong while quarantining the endpoint(s)
if phantom.is_fail(response_status):
return action_result.get_status()
# Adding response to ActionResult Object
action_result.add_data(response_data)
# Providing Command ID in summary
try:
summary_data["command_id"] = response_data.pop("commandID_computer")
except Exception as e:
err = self._get_error_message_from_exception(e)
self.debug_print(consts.SEP_COMMANDID_ERR.format(err))
pass
# Poll for command status
command_status, state_id_status = self._get_command_status_by_id(
action_result, summary_data.get("command_id"), timeout
)
# Something went wrong
if phantom.is_fail(command_status):
return action_result.get_status()
summary_data["state_id_status"] = state_id_status
action_result.set_status(phantom.APP_SUCCESS)
def _unblock_hash(self, param):
    """ This function is used to unblock existing hashes for a group.

    The blocked hashes for a group live in a fingerprint file named
    "phantom_<group_id>" on the SEP manager. The requested hashes are
    removed from that file; if no hashes remain the fingerprint file is
    deleted, otherwise it is rewritten without the unblocked hashes.

    :param param: dictionary of input parameters (group ID, comma-separated
        hash list)
    :return: status success/failure
    """
    action_result = self.add_action_result(ActionResult(dict(param)))
    summary_data = action_result.update_summary({})
    domain_id = None
    # Getting mandatory parameters
    group_id = param[consts.SEP_PARAM_GROUP_ID]
    # split the comma-separated hash list; the join/split round-trip
    # drops any empty entries produced by stray commas
    hash_values = param[consts.SEP_PARAM_HASH].replace(" ", "").split(",")
    hash_values = ' '.join(hash_values).split()
    # Getting list of groups to get domain ID of the group ID provided
    status, group_list = self._get_groups(action_result)
    # Something went wrong while getting list of groups
    if phantom.is_fail(status):
        return action_result.get_status()
    # Iterating over group to get details of group whose ID is provided in input parameter
    for group_detail in group_list:
        if group_detail.get("id") != group_id:
            continue
        domain_id = group_detail.get("domain", {}).get("id")
        break
    # If no corresponding domain is found for the given group ID
    if not domain_id:
        self.debug_print(consts.SEL_BLACKLIST_GROUP_ID_NOT_FOUND)
        return action_result.set_status(phantom.APP_ERROR, consts.SEL_BLACKLIST_GROUP_ID_NOT_FOUND)
    # No usable hash values survived the parsing above
    if not hash_values:
        self.debug_print(consts.SEP_INVALID_HASH)
        return action_result.set_status(phantom.APP_ERROR, consts.SEP_INVALID_HASH)
    # fingerprint file name convention used by the block-hash action
    fingerprint_filename = "phantom_{group_id}".format(group_id=group_id)
    # Getting fingerprint file information
    resp_status, file_details = self._get_fingerprint_file_info(action_result, fingerprint_filename)
    # Something went wrong while getting details of fingerprint file
    if phantom.is_fail(resp_status):
        return action_result.get_status()
    # Getting list of all hashes that are present in fingerprint file
    # (upper-cased so comparisons are case-insensitive)
    blocked_hash_list = [hash_value.upper() for hash_value in file_details.get("data", [])]
    # Total number of already blocked hash list
    fingerprint_num_blocked_hash_list = len(blocked_hash_list)
    # Calling a function that will check if given hash values are present in fingerprint or not.
    # This function will remove all hashes provided in hash_values and will return only remaining values that
    # that will be added to the fingerprint file and each hash's status
    ar_hash_list, updated_block_hash_list, hash_value_status = self._update_blocked_hash_list(hash_values,
                                                                                             blocked_hash_list)
    ar_hash_list = [x.get_dict() for x in ar_hash_list]
    # If any hashes are deleted from the fingerprint file, then only file will be updated
    if len(updated_block_hash_list) != fingerprint_num_blocked_hash_list:
        # If all hashes in a file will be unblocked, then fingerprint file will be deleted
        method = "delete"
        fingerprint_api_data = None
        command_id = file_details.get("id")
        # If some hashes are left blocked in a fingerprint file
        if updated_block_hash_list:
            method = "post"
            fingerprint_api_data = json.dumps({"hashType": "MD5",
                                               "name": "phantom_{group_id}".format(group_id=group_id),
                                               "domainId": domain_id,
                                               "data": updated_block_hash_list})
        # Execute REST API to either delete or update the fingerprint file after unblocking hashes provided
        response_status, response_data = self._make_rest_call_abstract("{}/{}".format(
            consts.SEP_FINGERPRINTS_ENDPOINT, command_id), action_result, data=fingerprint_api_data, method=method)
        # Something went wrong while updating fingerprint file
        if phantom.is_fail(response_status):
            return action_result.get_status()
    # per-hash unblock outcome goes into the action summary
    summary_data.update(hash_value_status)
    # Adding domain ID to the fingerprint file details
    file_details["domainId"] = domain_id
    action_result.add_data({"hash_info": ar_hash_list, "fingerprint_file_info": file_details})
    return action_result.set_status(phantom.APP_SUCCESS)
def _unquarantine_device(self, param):
    """ Function to unquarantine an endpoint provided as input parameter.

    Accepts either explicit computer IDs or a comma-separated list of
    IPs/hostnames (which are first resolved to computer IDs), issues the
    unquarantine command, then polls for the command's completion status.

    :param param: Object containing computer ID and/or IP/Hostname and an optional timeout
    :return: status (Success / Failure)
    """

    action_result = self.add_action_result(ActionResult(dict(param)))
    summary_data = action_result.update_summary({})
    search_key_field = list()
    computer_ids_list = list()
    value_to_search = list()

    # Getting computer IDs to unquarantine
    computer_id = param.get(consts.SEP_PARAM_COMPUTER_ID)
    # Getting IP/Hostname given to unquarantine
    ip_hostname = param.get(consts.SEP_PARAM_IP_HOSTNAME)

    # If none of the parameters are specified
    if not computer_id and not ip_hostname:
        self.debug_print(consts.SEP_PARAM_NOT_SPECIFIED.format(
            consts.SEP_PARAM_COMPUTER_ID, consts.SEP_PARAM_IP_HOSTNAME
        ))
        return action_result.set_status(phantom.APP_ERROR, consts.SEP_PARAM_NOT_SPECIFIED.format(
            consts.SEP_PARAM_COMPUTER_ID, consts.SEP_PARAM_IP_HOSTNAME
        ))
    # If computer_id is specified, then ip_hostname parameter will be ignored
    elif computer_id:
        computer_ids_list = [x.strip() for x in computer_id.split(',')]
        computer_ids_list = ' '.join(computer_ids_list).split()
    else:
        value_to_search = ip_hostname = [x.strip() for x in ip_hostname.split(',')]
        value_to_search = ip_hostname = ' '.join(value_to_search).split()
        for item in ip_hostname:
            # Checking if given value is an IP address
            if phantom.is_ip(item):
                search_key_field.append("ipAddresses")
            elif phantom.is_hostname(item):
                search_key_field.append("computerName")
            else:
                self.debug_print(consts.SEP_IP_HOSTNAME_VALIDATION_ERROR)
                return action_result.set_status(phantom.APP_ERROR, consts.SEP_IP_HOSTNAME_VALIDATION_ERROR)

    # Optional parameter: seconds to poll for command status
    timeout = param.get(consts.SEP_PARAM_TIMEOUT, 30)

    # Validate timeout (parentheses make the original operator precedence explicit)
    if (timeout and not str(timeout).isdigit()) or timeout == 0:
        self.debug_print(consts.SEP_INVALID_TIMEOUT)
        return action_result.set_status(phantom.APP_ERROR, consts.SEP_INVALID_TIMEOUT)

    if not computer_ids_list:
        # Resolve the given IP/hostname values to endpoint computer IDs
        get_endpoint_status, computer_ids_list = self._get_endpoint_id_from_ip_hostname(action_result, value_to_search,
                                                                                        search_key_field)
        # Something went wrong while getting computer ID based on given IP or hostname
        if phantom.is_fail(get_endpoint_status):
            return action_result.get_status()

        # If no endpoint is found
        if not computer_ids_list:
            self.debug_print(consts.SEP_NO_DEVICE_FOUND)
            return action_result.set_status(phantom.APP_ERROR,
                                            consts.SEP_NO_DEVICE_FOUND)

    # De-duplicate IDs before building the request URL
    computer_id = ",".join(list(set(computer_ids_list)))

    # Executing API to unquarantine specified endpoint(s)
    response_status, response_data = self._make_rest_call_abstract("{unquarantine_api}".format(
        unquarantine_api=consts.SEP_UNQUARANTINE_ENDPOINT.format(
            params=requests.compat.quote(computer_id)
        )), action_result, method="post")

    # Something went wrong while unquarantining the endpoint(s)
    if phantom.is_fail(response_status):
        return action_result.get_status()

    # Adding response to ActionResult Object
    action_result.add_data(response_data)

    # Providing Command ID in summary; best-effort since the key may be absent
    try:
        summary_data["command_id"] = response_data.pop("commandID_computer")
    except Exception as e:
        err = self._get_error_message_from_exception(e)
        self.debug_print(consts.SEP_COMMANDID_ERR.format(err))

    # Poll for command status
    command_status, state_id_status = self._get_command_status_by_id(
        action_result, summary_data.get("command_id"), timeout
    )

    # Something went wrong
    if phantom.is_fail(command_status):
        return action_result.get_status()

    summary_data["state_id_status"] = state_id_status

    # BUG FIX: the success status must be returned to the caller — previously
    # the method fell off the end and implicitly returned None.
    return action_result.set_status(phantom.APP_SUCCESS)
def _block_hash(self, param):
    """ Function to block file based on given hash values.

    Creates (or updates) a "phantom_<group_id>" MD5 fingerprint file on the
    SEP Manager containing the requested hashes, then attaches that
    fingerprint file to the group as a blacklist.

    :param param: Object containing hash values, name and description
    :return status (Success/Failure)
    """
    action_result = self.add_action_result(ActionResult(dict(param)))
    summary_data = action_result.update_summary({})
    domain_id = None
    fingerprint_file_id = None

    # Getting parameters to create a fingerprint file and group ID to which fingerprints will be assigned
    group_id = param[consts.SEP_PARAM_GROUP_ID]
    # Strip spaces, split on commas; the join/split pair discards empty entries
    hash_values = param[consts.SEP_PARAM_HASH].replace(" ", "").split(",")
    hash_values = ' '.join(hash_values).split()
    if not hash_values:
        self.debug_print(consts.SEP_INVALID_HASH)
        return action_result.set_status(phantom.APP_ERROR, consts.SEP_INVALID_HASH)

    # Fingerprint file name/description are derived from the group ID
    fingerprint_filename = "phantom_{group_id}".format(group_id=group_id)
    fingerprint_file_desc = "List of applications that are blocked in group having ID " \
                            "{group_id}".format(group_id=group_id)

    # Getting list of groups to get domain ID of the group ID provided
    status, group_list = self._get_groups(action_result)

    # Something went wrong while getting list of groups
    if phantom.is_fail(status):
        return action_result.get_status()

    # Iterating over group to get details of group whose ID is provided in input parameter
    for group_detail in group_list:
        if group_detail.get("id") != group_id:
            continue
        domain_id = group_detail.get("domain", {}).get("id")
        break

    # If group not found
    if not domain_id:
        self.debug_print(consts.SEL_BLACKLIST_GROUP_ID_NOT_FOUND)
        return action_result.set_status(phantom.APP_ERROR, consts.SEL_BLACKLIST_GROUP_ID_NOT_FOUND)

    # Dictionary containing fingerprint file details
    api_data = {
        "name": str(fingerprint_filename), "domainId": str(domain_id), "hashType": "MD5"
    }

    # If description is provided
    if fingerprint_file_desc:
        api_data["description"] = fingerprint_file_desc

    # Getting fingerprint file information
    resp_status, file_details = self._get_fingerprint_file_info(action_result, fingerprint_filename)

    # Something went wrong while getting details of fingerprint file
    if phantom.is_fail(resp_status):
        return action_result.get_status()

    # Getting list of all hashes that are present in fingerprint file
    blocked_hash_list = [hash_value.upper() for hash_value in file_details.get("data", [])]

    # Total number of already blocked hash list
    fingerprint_num_blocked_hash_list = len(blocked_hash_list)

    # Calling a function that will check if given hash values are present in fingerprint or not
    ar_hash_list, updated_blocked_hash_list, hash_value_status = self._update_blocked_hash_list(hash_values,
                                                                                               blocked_hash_list)
    ar_hash_list = [x.get_dict() for x in ar_hash_list]

    # If there some new hashes that to be added to fingerprint file
    # (length changed vs. what was already blocked)
    if len(updated_blocked_hash_list) != fingerprint_num_blocked_hash_list:
        fingerprint_endpoint_url = consts.SEP_FINGERPRINTS_ENDPOINT

        # If fingerprint file is already present, update it in place
        if file_details.get("id"):
            fingerprint_file_id = file_details.get("id")
            fingerprint_endpoint_url = consts.SEP_FINGERPRINT_ENDPOINT.format(
                fingerprint_id=fingerprint_file_id
            )
        api_data["data"] = updated_blocked_hash_list

        # Executing REST API call to add a blacklist as a file fingerprint list to SEP Manager
        resp_status, file_resp_data = self._make_rest_call_abstract(fingerprint_endpoint_url, action_result,
                                                                    data=json.dumps(api_data), method="post")

        # Something went wrong while adding file fingerprint list to SEP Manager
        if phantom.is_fail(resp_status):
            return action_result.get_status()

        if not fingerprint_file_id:
            # If fingerprint file ID is not found in the response
            if isinstance(file_resp_data, dict) and not file_resp_data.get("id"):
                self.debug_print(consts.SEP_BLOCK_HASH_GET_ID_ERROR)
                return action_result.set_status(phantom.APP_ERROR, consts.SEP_BLOCK_HASH_GET_ID_ERROR)

            # Getting file ID of fingerprint list
            fingerprint_file_id = file_resp_data.get("id")

        # Executing REST API call to add fingerprint file as blacklist to provided group
        resp_status, blacklist_file_resp_data = self._make_rest_call_abstract(consts.SEP_BLOCK_FILE_ENDPOINT.format(
            group_id=group_id, fingerprint_id=fingerprint_file_id
        ), action_result, method="put")

        # Something went wrong while adding fingerprint file as blacklist
        if phantom.is_fail(resp_status):
            return action_result.get_status()

    # Per-hash status goes into the action summary
    summary_data.update(hash_value_status)
    api_data["id"] = file_details.get("id", fingerprint_file_id)
    action_result.add_data({"hash_info": ar_hash_list, "fingerprint_file_info": api_data})

    return action_result.set_status(phantom.APP_SUCCESS)
def _get_system_info(self, param):
""" This function is used to get system information.
:param param: dictionary of input parameters
:return: status phantom.APP_SUCCESS/phantom.APP_ERROR
"""
action_result = self.add_action_result(ActionResult(dict(param)))
# Get mandatory parameter
hostname = param[consts.SEP_PARAM_HOSTNAME]
# | |
# This class will overlay the clinical information we have on hand
#!/bin/env python3
import sys
import os
import traceback
import numpy as np
import itertools
from datetime import datetime
import random
import time
random.seed(time.time())
# relative imports
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../OpenAPI/python-flask-server/")
from openapi_server.models.attribute import Attribute as EdgeAttribute
from openapi_server.models.edge import Edge
from openapi_server.models.q_edge import QEdge
# FIXME:^ this should be pulled from a YAML file pointing to the parser
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../../KnowledgeSources/COHD_local/scripts/")
from COHDIndex import COHDIndex
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import overlay_utilities as ou
# TODO: boy howdy this can be modularized quite a bit. Since COHD and other clinical KP's will be adding edge attributes and/or edges, should pull out functions to easy their addition.
class OverlayClinicalInfo:
#### Constructor
def __init__(self, response, message, params):
self.response = response
self.message = message
self.parameters = params
self.who_knows_about_what = {'COHD': ['chemical_substance', 'phenotypic_feature', 'disease', 'drug',
'biolink:ChemicalSubstance', 'biolink:PhenotypicFeature', 'biolink:Disease', 'biolink:Drug']} # FIXME: replace this with information about the KP's, KS's, and their API's
self.node_curie_to_type = dict()
self.global_iter = 0
try:
self.cohdIndex = COHDIndex()
except:
tb = traceback.format_exc()
error_type, error, _ = sys.exc_info()
self.response.error(tb, error_code=error_type.__name__)
self.response.error(f"Internal Error encountered connecting to the local COHD database.")
def decorate(self):
"""
Main decorator: looks at parameters and figures out which subroutine to farm out to
:param parameters:
:return: response object
"""
# First, make a dictionary between node curie and type to make sure we're only looking at edges we can handle
self.response.info("Converting CURIE identifiers to human readable names")
try:
for key, node in self.message.knowledge_graph.nodes.items():
self.node_curie_to_type[key] = node.category # WARNING: this is a list
except:
tb = traceback.format_exc()
error_type, error, _ = sys.exc_info()
self.response.error(tb, error_code=error_type.__name__)
self.response.error(f"Something went wrong when converting names")
return self.response
parameters = self.parameters
if 'paired_concept_frequency' in parameters:
if parameters['paired_concept_frequency'] == 'true':
self.paired_concept_frequency()
# TODO: should I return the response and merge, or is it passed by reference and just return at the end?
if 'associated_concept_freq' in parameters:
if parameters['associated_concept_freq'] == 'true':
#self.associated_concept_freq() # TODO: make this function, and all the other COHD functions too
pass
if 'chi_square' in parameters:
if parameters['chi_square'] == 'true':
self.chi_square() # TODO: make this function, and all the other COHD functions too
pass
if 'observed_expected_ratio' in parameters:
if parameters['observed_expected_ratio'] == 'true':
self.observed_expected_ratio() # TODO: make this function, and all the other COHD functions too
pass
if 'relative_frequency' in parameters:
if parameters['relative_frequency'] == 'true':
#self.associated_concept_freq() # TODO: make this function, and all the other COHD functions too
pass
return self.response
def in_common(self, list1, list2):
    """
    Helper that reports whether two collections share at least one element.

    :param list1: a list of strings (intended to be biolink node types)
    :param list2: another list of strings (intended to be biolink node types)
    :return: True if any element is shared, otherwise False
    """
    return len(set(list1) & set(list2)) > 0
def make_edge_attribute_from_curies(self, subject_curie, object_curie, subject_name="", object_name="", default=0., name=""):
"""
Generic function to make an edge attribute
:subject_curie: CURIE of the subject node for the edge under consideration
:object_curie: CURIE of the object node for the edge under consideration
:subject_name: text name of the subject node (in case the KP doesn't understand the CURIE)
:object: text name of the object node (in case the KP doesn't understand the CURIE)
:default: default value of the edge attribute
:name: name of the KP functionality you want to apply
"""
try:
# edge attributes
name = name
type = "EDAM:data_0951"
url = "http://cohd.smart-api.info/"
value = default
node_curie_to_type = self.node_curie_to_type
subject_type = node_curie_to_type[subject_curie]
object_type = node_curie_to_type[object_curie]
# figure out which knowledge provider to use # TODO: should handle this in a more structured fashion, does there exist a standardized KP API format?
KP_to_use = None
for KP in self.who_knows_about_what:
# see which KP's can label both subjects of information
if self.in_common(subject_type, self.who_knows_about_what[KP]) and self.in_common(object_type, self.who_knows_about_what[KP]):
KP_to_use = KP
if KP_to_use == 'COHD':
self.response.debug(f"Querying Columbia Open Health data for info about {subject_name} and {object_name}")
# convert CURIE to OMOP identifiers
# subject_OMOPs = [str(x['omop_standard_concept_id']) for x in COHD.get_xref_to_OMOP(subject_curie, 1)]
res = self.cohdIndex.get_concept_ids(subject_curie)
if len(res) != 0:
subject_OMOPs = res
else:
subject_OMOPs = []
# object_OMOPs = [str(x['omop_standard_concept_id']) for x in COHD.get_xref_to_OMOP(object_curie, 1)]
res = self.cohdIndex.get_concept_ids(object_curie)
if len(res) != 0:
object_OMOPs = res
else:
object_OMOPs = []
# for domain in ["Condition", "Drug", "Procedure"]:
# subject_OMOPs.update([str(x['concept_id']) for x in COHD.find_concept_ids(subject_name, domain=domain, dataset_id=3)])
# object_OMOPs.update([str(x['concept_id']) for x in COHD.find_concept_ids(object_name, domain=domain, dataset_id=3)])
#################################################
# FIXME: this was the old way
# FIXME: Super hacky way to get around the fact that COHD can't map CHEMBL drugs
# if subject_curie.split('.')[0] == 'CHEMBL':
# subject_OMOPs = [str(x['concept_id']) for x in
# COHD.find_concept_ids(subject_name, domain="Drug", dataset_id=3)]
# if object_curie.split('.')[0] == 'CHEMBL':
# object_OMOPs = [str(x['concept_id']) for x in
# COHD.find_concept_ids(object_name, domain="Drug", dataset_id=3)]
# uniquify everything
# subject_OMOPs = list(set(subject_OMOPs))
# object_OMOPs = list(set(object_OMOPs))
# Decide how to handle the response from the KP
if name == 'paired_concept_frequency':
# sum up all frequencies #TODO check with COHD people to see if this is kosher
frequency = default
# for (omop1, omop2) in itertools.product(subject_OMOPs, object_OMOPs):
# freq_data_list = self.cohdIndex.get_paired_concept_freq(omop1, omop2, 3) # use the hierarchical dataset
# if len(freq_data_list) != 0:
# freq_data = freq_data_list[0]
# temp_value = freq_data['concept_frequency']
# if temp_value > frequency:
# frequency = temp_value
omop_pairs = [f"{omop1}_{omop2}" for (omop1, omop2) in itertools.product(subject_OMOPs, object_OMOPs)]
if len(omop_pairs) != 0:
res = self.cohdIndex.get_paired_concept_freq(concept_id_pair=omop_pairs, dataset_id=3) # use the hierarchical dataset
if len(res) != 0:
maximum_concept_frequency = res[0]['concept_frequency'] # the result returned from get_paired_concept_freq was sorted by decreasing order
frequency = maximum_concept_frequency
# decorate the edges
value = frequency
elif name == 'observed_expected_ratio':
# should probably take the largest obs/exp ratio # TODO: check with COHD people to see if this is kosher
# FIXME: the ln_ratio can be negative, so I should probably account for this, but the object model doesn't like -np.inf
value = float("-inf") # FIXME: unclear in object model if attribute type dictates value type, or if value always needs to be a string
###############################
# The following code was an experiment to see if it would speed things up, leaving it out for now since it's difficult to quantify if it does speed things up given the cacheing
#if len(subject_OMOPs) < len(object_OMOPs):
# for omop1 in subject_OMOPs:
# omop_to_ln_ratio = dict()
# response = COHD.get_obs_exp_ratio(omop1, domain="", dataset_id=3) # use the hierarchical dataset
# if response:
# for res in response:
# omop_to_ln_ratio[str(res['concept_id_2'])] = res['ln_ratio']
# for omop2 in object_OMOPs:
# if omop2 in omop_to_ln_ratio:
# temp_value = omop_to_ln_ratio[omop2]
# if temp_value > value:
# value = temp_value
#else:
# for omop1 in object_OMOPs:
# omop_to_ln_ratio = dict()
# response = COHD.get_obs_exp_ratio(omop1, domain="", dataset_id=3) # use the hierarchical dataset
# if response:
# for res in response:
# omop_to_ln_ratio[str(res['concept_id_2'])] = res['ln_ratio']
# for omop2 in subject_OMOPs:
# if omop2 in omop_to_ln_ratio:
# temp_value = omop_to_ln_ratio[omop2]
# if temp_value > value:
# value = temp_value
###################################
# for (omop1, omop2) in itertools.product(subject_OMOPs, object_OMOPs):
# #print(f"{omop1},{omop2}")
# response = self.cohdIndex.get_obs_exp_ratio(omop1, concept_id_2=omop2, domain="", dataset_id=3) # use the hierarchical dataset
# # response is a list, since this function is overloaded and can omit concept_id_2, take the first element
# if response and 'ln_ratio' in response[0]:
# temp_val = response[0]['ln_ratio']
# if temp_val > value:
# value = temp_val
omop_pairs = [f"{omop1}_{omop2}" for (omop1, omop2) in itertools.product(subject_OMOPs, object_OMOPs)]
if len(omop_pairs) != 0:
res = self.cohdIndex.get_obs_exp_ratio(concept_id_pair=omop_pairs, domain="", dataset_id=3) # use the hierarchical dataset
if len(res) != 0:
maximum_ln_ratio = res[0]['ln_ratio'] # the result returned from get_paired_concept_freq was sorted by decreasing order
value = maximum_ln_ratio
elif name == 'chi_square':
value = float("inf")
# for (omop1, omop2) in itertools.product(subject_OMOPs, object_OMOPs):
# response = self.cohdIndex.get_chi_square(omop1, concept_id_2=omop2, domain="", dataset_id=3) # use the hierarchical dataset
# # response is a list, since this function is overloaded and can omit concept_id_2, take the first element
# if response and 'p-value' in response[0]:
# temp_val = response[0]['p-value']
# if temp_val < value: # looking at p=values, so lower is better
# value = temp_val
omop_pairs = [f"{omop1}_{omop2}" for (omop1, omop2) in itertools.product(subject_OMOPs, object_OMOPs)]
if len(omop_pairs) != 0:
| |
# 04.py -- Advent of Code 2020, Day 4: Passport Processing
"""
--- Day 4: Passport Processing ---
You arrive at the airport only to realize that you grabbed your North Pole Credentials instead of your passport. While these documents are extremely similar, North Pole Credentials aren't issued by a country and therefore aren't actually valid documentation for travel in most of the world.
It seems like you're not the only one having problems, though; a very long line has formed for the automatic passport scanners, and the delay could upset your travel itinerary.
Due to some questionable network security, you realize you might be able to solve both of these problems at the same time.
The automatic passport scanners are slow because they're having trouble detecting which passports have all required fields. The expected fields are as follows:
byr (Birth Year)
iyr (Issue Year)
eyr (Expiration Year)
hgt (Height)
hcl (Hair Color)
ecl (Eye Color)
pid (Passport ID)
cid (Country ID)
Passport data is validated in batch files (your puzzle input). Each passport is represented as a sequence of key:value pairs separated by spaces or newlines. Passports are separated by blank lines.
Here is an example batch file containing four passports:
ecl:gry pid:860033327 eyr:2020 hcl:#fffffd byr:1937 iyr:2017 cid:147 hgt:183cm
iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884 hcl:#cfa07d byr:1929
hcl:#ae17e1 iyr:2013 eyr:2024 ecl:brn pid:760753108 byr:1931 hgt:179cm
hcl:#cfa07d eyr:2025 pid:166559648 iyr:2011 ecl:brn hgt:59in
The first passport is valid - all eight fields are present.
The second passport is invalid - it is missing hgt (the Height field).
The third passport is interesting; the only missing field is cid, so it looks like data from North Pole Credentials, not a passport at all! Surely, nobody would mind if you made the system temporarily ignore missing cid fields. Treat this "passport" as valid.
The fourth passport is missing two fields, cid and byr. Missing cid is fine, but missing any other field is not, so this passport is invalid.
According to the above rules, your improved system would report 2 valid passports.
Count the number of valid passports - those that have all required fields. Treat cid as optional. In your batch file, how many passports are valid?
To begin, get your puzzle input.
"""
def all_pass(lines):
    """Parse an iterable of batch-file lines into a list of passport dicts.

    Passports are separated by blank lines; the key:value pairs of one
    passport may span several lines.

    :param lines: iterable of text lines (e.g. an open file)
    :return: list of dicts mapping field name -> value
    """
    passports = []
    passp = {}
    for line in lines:
        if line == "\n":
            # Blank line terminates the current passport record.
            if passp:
                passports.append(passp)
            passp = {}
        elif line:
            passp = get_p(line, passp)
    # BUG FIX: the original dropped the final passport when the input did
    # not end with a trailing blank line.
    if passp:
        passports.append(passp)
    return passports


def get_p(line, passp):
    """Merge the space-separated key:value pairs found on one line into *passp*."""
    for token in line.strip("\n").split(" "):
        key, value = get_dict(token)
        passp[key] = value
    return passp


def get_dict(string):
    """Split a single "key:value" token into its (key, value) parts."""
    return string.split(":")
"""
def is_valid(passp):
byr = bool(passp.get("byr", False))
iyr = bool(passp.get("iyr", False))
eyr = bool(passp.get("eyr", False))
hgt = bool(passp.get("hgt", False))
hcl = bool(passp.get("hcl", False))
ecl = bool(passp.get("ecl", False))
pid = bool(passp.get("pid", False))
musts = [byr, iyr, eyr, hgt, hcl, ecl, pid]
if all(x for x in musts):
return True
else:
return False
def count_valid(all_passps):
trues = {}
for passp in all_passps:
try:
trues[is_valid(passp)] += 1
except KeyError:
trues[is_valid(passp)] = 1
return trues
passps = [{'ecl': 'gry', 'pid': '860033327', 'eyr': '2020', 'hcl': '#fffffd', 'byr': '1937', 'iyr': '2017', 'cid': '147', 'hgt': '183cm'}, {'iyr': '2013', 'ecl': 'amb', 'cid': '350', 'eyr': '2023', 'pid': '028048884', 'hcl': '#cfa07d', 'byr': '1929'}, {'hcl': '#ae17e1', 'iyr': '2013', 'eyr': '2024', 'ecl': 'brn', 'pid': '760753108', 'byr': '1931', 'hgt': '179cm'}, {'hcl': '#cfa07d', 'eyr': '2025', 'pid': '166559648', 'iyr': '2011', 'ecl': 'brn', 'hgt': '59in'}]
assert is_valid(passps[0])
assert not is_valid(passps[1])
assert is_valid(passps[2])
assert not is_valid(passps[3])
with open("04_input.txt", "r") as f:
all_passps = all_pass(f)
print(count_valid(all_passps)) # 230
"""
"""
Your puzzle answer was 230.
The first half of this puzzle is complete! It provides one gold star: *
--- Part Two ---
The line is moving more quickly now, but you overhear airport security talking about how passports with invalid data are getting through. Better add some data validation, quick!
You can continue to ignore the cid field, but each other field has strict rules about what values are valid for automatic validation:
byr (Birth Year) - four digits; at least 1920 and at most 2002.
iyr (Issue Year) - four digits; at least 2010 and at most 2020.
eyr (Expiration Year) - four digits; at least 2020 and at most 2030.
hgt (Height) - a number followed by either cm or in:
If cm, the number must be at least 150 and at most 193.
If in, the number must be at least 59 and at most 76.
hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f.
ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.
pid (Passport ID) - a nine-digit number, including leading zeroes.
cid (Country ID) - ignored, missing or not.
Your job is to count the passports where all required fields are both present and valid according to the above rules. Here are some example values:
byr valid: 2002
byr invalid: 2003
hgt valid: 60in
hgt valid: 190cm
hgt invalid: 190in
hgt invalid: 190
hcl valid: #123abc
hcl invalid: #123abz
hcl invalid: 123abc
ecl valid: brn
ecl invalid: wat
pid valid: 000000001
pid invalid: 0123456789
Here are some invalid passports:
eyr:1972 cid:100
hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926
iyr:2019
hcl:#602927 eyr:1967 hgt:170cm
ecl:grn pid:012533040 byr:1946
hcl:dab227 iyr:2012
ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277
hgt:59cm ecl:zzz
eyr:2038 hcl:74454a iyr:2023
pid:3556412378 byr:2007
Here are some valid passports:
pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980
hcl:#623a2f
eyr:2029 ecl:blu cid:129 byr:1989
iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm
hcl:#888785
hgt:164cm byr:2001 iyr:2015 cid:88
pid:545766238 ecl:hzl
eyr:2022
iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719
Count the number of valid passports - those that have all required fields and valid values. Continue to treat cid as optional. In your batch file, how many passports are valid?
"""
# Worked examples transcribed from the part-two puzzle statement.
# Every passport below has all required fields, so part-one validation
# passes; part-two value rules decide validity.
t_invalid = [
    {"eyr":"1972", "cid":"100", "hcl":"#18171d", "ecl":"amb", "hgt":"170", "pid":"186cm", "iyr":"2018", "byr":"1926"},
    # NOTE(review): "grn " carries a trailing space (transcription quirk);
    # is_valid2() below normalizes it before validating — confirm intended.
    {"iyr": "2019", "hcl": "#602927", "eyr": "1967", "hgt": "170cm", "ecl": "grn ", "pid": "012533040", "byr": "1946"},
    {"hcl": "dab227", "iyr": "2012", "ecl": "brn", "hgt": "182cm", "pid": "021572410", "eyr": "2020", "byr": "1992", "cid": "277"},
    {"hgt": "59cm", "ecl": "zzz", "eyr": "2038", "hcl": "74454a", "iyr": "2023", "pid": "3556412378", "byr": "2007"}
]
# Valid passports: all required fields present and every value in range.
t_valid = [
    {'pid': '087499704', 'hgt': '74in', 'ecl': 'grn', 'iyr': '2012', 'eyr': '2030', 'byr': '1980', 'hcl': '#623a2f'},
    {'eyr': '2029', 'ecl': 'blu', 'cid': '129', 'byr': '1989', 'iyr': '2014', 'pid': '896056539', 'hcl': '#a97842', 'hgt': '165cm'},
    {'hcl': '#888785', 'hgt': '164cm', 'byr': '2001', 'iyr': '2015', 'cid': '88', 'pid': '545766238', 'ecl': 'hzl', 'eyr': '2022'},
    {'iyr': '2010', 'hgt': '158cm', 'hcl': '#b6652a', 'ecl': 'blu', 'byr': '1944', 'eyr': '2021', 'pid': '093154719'},
]
def is_byr(passp):
    """byr (Birth Year) - four digits; at least 1920 and at most 2002."""
    value = str(passp.get("byr", ""))
    # Require exactly four digits so values such as "02002" are rejected;
    # also returns a real bool instead of the implicit None of the original.
    return len(value) == 4 and value.isdigit() and 1920 <= int(value) <= 2002
def is_iyr(passp):
    """iyr (Issue Year) - four digits; at least 2010 and at most 2020."""
    value = str(passp.get("iyr", ""))
    # Require exactly four digits; return a real bool in all branches.
    return len(value) == 4 and value.isdigit() and 2010 <= int(value) <= 2020
def is_eyr(passp):
    """eyr (Expiration Year) - four digits; at least 2020 and at most 2030."""
    value = str(passp.get("eyr", ""))
    # Require exactly four digits; return a real bool in all branches.
    return len(value) == 4 and value.isdigit() and 2020 <= int(value) <= 2030
def is_hgt(passp):
    """hgt (Height) - a number followed by either cm or in:
    If cm, the number must be at least 150 and at most 193.
    If in, the number must be at least 59 and at most 76.
    """
    value = str(passp.get("hgt", ""))
    if value.endswith("cm"):
        num = value[:-2]
        # BUG FIX: upper bound was 190 in the code while the rule says 193.
        return num.isdigit() and 150 <= int(num) <= 193
    if value.endswith("in"):
        num = value[:-2]
        # isdigit() guard also prevents the ValueError the original raised
        # on non-numeric heights such as "abcin".
        return num.isdigit() and 59 <= int(num) <= 76
    return False
def is_hcl(passp):
    """hcl (Hair Color) - a '#' followed by exactly six characters 0-9 or a-f."""
    value = str(passp.get("hcl", ""))
    if not (value.startswith("#") and len(value) == 7):
        return False
    # BUG FIX: the original only checked the length, so "#123abz" passed;
    # every character after '#' must be a lowercase hex digit.
    return all(c in "0123456789abcdef" for c in value[1:])
def is_ecl(passp):
    """ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth."""
    valid = {"amb", "blu", "brn", "gry", "grn", "hzl", "oth"}
    # Set membership instead of list scan; returns a real bool instead of
    # the implicit None of the original on the negative path.
    return passp.get("ecl") in valid
def is_pid(passp):
    """pid (Passport ID) - a nine-digit number, including leading zeroes."""
    value = str(passp.get("pid", ""))
    # BUG FIX: the original only checked the length, so any 9-character
    # string (e.g. "12345678a") passed; all characters must be digits.
    return len(value) == 9 and value.isdigit()
def is_valid2(passp):
i = passp.get("ecl", False)
if i == "grn ":
passp["ecl"] = "grn"
byr = is_byr(passp)
iyr = is_iyr(passp)
eyr = is_eyr(passp)
hgt = is_hgt(passp)
hcl = is_hcl(passp)
ecl = is_ecl(passp)
pid = is_pid(passp)
musts = [byr, iyr, eyr, hgt, hcl, ecl, pid]
count = 0
for a in (x for x in musts if | |
__author__ = 'mnowotka'
from django.db import models
from chembl_core_model.models import *
from chembl_core_db.db.models.abstractModel import ChemblCoreAbstractModel
from chembl_core_db.db.models.abstractModel import ChemblModelMetaClass
from django.utils import six
# ----------------------------------------------------------------------------------------------------------------------
class Products(six.with_metaclass(ChemblModelMetaClass, ChemblCoreAbstractModel)):
    """Drug products, one row per FDA application/product: dosage form,
    route, trade name, approval details and delivery-route flags.
    Sourced from e.g. the Orange Book (see ``information_source``)."""

    # ad_type values: RX = prescription, OTC = over the counter, DISCN = discontinued
    AD_TYPE_CHOICES = (
        ('OTC', 'OTC'),
        ('RX', 'RX'),
        ('DISCN', 'DISCN'),
    )

    # nda_type values: N = New Drug Application (innovator), A = Abbreviated NDA (generic)
    NDA_TYPE_CHOICES = (
        ('N', 'N'),
        ('A', 'A'),
    )

    dosage_form = models.CharField(max_length=200, blank=True, null=True, help_text='The dosage form of the product (e.g., tablet, capsule etc)')
    route = models.CharField(max_length=200, blank=True, null=True, help_text='The administration route of the product (e.g., oral, injection etc)')
    trade_name = models.CharField(max_length=200, blank=True, null=True, help_text='The trade name for the product')
    approval_date = ChemblDateField(blank=True, null=True, help_text='The FDA approval date for the product (not necessarily first approval of the active ingredient)')
    ad_type = models.CharField(max_length=5, blank=True, null=True, choices=AD_TYPE_CHOICES, help_text='RX = prescription, OTC = over the counter, DISCN = discontinued')
    oral = ChemblNullableBooleanField(help_text='Flag to show whether product is orally delivered')
    topical = ChemblNullableBooleanField(help_text='Flag to show whether product is topically delivered')
    parenteral = ChemblNullableBooleanField(help_text='Flag to show whether product is parenterally delivered')
    information_source = models.CharField(max_length=100, blank=True, null=True, help_text='Source of the product information (e.g., Orange Book)')
    black_box_warning = ChemblNullableBooleanField(help_text='Flag to show whether the product label has a black box warning')
    product_class = models.CharField(max_length=30, blank=True, null=True)
    applicant_full_name = models.CharField(max_length=200, blank=True, null=True, help_text='Name of the company applying for FDA approval')
    innovator_company = ChemblNullableBooleanField(help_text='Flag to show whether the applicant is the innovator of the product')
    product_id = models.CharField(primary_key=True, max_length=30, help_text='FDA application number for the product')
    load_date = ChemblDateField(blank=True, null=True, help_text='The date on which one or more of the following fields were created or updated: doasge_form, route, trade_name, approval_date, ad_type, oral, topical, parenteral, information_source, or applicant_full_name). This date is assigned by the EBI parser.')
    removed_date = ChemblDateField(blank=True, null=True, help_text="The date on which this product was first identified (by ebi parser) as having been removed from the information source. The recording of this date was first initiated on 30-JUN-10. Note that a small number of products are removed from OB, but then re-appear... in these cases this field is re-set to 'null' when the product re-appears..")
    nda_type = models.CharField(max_length=10, blank=True, null=True, choices=NDA_TYPE_CHOICES, help_text='New Drug Application Type. The type of new drug application approval. New Drug Applications (NDA or innovator) are "N". Abbreviated New Drug Applications (ANDA or generic) are "A".')  # TODO: 10 for storing one character sounds strange...
    tmp_ingred_count = ChemblPositiveIntegerField(length=9, blank=True, null=True, help_text='Number of ingredients in the product, to show which are combinations')
    exclude = ChemblIntegerField(length=1, blank=True, null=True, help_text='Non-FDA products, to be excluded')

    class Meta(ChemblCoreAbstractModel.Meta):
        pass
# ----------------------------------------------------------------------------------------------------------------------
class AtcClassification(six.with_metaclass(ChemblModelMetaClass, ChemblCoreAbstractModel)):
    """WHO Anatomical Therapeutic Chemical (ATC) classification.

    One row per complete (level-5) ATC code; the codes and descriptions of
    levels 1-4 are denormalised onto the same row. Compounds are linked via
    the MoleculeAtcClassification through-table.
    """

    who_name = models.CharField(max_length=150, blank=True, null=True, help_text='WHO/INN name for the compound')
    level1 = models.CharField(max_length=10, blank=True, null=True, help_text='First level of classification')
    level2 = models.CharField(max_length=10, blank=True, null=True, help_text='Second level of classification')
    level3 = models.CharField(max_length=10, blank=True, null=True, help_text='Third level of classification')
    level4 = models.CharField(max_length=10, blank=True, null=True, help_text='Fourth level of classification')
    # The complete ATC code doubles as the primary key.
    level5 = models.CharField(primary_key=True, max_length=10, help_text='Complete ATC code for compound')
    level1_description = models.CharField(max_length=150, blank=True, null=True, help_text='Description of first level of classification')
    level2_description = models.CharField(max_length=150, blank=True, null=True, help_text='Description of second level of classification')
    level3_description = models.CharField(max_length=150, blank=True, null=True, help_text='Description of third level of classification')
    level4_description = models.CharField(max_length=150, blank=True, null=True, help_text='Description of fourth level of classification')
    molecules = models.ManyToManyField(MoleculeDictionary, through='MoleculeAtcClassification')

    class Meta(ChemblCoreAbstractModel.Meta):
        pass
# ----------------------------------------------------------------------------------------------------------------------
class UsanStems(six.with_metaclass(ChemblModelMetaClass, ChemblCoreAbstractModel)):
    """United States Adopted Name (USAN) stems and their meanings."""

    # Protein family targeted by compounds carrying the stem.
    MAJOR_CLASS_CHOICES = (
        ('GPCR', 'GPCR'),
        ('NR', 'NR'),
        ('PDE', 'PDE'),
        ('ion channel', 'ion channel'),
        ('kinase', 'kinase'),
        ('protease', 'protease'),
    )
    # Position of the stem within the name.
    STEM_CLASS_CHOICES = (
        ('Suffix', 'Suffix'),
        ('Prefix', 'Prefix'),
        ('Infix', 'Infix'),
    )
    usan_stem_id = ChemblPositiveIntegerField(primary_key=True, length=9, help_text='Numeric primary key.')
    stem = models.CharField(max_length=100, help_text='Stem defined for use in United States Adopted Names.')
    subgroup = models.CharField(max_length=100, help_text='More specific subgroup of the stem defined for use in United States Adopted Names.')
    annotation = models.CharField(max_length=2000, blank=True, null=True, help_text='Meaning of the stem (e.g., the class of compound it applies to).')
    stem_class = models.CharField(max_length=100, blank=True, null=True, choices=STEM_CLASS_CHOICES, help_text='Indicates whether stem is used as a Prefix/Infix/Suffix.') # TODO: 100 is too long for the specified choices
    major_class = models.CharField(max_length=100, blank=True, null=True, choices=MAJOR_CLASS_CHOICES, help_text='Protein family targeted by compounds of this class (e.g., GPCR/Ion channel/Protease) where known/applicable.')
    who_extra = ChemblNullableBooleanField(default=False, help_text='Stem not represented in USAN list, but added from WHO INN stem list (where set to 1).')
    downgraded = ChemblNullableBooleanField(default=False, help_text='Stem no longer included in USAN listing (where set to 1).')

    class Meta(ChemblCoreAbstractModel.Meta):
        # A stem may appear several times with different subgroups; the pair
        # identifies a row uniquely.
        unique_together = (("stem", "subgroup"),)
# ----------------------------------------------------------------------------------------------------------------------
class HracClassification(six.with_metaclass(ChemblModelMetaClass, ChemblCoreAbstractModel)):
    """HRAC (Herbicide Resistance Action Committee) classification of herbicides."""

    hrac_class_id = ChemblAutoField(primary_key=True, length=9, help_text='Unique numeric primary key for each level3 code')
    active_ingredient = models.CharField(max_length=500, help_text='Name of active ingredient (herbicide) classified by HRAC')
    level1 = models.CharField(max_length=2, help_text='HRAC group code - denoting mechanism of action of herbicide')
    level1_description = models.CharField(max_length=2000, help_text='Description of mechanism of action provided by HRAC')
    level2 = models.CharField(max_length=3, help_text='Indicates a chemical family within a particular HRAC group (number not assigned by HRAC)')
    level2_description = models.CharField(max_length=2000, blank=True, null=True, help_text='Description of chemical family provided by HRAC')
    # Level-3 code is the unique per-ingredient identifier (derived, not official).
    level3 = models.CharField(max_length=5, unique=True, help_text='A unique code assigned to each ingredient (based on the level 1 and 2 HRAC classification, but not assigned by HRAC)')
    hrac_code = models.CharField(max_length=2, help_text='The official HRAC classification code for the ingredient')

    class Meta(ChemblCoreAbstractModel.Meta):
        pass
# ----------------------------------------------------------------------------------------------------------------------
class IracClassification(six.with_metaclass(ChemblModelMetaClass, ChemblCoreAbstractModel)):
    """IRAC (Insecticide Resistance Action Committee) classification of insecticides."""

    # Single-letter class-of-action code (not assigned by IRAC itself).
    LEVEL1_CHOICES = (
        ('A', 'A'),
        ('B', 'B'),
        ('C', 'C'),
        ('D', 'D'),
        ('E', 'E'),
        ('M', 'M'),
        ('U', 'U'),
    )
    # Human-readable description matching the level-1 code.
    LEVEL1_DESCRIPTION_CHOICES = (
        ('ENERGY METABOLISM', 'ENERGY METABOLISM'),
        ('GROWTH REGULATION', 'GROWTH REGULATION'),
        ('LIPID SYNTHESIS, GROWTH REGULATION', 'LIPID SYNTHESIS, GROWTH REGULATION'),
        ('MISCELLANEOUS', 'MISCELLANEOUS'),
        ('NERVE ACTION', 'NERVE ACTION'),
        ('NERVE AND MUSCLE ACTION', 'NERVE AND MUSCLE ACTION'),
        ('UNKNOWN', 'UNKNOWN'),
    )
    irac_class_id = ChemblAutoField(primary_key=True, length=9, help_text='Unique numeric primary key for each level4 code')
    active_ingredient = models.CharField(max_length=500, help_text='Name of active ingredient (insecticide) classified by IRAC')
    level1 = models.CharField(max_length=1, choices=LEVEL1_CHOICES, help_text='Class of action e.g., nerve action, energy metabolism (code not assigned by IRAC)')
    level1_description = models.CharField(max_length=2000, choices=LEVEL1_DESCRIPTION_CHOICES, help_text='Description of class of action, as provided by IRAC')
    level2 = models.CharField(max_length=3, help_text='IRAC main group code denoting primary site/mechanism of action')
    level2_description = models.CharField(max_length=2000, help_text='Description of site/mechanism of action provided by IRAC')
    level3 = models.CharField(max_length=6, help_text='IRAC sub-group code denoting chemical class of insecticide')
    level3_description = models.CharField(max_length=2000, help_text='Description of chemical class or exemplifying ingredient provided by IRAC')
    # Level-4 code is the unique per-ingredient identifier (derived, not official).
    level4 = models.CharField(max_length=8, unique=True, help_text='A unique code assigned to each ingredient (based on the level 1, 2 and 3 IRAC classification, but not assigned by IRAC)')
    irac_code = models.CharField(max_length=3, help_text='The official IRAC classification code for the ingredient')

    class Meta(ChemblCoreAbstractModel.Meta):
        pass
# ----------------------------------------------------------------------------------------------------------------------
class FracClassification(six.with_metaclass(ChemblModelMetaClass, ChemblCoreAbstractModel)):
    """FRAC (Fungicide Resistance Action Committee) classification of fungicides."""

    frac_class_id = ChemblAutoField(primary_key=True, length=9, help_text='Unique numeric primary key for each level5 code')
    active_ingredient = models.CharField(max_length=500, help_text='Name of active ingredient (fungicide) classified by FRAC')
    level1 = models.CharField(max_length=2, help_text='Mechanism of action code assigned by FRAC')
    level1_description = models.CharField(max_length=2000, help_text='Description of mechanism of action')
    level2 = models.CharField(max_length=2, help_text='Target site code assigned by FRAC')
    level2_description = models.CharField(max_length=2000, blank=True, null=True, help_text='Description of target provided by FRAC')
    level3 = models.CharField(max_length=6, help_text='Group number assigned by FRAC')
    level3_description = models.CharField(max_length=2000, blank=True, null=True, help_text='Description of group provided by FRAC')
    level4 = models.CharField(max_length=7, help_text='Number denoting the chemical group (number not assigned by FRAC)')
    level4_description = models.CharField(max_length=2000, blank=True, null=True, help_text='Chemical group name provided by FRAC')
    # NOTE(review): help_text ends "not assigned by IRAC" — likely a copy-paste
    # typo for "FRAC". Not changed here because help_text feeds migrations.
    level5 = models.CharField(max_length=8, unique=True, help_text='A unique code assigned to each ingredient (based on the level 1-4 FRAC classification, but not assigned by IRAC)')
    frac_code = models.CharField(max_length=4, help_text='The official FRAC classification code for the ingredient')

    class Meta(ChemblCoreAbstractModel.Meta):
        pass
# ----------------------------------------------------------------------------------------------------------------------
class PatentUseCodes(six.with_metaclass(ChemblModelMetaClass, ChemblCoreAbstractModel)):
    """Patent use codes from the FDA Orange Book, with their definitions."""

    patent_use_code = models.CharField(primary_key=True, max_length=8, help_text='Primary key. Patent use code from FDA Orange Book')
    definition = models.CharField(max_length=500, help_text='Definition for the patent use code, from FDA Orange Book.')

    class Meta(ChemblCoreAbstractModel.Meta):
        pass
# ----------------------------------------------------------------------------------------------------------------------
class DefinedDailyDose(six.with_metaclass(ChemblModelMetaClass, ChemblCoreAbstractModel)):
    """WHO Defined Daily Dose (DDD) information for an ATC code."""

    # Dose units as used in the WHO DDD tables.
    DDD_UNITS_CHOICES = (
        ('LSU', 'LSU'),
        ('MU', 'MU'),
        ('TU', 'TU'),
        ('U', 'U'),
        ('g', 'g'),
        ('mcg', 'mcg'),
        ('mg', 'mg'),
        ('ml', 'ml'),
        ('mmol', 'mmol'),
        ('tablet', 'tablet'),
    )
    atc_code = models.ForeignKey(AtcClassification, on_delete=models.PROTECT, db_column='atc_code', help_text='ATC code for the compound (foreign key to ATC_CLASSIFICATION table)')
    ddd_value = ChemblPositiveDecimalField(blank=True, null=True, max_digits=9, decimal_places=2, help_text='Value of defined daily dose')
    ddd_units = models.CharField(max_length=200, blank=True, null=True, choices=DDD_UNITS_CHOICES, help_text='Units of defined daily dose')
    ddd_admr = models.CharField(max_length=1000, blank=True, null=True, help_text='Administration route for dose')
    ddd_comment = models.CharField(max_length=2000, blank=True, null=True, help_text='Comment')
    ddd_id = ChemblAutoField(primary_key=True, length=9, help_text='Internal primary key')

    class Meta(ChemblCoreAbstractModel.Meta):
        pass
# ----------------------------------------------------------------------------------------------------------------------
class ProductPatents(six.with_metaclass(ChemblModelMetaClass, ChemblCoreAbstractModel)):
prod_pat_id = ChemblAutoField(primary_key=True, length=9, help_text='Primary key')
product = models.ForeignKey(Products, on_delete=models.PROTECT, help_text='Foreign key to products table - FDA application number for the product')
patent_no = models.CharField(max_length=11, help_text='Patent numbers as submitted by the applicant holder for patents | |
and end times for a build execution phase.
"""
"""End of time span."""
end_time: Optional[datetime]
"""Start of time span."""
start_time: Optional[datetime]
def __init__(self, end_time: Optional[datetime], start_time: Optional[datetime]) -> None:
self.end_time = end_time
self.start_time = start_time
class Image:
    """A container image produced by the pipeline."""

    # Docker Registry 2.0 digest.
    digest: Optional[str]
    # Name under which the image was pushed to Google Container Registry,
    # exactly as presented to `docker push`.
    name: Optional[str]
    # Timing information for the push of this image.
    push_timing: Optional[PushTiming]

    def __init__(self, digest: Optional[str], name: Optional[str], push_timing: Optional[PushTiming]) -> None:
        self.push_timing = push_timing
        self.name = name
        self.digest = digest
class Results:
    """Artifacts and metadata produced by the build."""

    # Path to the artifact manifest; populated only when artifacts are uploaded.
    artifact_manifest: Optional[str]
    # Time spent pushing all non-container artifacts.
    artifact_timing: Optional[ArtifactTiming]
    # Build-step digests, ordered by build-step index.
    build_step_images: Optional[List[str]]
    # Build-step outputs, ordered by build-step index. Builder images
    # (https://cloud.google.com/cloud-build/docs/cloud-builders) produce these
    # by writing to `$BUILDER_OUTPUT/output`; only the first 4KB is stored.
    build_step_outputs: Optional[List[str]]
    # Container images built as part of the build.
    images: Optional[List[Image]]
    # Number of uploaded artifacts; populated only when artifacts are uploaded.
    num_artifacts: Union[int, None, str]

    def __init__(self, artifact_manifest: Optional[str], artifact_timing: Optional[ArtifactTiming], build_step_images: Optional[List[str]], build_step_outputs: Optional[List[str]], images: Optional[List[Image]], num_artifacts: Union[int, None, str]) -> None:
        self.num_artifacts = num_artifacts
        self.images = images
        self.build_step_outputs = build_step_outputs
        self.build_step_images = build_step_images
        self.artifact_timing = artifact_timing
        self.artifact_manifest = artifact_manifest
class Secret:
    """Encrypted secret environment variables paired with the Cloud KMS key
    that decrypts them.
    """

    # Cloud KMS key name used to decrypt these environment variables.
    kms_key_name: Optional[str]
    # Maps an environment-variable name to its encrypted value. Names must be
    # unique across a build's secrets and used by at least one step; values
    # are capped at 64 KB each, with at most 100 values per build.
    secret_env: Optional[Dict[str, str]]

    def __init__(self, kms_key_name: Optional[str], secret_env: Optional[Dict[str, str]]) -> None:
        self.secret_env = secret_env
        self.kms_key_name = kms_key_name
class RepoSourceClass:
    """Source location inside a Google Cloud Source Repository.

    Used when the build fetches its source from a Cloud Source Repository.
    """

    # Regex matching branches to build (RE2 syntax,
    # https://github.com/google/re2/wiki/Syntax).
    branch_name: Optional[str]
    # Explicit commit SHA to build.
    commit_sha: Optional[str]
    # Working directory relative to the source root. Must be relative; ignored
    # for any step whose own `dir` is absolute.
    dir: Optional[str]
    # When true, trigger a build only if the revision does NOT match the regex.
    invert_regex: Optional[bool]
    # ID of the project that owns the repository.
    project_id: Optional[str]
    # Name of the Cloud Source Repository.
    repo_name: Optional[str]
    # Substitutions for a triggered build (RunBuildTrigger only).
    substitutions: Optional[Dict[str, str]]
    # Regex matching tags to build (RE2 syntax).
    tag_name: Optional[str]

    def __init__(self, branch_name: Optional[str], commit_sha: Optional[str], dir: Optional[str], invert_regex: Optional[bool], project_id: Optional[str], repo_name: Optional[str], substitutions: Optional[Dict[str, str]], tag_name: Optional[str]) -> None:
        self.tag_name = tag_name
        self.substitutions = substitutions
        self.repo_name = repo_name
        self.project_id = project_id
        self.invert_regex = invert_regex
        self.dir = dir
        self.commit_sha = commit_sha
        self.branch_name = branch_name
class StorageSourceClass:
    """Source location in Google Cloud Storage (an archive file).

    Used when the build fetches its source from Cloud Storage.
    """

    # Bucket containing the source (see Bucket Name Requirements:
    # https://cloud.google.com/storage/docs/bucket-naming#requirements).
    bucket: Optional[str]
    # Object generation; the latest generation is used when omitted.
    generation: Union[int, None, str]
    # Cloud Storage object containing the source.
    object: Optional[str]

    def __init__(self, bucket: Optional[str], generation: Union[int, None, str], object: Optional[str]) -> None:
        self.object = object
        self.generation = generation
        self.bucket = bucket
class Source:
    """Location of the source files to build; exactly one of the two
    alternatives is normally populated.
    """

    # Present when the source comes from a Cloud Source Repository.
    repo_source: Optional[RepoSourceClass]
    # Present when the source comes from Google Cloud Storage.
    storage_source: Optional[StorageSourceClass]

    def __init__(self, repo_source: Optional[RepoSourceClass], storage_source: Optional[StorageSourceClass]) -> None:
        self.storage_source = storage_source
        self.repo_source = repo_source
class TypeEnum(Enum):
    """Hash algorithm used for a file hash; NONE means no hash was computed."""
    MD5 = "MD5"
    NONE = "NONE"
    SHA256 = "SHA256"
class FileHashElement:
    """A single hash value together with the algorithm that produced it."""

    # Hash algorithm that was applied.
    type: Union[TypeEnum, int, None]
    # The hash value itself.
    value: Optional[str]

    def __init__(self, type: Union[TypeEnum, int, None], value: Optional[str]) -> None:
        self.value = value
        self.type = type
class FileHashValue:
    """Collection of hashes computed for one source file."""

    # The individual hash entries for the file.
    file_hash: Optional[List[FileHashElement]]

    def __init__(self, file_hash: Optional[List[FileHashElement]]) -> None:
        self.file_hash = file_hash
class ResolvedRepoSourceClass:
    """Copy of the build's `source.repo_source` with any revisions resolved.

    Describes a location inside a Google Cloud Source Repository.
    """

    # Regex matching branches to build (RE2 syntax,
    # https://github.com/google/re2/wiki/Syntax).
    branch_name: Optional[str]
    # Explicit commit SHA to build.
    commit_sha: Optional[str]
    # Working directory relative to the source root. Must be relative; ignored
    # for any step whose own `dir` is absolute.
    dir: Optional[str]
    # When true, trigger a build only if the revision does NOT match the regex.
    invert_regex: Optional[bool]
    # ID of the project that owns the repository.
    project_id: Optional[str]
    # Name of the Cloud Source Repository.
    repo_name: Optional[str]
    # Substitutions for a triggered build (RunBuildTrigger only).
    substitutions: Optional[Dict[str, str]]
    # Regex matching tags to build (RE2 syntax).
    tag_name: Optional[str]

    def __init__(self, branch_name: Optional[str], commit_sha: Optional[str], dir: Optional[str], invert_regex: Optional[bool], project_id: Optional[str], repo_name: Optional[str], substitutions: Optional[Dict[str, str]], tag_name: Optional[str]) -> None:
        self.tag_name = tag_name
        self.substitutions = substitutions
        self.repo_name = repo_name
        self.project_id = project_id
        self.invert_regex = invert_regex
        self.dir = dir
        self.commit_sha = commit_sha
        self.branch_name = branch_name
class ResolvedStorageSourceClass:
    """Copy of the build's `source.storage_source` with any generations
    resolved.

    Describes an archive file location in Google Cloud Storage.
    """

    # Bucket containing the source (see Bucket Name Requirements:
    # https://cloud.google.com/storage/docs/bucket-naming#requirements).
    bucket: Optional[str]
    # Object generation; the latest generation is used when omitted.
    generation: Union[int, None, str]
    # Cloud Storage object containing the source.
    object: Optional[str]

    def __init__(self, bucket: Optional[str], generation: Union[int, None, str], object: Optional[str]) -> None:
        self.object = object
        self.generation = generation
        self.bucket = bucket
class SourceProvenance:
    """Permanent, fixed identifier of the source a build used."""

    # Hash(es) of the build source, keyed by source file path, for verifying
    # that source integrity was maintained. Populated only when `BuildOptions`
    # requested a `SourceProvenanceHash`; a single packaged source such as a
    # `.tar.gz` yields one entry keyed by that file's path.
    file_hashes: Optional[Dict[str, FileHashValue]]
    # Copy of `source.repo_source` (if any) with revisions resolved.
    resolved_repo_source: Optional[ResolvedRepoSourceClass]
    # Copy of `source.storage_source` (if any) with generations resolved.
    resolved_storage_source: Optional[ResolvedStorageSourceClass]

    def __init__(self, file_hashes: Optional[Dict[str, FileHashValue]], resolved_repo_source: Optional[ResolvedRepoSourceClass], resolved_storage_source: Optional[ResolvedStorageSourceClass]) -> None:
        self.resolved_storage_source = resolved_storage_source
        self.resolved_repo_source = resolved_repo_source
        self.file_hashes = file_hashes
class StatusEnum(Enum):
CANCELLED = "CANCELLED"
EXPIRED | |
#! /bin/env python3
# -*- coding: utf-8 -*-
################################################################################
#
# This file is part of PYJUNK.
#
# Copyright © 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the “Software”),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# You should have received a copy of the MIT License
# along with PYJUNK. If not, see <https://mit-license.org/>.
#
################################################################################
"""
Developp.py rassemble la définition des classes:
Developp2D
Developp(Developp2D)
"""
from __future__ import annotations
import sys
import pathlib
import math
from datetime import datetime
import Direction as di
#----- process exit-status constants
NORMAL_TERMINATION = 0
ABNORMAL_TERMINATION = 1
#----- lookup table (dict) mapping colour names (French) to DXF colour indices
couleur = {
    "blanc": 0,    # white
    "rouge": 1,    # red
    "jaune": 2,    # yellow
    "vert": 3,     # green
    "magenta": 4,  # magenta
    "bleu": 5,     # blue
    "violet": 6,   # purple
    "gris": 8      # grey
}
#----- Classe représentant le modèle pour le calcul du développé
class Developp2D:
"""
Classe Developp2D
=================
La classe Developp2D calcule et stocke la représentation du développé, 2D par définition
:datas:
self.dictDevelopp2D: dict
self.numPanneau: int
self.lendroit2DMil: list
self.lendroit2DHaut: list
self.lendroit2DBas: list
self.lendroit2DHautChainette: list
self.lendroit2DBasChainette: list
self.lendroit2DHautCouture: list
self.endroit2DMil: Endroit2D
self.endroit2DHaut: Endroit2D
self.endroit2DBas: Endroit2D
:Example:
>>> a = Developp2D({"numPanneau": 0})
>>> print(a)
--> Developp2D :
<BLANKLINE>
.. seealso::
.. warning::
.. note::
.. todo::
"""
#-----
def __init__(self, dictDevelopp2D: dict) -> None:
self.dictDevelopp2D = dictDevelopp2D
if "numPanneau" in self.dictDevelopp2D and isinstance(self.dictDevelopp2D["numPanneau"], int):
self.numPanneau = self.dictDevelopp2D["numPanneau"]
else:
print(f'< !!!! > dictionnaire incorrect pour dictDevelopp2D')
print(f'program aborted')
sys.exit(ABNORMAL_TERMINATION)
# les listes de points 2D qui seront placés dans le dxf
self.lendroit2DMil = []
self.lendroit2DHaut = []
self.lendroit2DBas = []
self.lendroit2DHautChainette = []
self.lendroit2DBasChainette = []
self.lendroit2DHautCouture = []
# les points 2D précédents
self.endroit2DMil = None
self.endroit2DHaut = None
self.endroit2DBas = None
#-----
@staticmethod
def calc(dictCalc: dict) -> tuple:
"""
soit 2 cercles (x-a)²+(y-b)²=r0² et (x-c)²+(y-d)²=r1², on cherche les points d'intersection
la Distance entre les centres est D = sqrt[(c-a)²+(d-b)²]
la condition pour qu'il y ait une intersection :
D < r0+r1 et D > abs(r0-r1)
les solutions sont données par :
avec δ = 1/4*sqrt((D+r0+r1)(D+r0-r1)(D-r0+r1)(-D+r0+r1))
x1,2 = (a+c)/2 + (c-a)(r0²-r1²)/(2D²) +- 2δ(b-d)/D²
y1,2 = (b+d)/2 + (d-b)(r0²-r1²)/(2D²) -+ 2δ(a-c)/D²
"""
a = dictCalc["c0"]["x"]
b = dictCalc["c0"]["y"]
c = dictCalc["c1"]["x"]
d = dictCalc["c1"]["y"]
r0 = dictCalc["r0"]
r1 = dictCalc["r1"]
dD = math.hypot((c-a), (d-b))
if not (dD < (r0+r1) and dD > math.fabs(r0-r1)):
print(f'pas de solutions')
print(f'a -> {a} b -> {b} c -> {c} d -> {d} r0 -> {r0} r1 -> {r1}')
print(f' --> Arrêt du programme')
sys.exit(ABNORMAL_TERMINATION)
part1X = (a+c)/2.
part1Y = (b+d)/2.
part2 = (r0*r0-r1*r1)/(2.*dD*dD)
part2X = (c-a)*part2
part2Y = (d-b)*part2
delta = math.sqrt((dD+r0+r1)*(dD+r0-r1)*(dD-r0+r1)*(-dD+r0+r1))/(2.*dD*dD)
deltaX = (b-d)*delta
deltaY = (a-c)*delta
x = part1X + part2X
x1 = x + deltaX
x2 = x - deltaX
if x1 > x2:
return (x1, part1Y + part2Y - deltaY)
return (x2, part1Y + part2Y + deltaY)
#-----
    #-----
    @staticmethod
    def couture(dictCouture: dict) -> tuple:
        """Compute the seam (couture) points along the top edge of the development.

        Principle: given two successive chainette points (hence a line
        segment), compute two offset points, displaced by ``fCouture`` and
        forming an interior angle of ``angleR`` with the segment.

        Expected keys in *dictCouture* (program aborts on any mismatch):
            fCouture (float): seam offset distance
            endroitDeb (di.Endroit2D): segment start point
            endroitFin (di.Endroit2D): segment end point

        Returns (xDeb, yDeb, xFin, yFin): the two seam points' coordinates.
        """
        if "fCouture" in dictCouture and isinstance(dictCouture["fCouture"], float):
            fCouture = dictCouture["fCouture"]
        else:
            print(f'< !!!! > dictionnaire incorrect pour dictCouture')
            print(f'program aborted')
            sys.exit(ABNORMAL_TERMINATION)
        # interior seam angle; tan(angleR) appears below, so 90° would blow up
        angleR = math.radians(60.) # don't try 90°
        if "endroitDeb" in dictCouture and isinstance(dictCouture["endroitDeb"], di.Endroit2D):
            endroitDeb = dictCouture["endroitDeb"]
        else:
            print(f'< !!!! > dictionnaire incorrect pour dictCouture')
            print(f'program aborted')
            sys.exit(ABNORMAL_TERMINATION)
        if "endroitFin" in dictCouture and isinstance(dictCouture["endroitFin"], di.Endroit2D):
            endroitFin = dictCouture["endroitFin"]
        else:
            print(f'< !!!! > dictionnaire incorrect pour dictCouture')
            print(f'program aborted')
            sys.exit(ABNORMAL_TERMINATION)
        # seam point near the start: offset vector rotated onto the segment
        angleChainette = di.Direction2D(endroitFin - endroitDeb).angle2D()
        direction2DDeb = di.Direction2D({"vect2D": {"x": fCouture / math.tan(angleR) , "y": fCouture}})
        endroit2DCoutureDeb = endroitDeb + di.Direction2D(direction2DDeb.rot2d(angleChainette))
        # seam point near the end: same construction from the reversed segment
        angleChainette = di.Direction2D(endroitDeb - endroitFin).angle2D()
        direction2DFin = di.Direction2D({"vect2D": {"x": fCouture / math.tan(angleR) , "y": -fCouture}})
        endroit2DCoutureFin = endroitFin + di.Direction2D(direction2DFin.rot2d(angleChainette))
        return (endroit2DCoutureDeb["point2D"]["x"], endroit2DCoutureDeb["point2D"]["y"], \
                endroit2DCoutureFin["point2D"]["x"], endroit2DCoutureFin["point2D"]["y"] \
                )
#-----
    def comp(self, dictDevelopp2D: dict) -> None:
        """Compute the next 2-D section of the development.

        For ``index`` 0 the first section is laid out on the y axis around the
        origin. For later sections each new point is the intersection of two
        circles (see :meth:`calc`) centred on the previous middle/top/bottom
        points, with radii equal to the 3-D distances carried in
        *dictDevelopp2D*; the top seam points are then derived with
        :meth:`couture`.

        NOTE(review): relies on being called with consecutive index values
        (0, 1, 2, ...) since it uses the points stored by the previous call.
        """
        if dictDevelopp2D["index"] == 0:
            # first section: middle point at the origin, the other points
            # straight above/below it on the y axis
            endroit2DMil = di.Endroit2D({"point2D": {"x": 0., "y": 0.}})
            self.lendroit2DMil.append(endroit2DMil)
            fdist3DMilHaut = dictDevelopp2D["fdist3DMilHaut"]
            endroit2DHaut = di.Endroit2D({"point2D": {"x": 0., "y": fdist3DMilHaut}})
            self.lendroit2DHaut.append(endroit2DHaut)
            fdist3DMilBas = dictDevelopp2D["fdist3DMilBas"]
            endroit2DBas = di.Endroit2D({"point2D": {"x": 0., "y": -fdist3DMilBas}})
            self.lendroit2DBas.append(endroit2DBas)
            fdist3DMilHautChainette = dictDevelopp2D["fdist3DMilHautChainette"]
            endroit2DHautChainette = di.Endroit2D({"point2D": {"x": 0., "y": fdist3DMilHautChainette}})
            self.lendroit2DHautChainette.append(endroit2DHautChainette)
            fdist3DMilBasChainette = dictDevelopp2D["fdist3DMilBasChainette"]
            endroit2DBasChainette = di.Endroit2D({"point2D": {"x": 0., "y": -fdist3DMilBasChainette}})
            self.lendroit2DBasChainette.append(endroit2DBasChainette)
            # the seam line starts on the first top-chainette point
            self.lendroit2DHautCouture.append(endroit2DHautChainette)
        else:
            # all circles below are centred on the *previous* section's points
            # (self.endroit2DMil/Haut/Bas are only refreshed at the end)
            # new middle point
            dictCalc = {}
            dictCalc['c0'] = self.endroit2DMil.p2ddict.getDict()
            dictCalc["r0"] = dictDevelopp2D["fdist3DMilMil"]
            dictCalc['c1'] = self.endroit2DHaut.p2ddict.getDict()
            dictCalc["r1"] = dictDevelopp2D["fdist3DHautMil"]
            (x, y) = Developp2D.calc(dictCalc=dictCalc)
            endroit2DMil = di.Endroit2D({"point2D": {"x": x, "y": y}})
            self.lendroit2DMil.append(endroit2DMil)
            # new top point
            dictCalc['c0'] = self.endroit2DMil.p2ddict.getDict()
            dictCalc["r0"] = dictDevelopp2D["fdist3DMilHaut"]
            dictCalc['c1'] = self.endroit2DHaut.p2ddict.getDict()
            dictCalc["r1"] = dictDevelopp2D["fdist3DHautHaut"]
            (x, y) = Developp2D.calc(dictCalc=dictCalc)
            endroit2DHaut = di.Endroit2D({"point2D": {"x": x, "y": y}})
            self.lendroit2DHaut.append(endroit2DHaut)
            # new bottom point
            dictCalc['c0'] = self.endroit2DMil.p2ddict.getDict()
            dictCalc["r0"] = dictDevelopp2D["fdist3DMilBas"]
            dictCalc['c1'] = self.endroit2DBas.p2ddict.getDict()
            dictCalc["r1"] = dictDevelopp2D["fdist3DBasBas"]
            (x, y) = Developp2D.calc(dictCalc=dictCalc)
            endroit2DBas = di.Endroit2D({"point2D": {"x": x, "y": y}})
            self.lendroit2DBas.append(endroit2DBas)
            # new top-chainette point
            dictCalc['c0'] = self.endroit2DMil.p2ddict.getDict()
            dictCalc["r0"] = dictDevelopp2D["fdist3DMilHautChainette"]
            dictCalc['c1'] = self.endroit2DHaut.p2ddict.getDict()
            dictCalc["r1"] = dictDevelopp2D["fdist3DHautHautChainette"]
            (x, y) = Developp2D.calc(dictCalc=dictCalc)
            endroit2DHautChainette = di.Endroit2D({"point2D": {"x": x, "y": y}})
            self.lendroit2DHautChainette.append(endroit2DHautChainette)
            # new bottom-chainette point
            dictCalc['c0'] = self.endroit2DMil.p2ddict.getDict()
            dictCalc["r0"] = dictDevelopp2D["fdist3DMilBasChainette"]
            dictCalc['c1'] = self.endroit2DBas.p2ddict.getDict()
            dictCalc["r1"] = dictDevelopp2D["fdist3DBasBasChainette"]
            (x, y) = Developp2D.calc(dictCalc=dictCalc)
            endroit2DBasChainette = di.Endroit2D({"point2D": {"x": x, "y": y}})
            self.lendroit2DBasChainette.append(endroit2DBasChainette)
            # seam points derived from the last two top-chainette points
            dictCouture = {}
            dictCouture["endroitDeb"] = self.lendroit2DHautChainette[-2]
            dictCouture["endroitFin"] = self.lendroit2DHautChainette[-1]
            dictCouture["fCouture"] = dictDevelopp2D["fCouture"]
            (x1, y1, x2, y2) = Developp2D.couture(dictCouture=dictCouture)
            endroit2DHautCouture = di.Endroit2D({"point2D": {"x": x1, "y": y1}})
            self.lendroit2DHautCouture.append(endroit2DHautCouture)
            endroit2DHautCouture = di.Endroit2D({"point2D": {"x": x2, "y": y2}})
            self.lendroit2DHautCouture.append(endroit2DHautCouture)
            #self.lendroit2DHautCouture.append(self.lendroit2DHautChainette[-1])
        # remember this section's points for the next call
        self.endroit2DMil = self.lendroit2DMil[-1]
        self.endroit2DHaut = self.lendroit2DHaut[-1]
        self.endroit2DBas = self.lendroit2DBas[-1]
#-----
def horiz(self) -> None:
"""
tout les points du panneau sont tournés pour être mis
à "l'horizontale" définie par l'axe du millieu du panneau
"""
alpha = di.Direction2D(self.lendroit2DMil[-1] - self.lendroit2DMil[0]).angle2D()
lendroit2DMil = []
lendroit2DHaut = []
lendroit2DBas = []
lendroit2DHautChainette = []
lendroit2DBasChainette = []
lendroit2DHautCouture = []
for i in self.lendroit2DMil:
lendroit2DMil.append(i.rot2d(fAth=-alpha))
for i in self.lendroit2DHaut:
lendroit2DHaut.append(i.rot2d(fAth=-alpha))
for i in self.lendroit2DBas:
lendroit2DBas.append(i.rot2d(fAth=-alpha))
for i in self.lendroit2DHautChainette:
lendroit2DHautChainette.append(i.rot2d(fAth=-alpha))
for i in self.lendroit2DBasChainette:
lendroit2DBasChainette.append(i.rot2d(fAth=-alpha))
for i in self.lendroit2DHautCouture:
lendroit2DHautCouture.append(i.rot2d(fAth=-alpha))
self.lendroit2DMil = lendroit2DMil
self.lendroit2DHaut = lendroit2DHaut
self.lendroit2DBas = lendroit2DBas
self.lendroit2DHautChainette = lendroit2DHautChainette
self.lendroit2DBasChainette = lendroit2DBasChainette
self.lendroit2DHautCouture = lendroit2DHautCouture
#-----
def createDxf(self, block) -> None:
"""
la mise en place du dxf
"""
# la ligne millieu en pointillé
polyLineMil = block.add_lwpolyline([], dxfattribs={'color': couleur["jaune"], 'linetype': 'DOT2'})
for i in self.lendroit2DMil:
polyLineMil.append_points(points=[(i["point2D"]["x"], \
i["point2D"]["y"])], \
format='xy')
# la ligne du haut en pointillé
polyLineHaut = block.add_lwpolyline([], dxfattribs={'color': couleur["jaune"], 'linetype': 'DOT2'})
for i in self.lendroit2DHaut:
polyLineHaut.append_points(points=[(i["point2D"]["x"], \
i["point2D"]["y"])], \
format='xy')
# la ligne du haut de chainette en plein
polyLineHautChainette = block.add_lwpolyline([], dxfattribs={'color': couleur["bleu"]})
for i in self.lendroit2DHautChainette:
polyLineHautChainette.append_points(points=[(i["point2D"]["x"], \
i["point2D"]["y"])], \
format='xy')
# la ligne du bas en pointillé
polyLineBas = block.add_lwpolyline([], dxfattribs={'color': couleur["jaune"], 'linetype': 'DOT2'})
for i in self.lendroit2DBas:
polyLineBas.append_points(points=[(i["point2D"]["x"], \
i["point2D"]["y"])], \
format='xy')
# la ligne du bas de chainette en plein
polyLineBasChainette = block.add_lwpolyline([], dxfattribs={'color': couleur["bleu"]})
for i in self.lendroit2DBasChainette:
polyLineBasChainette.append_points(points=[(i["point2D"]["x"], \
i["point2D"]["y"])], \
format='xy')
# la ligne de la couture en plein
polyLineHautCouture = block.add_lwpolyline([], dxfattribs={'color': couleur["bleu"]})
for i in self.lendroit2DHautCouture:
polyLineHautCouture.append_points(points=[(i["point2D"]["x"], \
i["point2D"]["y"])], \
format='xy')
# les lignes de section (la première et la dernière sont différentes)
for i in range(len(self.lendroit2DBasChainette)):
if i == 0 or i == len(self.lendroit2DBasChainette)-1:
polyLineSection = block.add_lwpolyline([], dxfattribs={'color': couleur["bleu"]})
else:
polyLineSection = block.add_lwpolyline([], dxfattribs={'color': couleur["rouge"], 'lineweight': 20})
polyLineSection.append_points(points=[(self.lendroit2DBasChainette[i]["point2D"]["x"], \
self.lendroit2DBasChainette[i]["point2D"]["y"])], \
format='xy')
polyLineSection.append_points(points=[(self.lendroit2DHautChainette[i]["point2D"]["x"], \
| |
import numpy as np
from sfsimodels.models.abstract_models import PhysicalObject
from sfsimodels.models.systems import TwoDSystem
from sfsimodels.functions import interp_left, interp2d, interp3d
from .fns import remove_close_items, build_ele2_node_array
import hashlib
def sort_slopes(sds):
    """Sort slope segments from bottom to top, then right to left.

    The key weights the second y-coordinate of each segment heavily
    (factor 1e6) so vertical position dominates, with the second
    x-coordinate breaking ties.
    """
    arr = np.array(sds)
    order = np.argsort(arr[:, 0, 1] + 1e6 * arr[:, 1, 1])
    return arr[order]
def adjust_slope_points_for_removals(sds, x, removed_y, retained_y):
    """Re-point slope endpoints at (x, removed_y) to retained_y (in place)."""
    for sd in sds:
        xs, ys = sd[0], sd[1]
        for idx in (0, 1):
            if xs[idx] == x and ys[idx] == removed_y:
                ys[idx] = retained_y
def adj_slope_by_layers(xm, ym, sgn=1):
    """
    Given mesh coordinates, adjust the mesh to match the slope by adjusting each layer.

    The bottom-left and top-right coords of the mesh define the slope.

    Parameters
    ----------
    xm : array_like (2D)
        Mesh x-coordinates (node grid).
    ym : array_like (2D)
        Mesh y-coordinates (node grid).
    sgn : int
        1 or -1; -1 mirrors the mesh so the same algorithm handles a
        slope running in the opposite direction.

    Returns
    -------
    tuple
        (new_xm, new_ym) adjusted coordinate arrays, un-mirrored and
        re-signed to match the input orientation.
    """
    # TODO use centroid formula - and use o3plot to get ele-coords
    ym = sgn * np.array(ym)
    xm = sgn * np.array(xm)
    if sgn == -1:
        # mirror so the algorithm always works left-to-right
        xm = xm[::-1]
        ym = ym[::-1]
    nh = len(ym[0]) - 1  # number of element layers in the y-direction
    # dy = min([(ym[0][-1] - ym[0][0]) / nh, (ym[-1][-1] - ym[-1][0]) / nh, 0.2])
    dy1 = min([(ym[-1][-1] - ym[-1][0]) / nh])  # NOTE(review): min of a 1-item list — presumably a leftover of the commented-out form above
    dy0 = 0.2
    # target node heights on the left (y0s) and right (y1s) edges
    y0s = ym[0][0] + np.arange(nh + 1) * dy0
    y1s = ym[-1][-1] - np.arange(nh + 1) * dy1
    y1s = y1s[::-1]
    for i in range(nh + 1):
        # linearly grade each node row between the left and right edge heights
        ym[:, i] = np.interp(xm[:, i], [xm[0][0], xm[-1][-1]], [y0s[i], y1s[i]])
        xm[:, i] = xm[:, 0]
    # element-centre heights (average of the four surrounding nodes)
    y_centres_at_xns = (ym[1:] + ym[:-1]) / 2
    y_centres = (y_centres_at_xns[:, 1:] + y_centres_at_xns[:, :-1]) / 2
    # get x-coordinates of centres of relevant elements
    included_ele = []
    dy_inds = len(ym[0, :]) - 1
    for i in range(0, dy_inds):
        # account for shift before assessing position of centroid
        xcens = (xm[1:, i] + xm[:-1, i]) / 2 + 0.375 * (xm[1:, -1] - xm[:-1, -1])
        y_surf_at_x_cens = np.interp(xcens, [xm[0][0], xm[-1][-1]], [ym[0][0], ym[-1][-1]])
        # first element (per column) whose centre lies below the slope surface
        inds = np.where(y_centres[:, i] < y_surf_at_x_cens)
        if len(inds[0]):
            included_ele.append(inds[0][0])
        else:
            included_ele.append(len(y_surf_at_x_cens))
    # NOTE(review): reuses y_surf_at_x_cens leaked from the loop's last
    # iteration — assumes dy_inds >= 1; raises NameError otherwise.
    included_ele.append(len(y_surf_at_x_cens))
    # NOTE(review): new_xm/new_ym alias (do not copy) xm/ym, so the
    # adjustments below mutate the graded arrays directly.
    new_xm = xm
    new_ym = ym
    for j in range(1, nh + 1):
        new_ym[included_ele[0], j] += dy1
    for i in range(1, dy_inds + 1):
        x_ind_adj = included_ele[i - 1]
        x_ind_adj_next = included_ele[i]
        if x_ind_adj == x_ind_adj_next:
            continue
        # shift by half of the ele
        dx = (xm[x_ind_adj + 1, i] - xm[x_ind_adj, i]) * 0.5
        dxs = np.interp(xm[x_ind_adj:x_ind_adj_next, i], [xm[x_ind_adj, i], xm[x_ind_adj_next, i]], [dx, 0])
        new_xm[x_ind_adj:x_ind_adj_next, i] = xm[x_ind_adj:x_ind_adj_next, i] + dxs
        for j in range(i + 1, nh + 1):
            new_ym[x_ind_adj_next, j] += dy1
    if sgn == -1:
        # undo the mirroring applied at the top
        new_xm = new_xm[::-1]
        new_ym = new_ym[::-1]
    return new_xm * sgn, new_ym * sgn
def calc_centroid(xs, ys):
    """Compute polygon centroid coordinates via the shoelace formula.

    Parameters
    ----------
    xs, ys : array_like
        Vertex coordinates of the polygon(s); the last axis indexes the
        vertices (supports batched input).

    Returns
    -------
    tuple
        (xc, yc) centroid coordinate(s), one pair per polygon.
    """
    # numpy is imported at module level; the original re-imported it here.
    x0 = np.array(xs)
    y0 = np.array(ys)
    # Previous vertex for each vertex (roll wraps around the polygon).
    x1 = np.roll(x0, 1, axis=-1)
    y1 = np.roll(y0, 1, axis=-1)
    # Signed cross terms of the shoelace formula.
    a = x0 * y1 - x1 * y0
    xc = np.sum((x0 + x1) * a, axis=-1)
    yc = np.sum((y0 + y1) * a, axis=-1)
    area = 0.5 * np.sum(a, axis=-1)
    xc /= (6.0 * area)
    yc /= (6.0 * area)
    return xc, yc
def calc_mesh_centroids(fem):
    """Compute the centroid coordinates of every element in a FE mesh.

    Parameters
    ----------
    fem : object
        Finite-element mesh with `soil_grid`, `x_nodes` and `y_nodes`
        attributes (project type; x_nodes/y_nodes indexable as 2D arrays).

    Returns
    -------
    tuple
        (xc, yc) 2D arrays of centroid coordinates, shaped like `soil_grid`.
    """
    x_inds = []
    y_inds = []
    if hasattr(fem.y_nodes[0], '__len__'):  # can either have varying y-coordinates or single set
        n_y = len(fem.y_nodes[0])
    else:
        n_y = 0
    # numpy is imported at module level; the original re-imported it here.
    # Build node-index quads (counter-clockwise corners) for each element.
    for xx in range(len(fem.soil_grid)):
        x_ele = [xx, xx + 1, xx + 1, xx]
        x_inds += [x_ele for i in range(n_y - 1)]
        for yy in range(len(fem.soil_grid[xx])):
            y_ele = [yy, yy, yy + 1, yy + 1]
            y_inds.append(y_ele)
    n_eles = len(x_inds)  # simplified from len(np.array(x_inds))
    x_inds = np.array(x_inds).flatten()
    y_inds = np.array(y_inds).flatten()
    # Gather the four corner coordinates of every element.
    x0 = np.array(fem.x_nodes[x_inds, y_inds])
    y0 = np.array(fem.y_nodes[x_inds, y_inds])
    x0 = x0.reshape((n_eles, 4))
    y0 = y0.reshape((n_eles, 4))
    # Shoelace centroid formula (same math as calc_centroid above).
    x1 = np.roll(x0, 1, axis=-1)
    y1 = np.roll(y0, 1, axis=-1)
    a = x0 * y1 - x1 * y0
    xc = np.sum((x0 + x1) * a, axis=-1)
    yc = np.sum((y0 + y1) * a, axis=-1)
    area = 0.5 * np.sum(a, axis=-1)
    xc /= (6.0 * area)
    yc /= (6.0 * area)
    return xc.reshape(len(fem.soil_grid), len(fem.soil_grid[0])), yc.reshape(len(fem.soil_grid), len(fem.soil_grid[0]))
class FiniteElementVary2DMeshConstructor(object): # maybe FiniteElementVertLine2DMesh
_soils = None
x_index_to_sp_index = None
_inactive_value = 1000000
def __init__(self, tds, dy_target, x_scale_pos=None, x_scale_vals=None, dp: int = None, fd_eles=0, auto_run=True,
             use_3d_interp=False, smooth_surf=False, force_x2d=False, min_scale=0.5, max_scale=2.0,
             allowable_slope=0.25, smooth_ratio=1.):
    """
    Builds a finite element mesh of a two-dimension system

    Parameters
    ----------
    tds: TwoDSystem
        A two dimensional system of models
    dy_target: float
        Target height of elements
    x_scale_pos: array_like
        x-positions used to provide scale factors for element widths
    x_scale_vals: array_like
        scale factors for element widths
    dp: int
        Number of decimal places
    fd_eles: int
        if =0 then elements corresponding to the foundation are removed, else provide element id
    auto_run: bool
        if true then the full mesh-construction pipeline runs in this constructor
    use_3d_interp: bool
        if true then y-coordinates are built with 3D interpolation rather than propagation
    smooth_surf: bool
        if true then changes in angle of the slope must be less than 90 degrees, builds VaryXY mesh
    force_x2d: bool
        if true (and not smooth_surf) a 2D x-node grid is built from the 1D x-nodes
    min_scale: float
        stored on the instance; presumably a lower bound on element scaling — consumed elsewhere, TODO confirm
    max_scale: float
        stored on the instance; presumably an upper bound on element scaling — consumed elsewhere, TODO confirm
    allowable_slope: float
        stored on the instance; presumably the max slope between adjacent nodes — consumed elsewhere, TODO confirm
    smooth_ratio: float
        stored on the instance; smoothing parameter — consumed elsewhere, TODO confirm
    """
    self.min_scale = min_scale
    self.max_scale = max_scale
    self.allowable_slope = allowable_slope
    self.smooth_ratio = smooth_ratio
    assert isinstance(tds, TwoDSystem)
    self.tds = tds
    self.dy_target = dy_target
    # Default: uniform element-width scaling over the full system width.
    if x_scale_pos is None:
        x_scale_pos = [0, tds.width]
    if x_scale_vals is None:
        x_scale_vals = [1., 1.]
    self.x_scale_pos = np.array(x_scale_pos)
    self.x_scale_vals = np.array(x_scale_vals)
    self.dp = dp
    # x-positions of the soil profiles, plus the right edge of the system.
    self.xs = list(self.tds.x_sps)
    self.smooth_surf = smooth_surf
    self.xs.append(tds.width)
    self.xs = np.array(self.xs)
    # Keep only surface points inside the system width; ensure the right
    # edge itself is included as a surface point.
    inds = np.where(np.array(tds.x_surf) <= tds.width)
    self.x_surf = np.array(tds.x_surf)[inds]
    if tds.width not in self.x_surf:
        self.x_surf = np.insert(self.x_surf, len(self.x_surf), tds.width)
    self.y_surf = np.interp(self.x_surf, tds.x_surf, tds.y_surf)
    self.y_surf_at_sps = np.interp(self.xs, tds.x_surf, tds.y_surf)
    # De-duplicate soil layers across all profiles using their hashes.
    self._soils = []
    self._soil_hashes = []
    for i in range(len(self.tds.sps)):
        for yy in range(1, self.tds.sps[i].n_layers + 1):
            sl = self.tds.sps[i].layer(yy)
            if sl.unique_hash not in self._soil_hashes:
                self._soil_hashes.append(sl.unique_hash)
                self._soils.append(sl)
    # Intermediate state filled in by the build pipeline below.
    self.y_surf_at_xcs = None
    self.yd = None
    self.xcs_sorted = None
    self.sds = None
    self.y_blocks = None
    self.y_coords_at_xcs = None
    self.x_nodes = None
    self.y_nodes = None
    self.x_nodes2d = None
    self._femesh = None
    if auto_run:
        # The steps below are order-dependent: each consumes state set by
        # the previous one.
        self.get_special_coords_and_slopes()  # Step 1
        self.set_init_y_blocks()
        self.adjust_blocks_to_be_consistent_with_slopes()
        self.trim_grid_to_target_dh()
        self.build_req_y_node_positions()
        self.set_x_nodes()
        if use_3d_interp:
            self.build_y_coords_grid_via_3d_interp()
        else:
            self.build_y_coords_grid_via_propagation()
        if self.dp is not None:
            self.set_to_decimal_places()
        if smooth_surf:
            self.adjust_for_smooth_surface()
            self.set_soil_ids_to_vary_xy_grid()
        elif force_x2d:
            # Expand the 1D x-nodes into a 2D grid matching y_nodes.
            self.x_nodes2d = self.x_nodes[:, np.newaxis] * np.ones_like(self.y_nodes)
            self.set_soil_ids_to_vary_xy_grid()
        else:
            self.set_soil_ids_to_vary_y_grid()
        self.create_mesh()
        if smooth_surf:
            self.femesh.tidy_unused_mesh()
        if not fd_eles:
            self.exclude_fd_eles()
def get_special_coords_and_slopes(self):
"""Find the coordinates, layer boundaries and surface slopes that should be maintained in the FE mesh"""
fd_coords = []
x_off = 0.0
yd = {}
for i in range(len(self.x_surf)):
yd[self.x_surf[i]] = []
if self.tds.width not in yd:
yd[self.tds.width] = []
sds = [] # slope dict (stored left-to-right and bottom-to-top)
for i in range(len(self.tds.bds)):
x_bd = self.tds.x_bds[i]
bd = self.tds.bds[i]
fd_centre_x = x_bd + bd.x_fd
y_surf = np.interp(fd_centre_x, self.x_surf, self.y_surf)
if bd.fd.width > self.dy_target:
fd_coords.append(fd_centre_x)
x_left = fd_centre_x - bd.fd.width / 2
x_right = fd_centre_x + bd.fd.width / 2
if x_left not in yd:
yd[x_left] = []
yd[x_left] += [y_surf, -bd.fd.depth + y_surf]
if x_right not in yd:
yd[x_right] = []
yd[x_right] += [y_surf, -bd.fd.depth + y_surf]
sds.append([[x_left, x_right], [y_surf, y_surf]])
sds.append([[x_left, x_right], [-bd.fd.depth + y_surf, -bd.fd.depth + y_surf]])
for i in range(len(self.tds.sps)):
x_curr = self.tds.x_sps[i]
if x_curr > self.tds.width:
continue
if i == len(self.tds.sps) - 1:
x_next = self.tds.width
else:
x_next = self.tds.x_sps[i + 1] - x_off
# get important x-coordinates that are between two soil profiles
if x_curr not in yd:
yd[x_curr] = []
if x_next not in yd and x_next < self.tds.width:
yd[x_next] = []
x_coords = np.array(list(yd))
inds = np.where((x_coords >= x_curr) & (x_coords <= x_next))
xs = np.sort(x_coords[inds])
y_surf_at_xs = np.interp(xs, self.x_surf, self.y_surf)
y_curr_surf = y_surf_at_xs[0]
# Depths from defined soil profile
int_yy = []
angles = []
for yy in range(1, self.tds.sps[i].n_layers + 1):
# if self.tds.sps[i].layer_depth(yy) >= 0:
y = -self.tds.sps[i].layer_depth(yy) + y_curr_surf
if -y < self.tds.height:
int_yy.append(y)
angles.append(self.tds.sps[i].x_angles[yy - 1])
angles = np.array(angles)
if xs[0] not in yd:
yd[xs[0]] = []
for j in range(len(xs) - 1):
x0 = xs[j]
x_next = xs[j + 1]
if x_next not in yd:
yd[x_next] = []
x0_diff = x0 - x_curr
xn_diff = x_next - x_curr
if y_surf_at_xs[j] not in yd[x0]:
yd[x0].append(y_surf_at_xs[j])
if y_surf_at_xs[j + 1] not in yd[x_next]:
yd[x_next].append(y_surf_at_xs[j + 1])
for k in range(len(int_yy)):
if angles[k] is None or np.isnan(angles[k]):
continue
y_curr = int_yy[k] + angles[k] * x0_diff
if y_curr < y_surf_at_xs[j] and y_curr not in yd[x0]:
yd[x0].append(y_curr)
y_next = int_yy[k] + angles[k] * xn_diff
if y_next < y_surf_at_xs[j + 1] and y_next not in yd[x_next]:
yd[x_next].append(y_next)
if y_curr | |
trigger.
:Parameters:
- `trigger_link`: str, the link to the trigger.
- `options`: dict, the request options for the request.
:Returns:
dict
"""
path = base.GetPathFromLink(trigger_link)
trigger_id = base.GetResourceIdOrFullNameFromLink(trigger_link)
return self.DeleteResource(path,
'triggers',
trigger_id,
None,
options)
def ReplaceUserDefinedFunction(self, udf_link, udf, options=None):
    """Replaces a user defined function and returns it.

    :Parameters:
        - `udf_link`: str, the link to the user defined function.
        - `udf`: dict, the replacement UDF body.
        - `options`: dict or None, the request options for the request.

    :Returns:
        dict

    """
    if options is None:
        options = {}  # avoid the shared mutable default-argument pitfall
    DocumentClient.__ValidateResource(udf)
    udf = udf.copy()
    # Normalise the script body: 'serverScript' takes precedence over 'body'.
    if udf.get('serverScript'):
        udf['body'] = str(udf['serverScript'])
    elif udf.get('body'):
        udf['body'] = str(udf['body'])
    path = base.GetPathFromLink(udf_link)
    udf_id = base.GetResourceIdOrFullNameFromLink(udf_link)
    return self.Replace(udf,
                        path,
                        'udfs',
                        udf_id,
                        None,
                        options)
def DeleteUserDefinedFunction(self, udf_link, options=None):
    """Deletes a user defined function.

    :Parameters:
        - `udf_link`: str, the link to the user defined function.
        - `options`: dict or None, the request options for the request.

    :Returns:
        dict

    """
    if options is None:
        options = {}  # avoid the shared mutable default-argument pitfall
    path = base.GetPathFromLink(udf_link)
    udf_id = base.GetResourceIdOrFullNameFromLink(udf_link)
    return self.DeleteResource(path,
                               'udfs',
                               udf_id,
                               None,
                               options)
def ExecuteStoredProcedure(self, sproc_link, params):
    """Executes a stored procedure.

    :Parameters:
        - `sproc_link`: str, the link to the stored procedure.
        - `params`: dict, list or None, arguments for the stored procedure.

    :Returns:
        dict, the result returned by the stored procedure.

    """
    initial_headers = dict(self.default_headers)
    initial_headers.update({
        http_constants.HttpHeaders.Accept: (
            runtime_constants.MediaTypes.Json)
    })
    # The service expects a parameter *list*; wrap a single argument.
    # isinstance replaces the non-idiomatic `type(params) is list` test.
    if params and not isinstance(params, list):
        params = [params]
    url_connection = self.url_connection
    path = base.GetPathFromLink(sproc_link)
    sproc_id = base.GetResourceIdOrFullNameFromLink(sproc_link)
    headers = base.GetHeaders(self,
                              initial_headers,
                              'post',
                              path,
                              sproc_id,
                              'sprocs',
                              {})
    result, self.last_response_headers = self.__Post(url_connection,
                                                     path,
                                                     params,
                                                     headers)
    return result
def ReplaceStoredProcedure(self, sproc_link, sproc, options=None):
    """Replaces a stored procedure and returns it.

    :Parameters:
        - `sproc_link`: str, the link to the stored procedure.
        - `sproc`: dict, the replacement stored-procedure body.
        - `options`: dict or None, the request options for the request.

    :Returns:
        dict

    """
    if options is None:
        options = {}  # avoid the shared mutable default-argument pitfall
    DocumentClient.__ValidateResource(sproc)
    sproc = sproc.copy()
    # Normalise the script body: 'serverScript' takes precedence over 'body'.
    if sproc.get('serverScript'):
        sproc['body'] = str(sproc['serverScript'])
    elif sproc.get('body'):
        sproc['body'] = str(sproc['body'])
    path = base.GetPathFromLink(sproc_link)
    sproc_id = base.GetResourceIdOrFullNameFromLink(sproc_link)
    return self.Replace(sproc,
                        path,
                        'sprocs',
                        sproc_id,
                        None,
                        options)
def DeleteStoredProcedure(self, sproc_link, options=None):
    """Deletes a stored procedure.

    :Parameters:
        - `sproc_link`: str, the link to the stored procedure.
        - `options`: dict or None, the request options for the request.

    :Returns:
        dict

    """
    if options is None:
        options = {}  # avoid the shared mutable default-argument pitfall
    path = base.GetPathFromLink(sproc_link)
    sproc_id = base.GetResourceIdOrFullNameFromLink(sproc_link)
    return self.DeleteResource(path,
                               'sprocs',
                               sproc_id,
                               None,
                               options)
def DeleteConflict(self, conflict_link, options=None):
    """Deletes a conflict.

    :Parameters:
        - `conflict_link`: str, the link to the conflict.
        - `options`: dict or None, the request options for the request.

    :Returns:
        dict

    """
    if options is None:
        options = {}  # avoid the shared mutable default-argument pitfall
    path = base.GetPathFromLink(conflict_link)
    conflict_id = base.GetResourceIdOrFullNameFromLink(conflict_link)
    return self.DeleteResource(path,
                               'conflicts',
                               conflict_id,
                               None,
                               options)
def ReplaceOffer(self, offer_link, offer):
    """Replace an existing offer with the supplied definition.

    :Parameters:
        - `offer_link`: str, the link to the offer.
        - `offer`: dict, the replacement offer body.

    :Returns:
        dict

    """
    DocumentClient.__ValidateResource(offer)
    offer_path = base.GetPathFromLink(offer_link)
    offer_id = base.GetResourceIdOrFullNameFromLink(offer_link)
    return self.Replace(offer, offer_path, 'offers', offer_id, None, None)
def ReadOffer(self, offer_link):
    """Read a single offer.

    :Parameters:
        - `offer_link`: str, the link to the offer.

    :Returns:
        dict

    """
    offer_path = base.GetPathFromLink(offer_link)
    offer_id = base.GetResourceIdOrFullNameFromLink(offer_link)
    return self.Read(offer_path, 'offers', offer_id, None, {})
def ReadOffers(self, options=None):
    """Reads all offers.

    :Parameters:
        - `options`: dict or None, the request options for the request.

    :Returns:
        query_iterable.QueryIterable

    """
    if options is None:
        options = {}  # avoid the shared mutable default-argument pitfall
    # A feed read is a query with no query text.
    return self.QueryOffers(None, options)
def QueryOffers(self, query, options=None):
    """Query for all offers.

    :Parameters:
        - `query`: str or dict, or None to read the whole feed.
        - `options`: dict or None, the request options for the request.

    :Returns:
        query_iterable.QueryIterable

    """
    if options is None:
        options = {}  # avoid the shared mutable default-argument pitfall

    def fetch_fn(options):
        # Lazily fetch one page of offers; headers captured after the call.
        return self.__QueryFeed('/offers',
                                'offers',
                                '',
                                lambda r: r['Offers'],
                                lambda _, b: b,
                                query,
                                options), self.last_response_headers
    return query_iterable.QueryIterable(options, self.retry_policy, fetch_fn)
def GetDatabaseAccount(self):
    """Gets database account info.

    :Returns:
        documents.DatabaseAccount

    """
    initial_headers = dict(self.default_headers)
    headers = base.GetHeaders(self,
                              initial_headers,
                              'get',
                              '',  # path
                              '',  # id
                              '',  # type
                              {})  # removed the stray trailing semicolon
    result, self.last_response_headers = self.__Get(self.url_connection,
                                                    '',
                                                    headers)
    database_account = documents.DatabaseAccount()
    database_account.DatabasesLink = '/dbs/'
    database_account.MediaLink = '/media/'
    # Media storage quotas are only reported via response headers.
    if (http_constants.HttpHeaders.MaxMediaStorageUsageInMB in
            self.last_response_headers):
        database_account.MaxMediaStorageUsageInMB = (
            self.last_response_headers[
                http_constants.HttpHeaders.MaxMediaStorageUsageInMB])
    if (http_constants.HttpHeaders.CurrentMediaStorageUsageInMB in
            self.last_response_headers):
        database_account.CurrentMediaStorageUsageInMB = (
            self.last_response_headers[
                http_constants.HttpHeaders.CurrentMediaStorageUsageInMB])
    database_account.ConsistencyPolicy = result['userConsistencyPolicy']
    return database_account
def Create(self, body, path, type, id, initial_headers, options=None):
    """Creates a DocumentDB resource and returns it.

    :Parameters:
        - `body`: dict, the resource body.
        - `path`: str, the resource path.
        - `type`: str, the resource type name.
        - `id`: str, the resource id.
        - `initial_headers`: dict or None, headers to start from
          (falls back to the client's default headers).
        - `options`: dict or None, the request options for the request.

    :Returns:
        dict

    """
    if options is None:
        options = {}  # avoid the shared mutable default-argument pitfall
    initial_headers = initial_headers or self.default_headers
    headers = base.GetHeaders(self,
                              initial_headers,
                              'post',
                              path,
                              id,
                              type,
                              options)
    result, self.last_response_headers = self.__Post(self.url_connection,
                                                     path,
                                                     body,
                                                     headers)
    return result
def Upsert(self, body, path, type, id, initial_headers, options=None):
    """Upserts a DocumentDB resource and returns it.

    :Parameters:
        - `body`: dict, the resource body.
        - `path`: str, the resource path.
        - `type`: str, the resource type name.
        - `id`: str, the resource id.
        - `initial_headers`: dict or None, headers to start from
          (falls back to the client's default headers).
        - `options`: dict or None, the request options for the request.

    :Returns:
        dict

    """
    if options is None:
        options = {}  # avoid the shared mutable default-argument pitfall
    initial_headers = initial_headers or self.default_headers
    headers = base.GetHeaders(self,
                              initial_headers,
                              'post',
                              path,
                              id,
                              type,
                              options)
    # The upsert header is what distinguishes this request from Create.
    headers[http_constants.HttpHeaders.IsUpsert] = True
    result, self.last_response_headers = self.__Post(self.url_connection,
                                                     path,
                                                     body,
                                                     headers)
    return result
def Replace(self, resource, path, type, id, initial_headers, options=None):
    """Replaces a DocumentDB resource and returns it.

    :Parameters:
        - `resource`: dict, the replacement resource body.
        - `path`: str, the resource path.
        - `type`: str, the resource type name.
        - `id`: str, the resource id.
        - `initial_headers`: dict or None, headers to start from
          (falls back to the client's default headers).
        - `options`: dict or None, the request options for the request.

    :Returns:
        dict

    """
    if options is None:
        options = {}  # avoid the shared mutable default-argument pitfall
    initial_headers = initial_headers or self.default_headers
    headers = base.GetHeaders(self,
                              initial_headers,
                              'put',
                              path,
                              id,
                              type,
                              options)
    result, self.last_response_headers = self.__Put(self.url_connection,
                                                    path,
                                                    resource,
                                                    headers)
    return result
def Read(self, path, type, id, initial_headers, options=None):
    """Reads a DocumentDB resource and returns it.

    :Parameters:
        - `path`: str, the resource path.
        - `type`: str, the resource type name.
        - `id`: str, the resource id.
        - `initial_headers`: dict or None, headers to start from
          (falls back to the client's default headers).
        - `options`: dict or None, the request options for the request.

    :Returns:
        dict

    """
    if options is None:
        options = {}  # avoid the shared mutable default-argument pitfall
    initial_headers = initial_headers or self.default_headers
    headers = base.GetHeaders(self,
                              initial_headers,
                              'get',
                              path,
                              id,
                              type,
                              options)
    result, self.last_response_headers = self.__Get(self.url_connection,
                                                    path,
                                                    headers)
    return result
def DeleteResource(self, path, type, id, initial_headers, options=None):
    """Deletes a DocumentDB resource and returns it.

    :Parameters:
        - `path`: str, the resource path.
        - `type`: str, the resource type name.
        - `id`: str, the resource id.
        - `initial_headers`: dict or None, headers to start from
          (falls back to the client's default headers).
        - `options`: dict or None, the request options for the request.

    :Returns:
        dict

    """
    if options is None:
        options = {}  # avoid the shared mutable default-argument pitfall
    initial_headers = initial_headers or self.default_headers
    headers = base.GetHeaders(self,
                              initial_headers,
                              'delete',
                              path,
                              id,
                              type,
                              options)
    result, self.last_response_headers = self.__Delete(self.url_connection,
                                                       path,
                                                       headers)
    return result
def __Get(self, url, path, headers):
    """Issue a low-level HTTP GET against the service.

    :Parameters:
        - `url`: str, the service endpoint url.
        - `path`: str, the resource path.
        - `headers`: dict, the request headers.

    :Returns:
        tuple of (result, headers); both are dicts.

    """
    return synchronized_request.SynchronizedRequest(
        self.connection_policy, 'GET', url, path, None, None, headers)
def __Post(self, url, path, body, headers):
    """Issue a low-level HTTP POST against the service.

    :Parameters:
        - `url`: str, the service endpoint url.
        - `path`: str, the resource path.
        - `body`: str, unicode or dict, the request payload.
        - `headers`: dict, the request headers.

    :Returns:
        tuple of (result, headers); both are dicts.

    """
    return synchronized_request.SynchronizedRequest(
        self.connection_policy, 'POST', url, path, body,
        query_params=None, headers=headers)
def __Put(self, url, path, body, headers):
    """Issue a low-level HTTP PUT against the service.

    :Parameters:
        - `url`: str, the service endpoint url.
        - `path`: str, the resource path.
        - `body`: str, unicode or dict, the request payload.
        - `headers`: dict, the request headers.

    :Returns:
        tuple of (result, headers); both are dicts.

    """
    return synchronized_request.SynchronizedRequest(
        self.connection_policy, 'PUT', url, path, body,
        query_params=None, headers=headers)
def __Delete(self, url, path, headers):
    """Issue a low-level HTTP DELETE against the service.

    :Parameters:
        - `url`: str, the service endpoint url.
        - `path`: str, the resource path.
        - `headers`: dict, the request headers.

    :Returns:
        tuple of (result, headers); both are dicts.

    """
    return synchronized_request.SynchronizedRequest(
        self.connection_policy, 'DELETE', url, path,
        request_data=None, query_params=None, headers=headers)
def QueryFeed(self, path, collection_id, query, options):
    """Query the document feed of a collection.

    :Parameters:
        - `path`: str, path to the document collection.
        - `collection_id`: str, id of the document collection.
        - `query`: str or dict, or None for a plain feed read.
        - `options`: dict, the request options for the request.

    :Returns:
        tuple of (documents, last response headers).

    """
    # Perform the feed request first so last_response_headers reflects it.
    documents = self.__QueryFeed(path,
                                 'docs',
                                 collection_id,
                                 lambda r: r['Documents'],
                                 lambda _, b: b,
                                 query,
                                 options)
    return documents, self.last_response_headers
def __QueryFeed(self,
                path,
                type,
                id,
                result_fn,
                create_fn,
                query,
                options=None):
    """Query for more than one DocumentDB resources.

    Raises :exc:`SystemError` if the query compatibility mode is undefined.

    :Parameters:
        - `path`: str
        - `type`: str
        - `id`: str
        - `result_fn`: function, extracts the raw body list from a response.
        - `create_fn`: function, converts one raw body into a resource.
        - `query`: str or dict, or None for a plain read-feed (GET) request.
        - `options`: dict or None, the request options for the request.

    :Returns:
        list

    """
    if options is None:
        options = {}  # avoid the shared mutable default-argument pitfall
    if query:
        __GetBodiesFromQueryResult = result_fn
    else:
        def __GetBodiesFromQueryResult(result):
            return [create_fn(self, body) for body in result_fn(result)]

    url_connection = self.url_connection
    initial_headers = self.default_headers.copy()
    # Copy to make sure that default_headers won't be changed.
    if query is None:  # identity test replaces the non-idiomatic `== None`
        # Read feed: a plain GET request against the resource path.
        headers = base.GetHeaders(self,
                                  initial_headers,
                                  'get',
                                  path,
                                  id,
                                  type,
                                  options)
        result, self.last_response_headers = self.__Get(url_connection,
                                                        path,
                                                        headers)
        return __GetBodiesFromQueryResult(result)
    else:
        query = self.__CheckAndUnifyQueryFormat(query)
        initial_headers[http_constants.HttpHeaders.IsQuery] = 'true'
        # Pick the content type matching the configured compatibility mode.
        if (self._query_compatibility_mode == DocumentClient._QueryCompatibilityMode.Default or
                self._query_compatibility_mode == DocumentClient._QueryCompatibilityMode.Query):
            initial_headers[http_constants.HttpHeaders.ContentType] = runtime_constants.MediaTypes.QueryJson
        elif self._query_compatibility_mode == DocumentClient._QueryCompatibilityMode.SqlQuery:
            initial_headers[http_constants.HttpHeaders.ContentType] = runtime_constants.MediaTypes.SQL
        else:
            raise SystemError('Unexpected query compatibility mode.')
        headers = base.GetHeaders(self,
                                  initial_headers,
                                  'post',
                                  path,
                                  id,
                                  type,
                                  options)
        result, self.last_response_headers = self.__Post(url_connection,
                                                         path,
                                                         query,
                                                         headers)
        return __GetBodiesFromQueryResult(result)
def __CheckAndUnifyQueryFormat(self, query_body):
| |
413
# ViReal64, read-write
KTM960X_ATTR_OUTPUT_PULSE_WIDTH = IVI_SPECIFIC_ATTR_BASE + 414
# - Voltage
KTM960X_ATTR_OUTPUT_VOLTAGE_AUTO_RANGE_ENABLED = (
IVI_SPECIFIC_ATTR_BASE + 420
) # ViBoolean, read-write
KTM960X_ATTR_OUTPUT_VOLTAGE_BASE_LEVEL = (
IVI_SPECIFIC_ATTR_BASE + 421
) # ViReal64, read-write
KTM960X_ATTR_OUTPUT_VOLTAGE_BASE_TYPE = (
IVI_SPECIFIC_ATTR_BASE + 422
) # ViInt32, read-write
# ViReal64, read-write
KTM960X_ATTR_OUTPUT_VOLTAGE_LEVEL = IVI_SPECIFIC_ATTR_BASE + 423
KTM960X_ATTR_OUTPUT_VOLTAGE_POST_ENABLED = (
IVI_SPECIFIC_ATTR_BASE + 424
) # ViBoolean, read-write
KTM960X_ATTR_OUTPUT_VOLTAGE_POST_LEVEL = (
IVI_SPECIFIC_ATTR_BASE + 425
) # ViReal64, read-write
KTM960X_ATTR_OUTPUT_VOLTAGE_POST_TYPE = (
IVI_SPECIFIC_ATTR_BASE + 426
) # ViInt32, read-write
# ViReal64, read-write
KTM960X_ATTR_OUTPUT_VOLTAGE_RANGE = IVI_SPECIFIC_ATTR_BASE + 427
KTM960X_ATTR_OUTPUT_VOLTAGE_RANGE_LOWER_LIMIT = (
IVI_SPECIFIC_ATTR_BASE + 428
) # ViReal64, read-write
KTM960X_ATTR_OUTPUT_VOLTAGE_TRIGGERED_LEVEL = (
IVI_SPECIFIC_ATTR_BASE + 429
) # ViReal64, read-write
# - WaitTime
KTM960X_ATTR_OUTPUT_WAITTIME_AUTO_ENABLED = (
IVI_SPECIFIC_ATTR_BASE + 430
) # ViBoolean, read-write
KTM960X_ATTR_OUTPUT_WAITTIME_ENABLED = (
IVI_SPECIFIC_ATTR_BASE + 431
) # ViBoolean, read-write
# ViReal64, read-write
KTM960X_ATTR_OUTPUT_WAITTIME_GAIN = IVI_SPECIFIC_ATTR_BASE + 432
KTM960X_ATTR_OUTPUT_WAITTIME_OFFSET = (
IVI_SPECIFIC_ATTR_BASE + 433
) # ViReal64, read-write
# - Transient
# ViInt32, read-only
KTM960X_ATTR_TRANSIENT_COUNT = IVI_SPECIFIC_ATTR_BASE + 441
KTM960X_ATTR_TRANSIENT_TRIGGER_OUTPUT_ENABLED = (
IVI_SPECIFIC_ATTR_BASE + 521
) # ViBoolean, read-write
# - Arm
# ViInt32, read-write
KTM960X_ATTR_TRANSIENT_ARM_BYPASS = IVI_SPECIFIC_ATTR_BASE + 442
# ViInt32, read-write
KTM960X_ATTR_TRANSIENT_ARM_COUNT = IVI_SPECIFIC_ATTR_BASE + 443
# ViReal64, read-write
KTM960X_ATTR_TRANSIENT_ARM_DELAY = IVI_SPECIFIC_ATTR_BASE + 444
# ViInt32, read-write
KTM960X_ATTR_TRANSIENT_ARM_SOURCE = IVI_SPECIFIC_ATTR_BASE + 445
# ViReal64, read-write
KTM960X_ATTR_TRANSIENT_ARM_TIMER = IVI_SPECIFIC_ATTR_BASE + 446
KTM960X_ATTR_TRANSIENT_ARM_TRIGGER_OUTPUT_ENABLED = (
IVI_SPECIFIC_ATTR_BASE + 447
) # ViBoolean, read-write
# - Current
KTM960X_ATTR_TRANSIENT_CURRENT_CENTER = (
IVI_SPECIFIC_ATTR_BASE + 448
) # ViReal64, read-write
KTM960X_ATTR_TRANSIENT_CURRENT_LIST_POINTS = (
IVI_SPECIFIC_ATTR_BASE + 449
) # ViInt32, read-only
KTM960X_ATTR_TRANSIENT_CURRENT_LIST_START_POINT = (
IVI_SPECIFIC_ATTR_BASE + 450
) # ViInt32, read-write
KTM960X_ATTR_TRANSIENT_CURRENT_MODE = (
IVI_SPECIFIC_ATTR_BASE + 451
) # ViInt32, read-write
KTM960X_ATTR_TRANSIENT_CURRENT_SPAN = (
IVI_SPECIFIC_ATTR_BASE + 452
) # ViReal64, read-write
KTM960X_ATTR_TRANSIENT_CURRENT_START = (
IVI_SPECIFIC_ATTR_BASE + 453
) # ViReal64, read-write
KTM960X_ATTR_TRANSIENT_CURRENT_STEP = (
IVI_SPECIFIC_ATTR_BASE + 454
) # ViReal64, read-write
KTM960X_ATTR_TRANSIENT_CURRENT_STOP = (
IVI_SPECIFIC_ATTR_BASE + 455
) # ViReal64, read-write
KTM960X_ATTR_TRANSIENT_CURRENT_SWEEP_POINTS = (
IVI_SPECIFIC_ATTR_BASE + 456
) # ViInt32, read-write
KTM960X_ATTR_TRANSIENT_CURRENT_LIST_OUTPUT_TRIGGER_ENABLED = (
IVI_SPECIFIC_ATTR_BASE + 519
) # ViBoolean, read-write
KTM960X_ATTR_TRANSIENT_CURRENT_TRIGGER_LIST_POINTS = (
IVI_SPECIFIC_ATTR_BASE + 520
) # ViInt32, read-only
# - Sweep
KTM960X_ATTR_TRANSIENT_SWEEP_DIRECTION = (
IVI_SPECIFIC_ATTR_BASE + 457
) # ViInt32, read-write
# ViInt32, read-write
KTM960X_ATTR_TRANSIENT_SWEEP_MODE = IVI_SPECIFIC_ATTR_BASE + 458
KTM960X_ATTR_TRANSIENT_SWEEP_OUTPUT_RANGING_MODE = (
IVI_SPECIFIC_ATTR_BASE + 459
) # ViInt32, read-write
KTM960X_ATTR_TRANSIENT_SWEEP_OUTPUT_SCALE = (
IVI_SPECIFIC_ATTR_BASE + 460
) # ViInt32, read-write
# - Trigger
KTM960X_ATTR_TRANSIENT_TRIGGER_BYPASS = (
IVI_SPECIFIC_ATTR_BASE + 461
) # ViInt32, read-write
KTM960X_ATTR_TRANSIENT_TRIGGER_CONTINUOUS_ENABLED = (
IVI_SPECIFIC_ATTR_BASE + 462
) # ViBoolean, read-write
KTM960X_ATTR_TRANSIENT_TRIGGER_COUNT = (
IVI_SPECIFIC_ATTR_BASE + 463
) # ViInt32, read-write
KTM960X_ATTR_TRANSIENT_TRIGGER_DELAY = (
IVI_SPECIFIC_ATTR_BASE + 464
) # ViReal64, read-write
KTM960X_ATTR_TRANSIENT_TRIGGER_SOURCE = (
IVI_SPECIFIC_ATTR_BASE + 465
) # ViInt32, read-write
KTM960X_ATTR_TRANSIENT_TRIGGER_TIMER = (
IVI_SPECIFIC_ATTR_BASE + 466
) # ViReal64, read-write
KTM960X_ATTR_TRANSIENT_TRIGGER_TRIGGER_OUTPUT_ENABLED = (
IVI_SPECIFIC_ATTR_BASE + 467
) # ViBoolean, read-write
# - Voltage
KTM960X_ATTR_TRANSIENT_VOLTAGE_CENTER = (
IVI_SPECIFIC_ATTR_BASE + 468
) # ViReal64, read-write
KTM960X_ATTR_TRANSIENT_VOLTAGE_LIST_POINTS = (
IVI_SPECIFIC_ATTR_BASE + 469
) # ViInt32, read-only
KTM960X_ATTR_TRANSIENT_VOLTAGE_LIST_START_POINT = (
IVI_SPECIFIC_ATTR_BASE + 470
) # ViInt32, read-write
KTM960X_ATTR_TRANSIENT_VOLTAGE_MODE = (
IVI_SPECIFIC_ATTR_BASE + 471
) # ViInt32, read-write
KTM960X_ATTR_TRANSIENT_VOLTAGE_SPAN = (
IVI_SPECIFIC_ATTR_BASE + 472
) # ViReal64, read-write
KTM960X_ATTR_TRANSIENT_VOLTAGE_START = (
IVI_SPECIFIC_ATTR_BASE + 473
) # ViReal64, read-write
KTM960X_ATTR_TRANSIENT_VOLTAGE_STEP = (
IVI_SPECIFIC_ATTR_BASE + 474
) # ViReal64, read-write
KTM960X_ATTR_TRANSIENT_VOLTAGE_STOP = (
IVI_SPECIFIC_ATTR_BASE + 475
) # ViReal64, read-write
KTM960X_ATTR_TRANSIENT_VOLTAGE_SWEEP_POINTS = (
IVI_SPECIFIC_ATTR_BASE + 476
) # ViInt32, read-write
KTM960X_ATTR_TRANSIENT_VOLTAGE_LIST_OUTPUT_TRIGGER_ENABLED = (
IVI_SPECIFIC_ATTR_BASE + 522
) # ViBoolean, read-write
KTM960X_ATTR_TRANSIENT_VOLTAGE_TRIGGER_LIST_POINTS = (
IVI_SPECIFIC_ATTR_BASE + 523
) # ViInt32, read-only
# *------------------------ Attribute Value Defines -------------------------*
# - Defined values for
KTM960X_VAL_STATUS_BYTE_FLAGS_USER0 = 1
KTM960X_VAL_STATUS_BYTE_FLAGS_USER1 = 2
KTM960X_VAL_STATUS_BYTE_FLAGS_USER2 = 4
KTM960X_VAL_STATUS_BYTE_FLAGS_USER3 = 8
KTM960X_VAL_STATUS_BYTE_FLAGS_MESSAGE_AVAILABLE = 16
KTM960X_VAL_STATUS_BYTE_FLAGS_EVENT_STATUS_REGISTER = 32
KTM960X_VAL_STATUS_BYTE_FLAGS_REQUESTING_SERVICE = 64
KTM960X_VAL_STATUS_BYTE_FLAGS_USER7 = 128
# - Defined values for
# parameter Buttons in function KtM960x_SystemSfpMessageBox
KTM960X_VAL_MESSAGE_BOX_BUTTONS_OK = 0
KTM960X_VAL_MESSAGE_BOX_BUTTONS_OK_CANCEL = 1
KTM960X_VAL_MESSAGE_BOX_BUTTONS_YES_NO = 2
# - Defined values for
# parameter Val in function KtM960x_SystemSfpMessageBox
KTM960X_VAL_MESSAGE_BOX_RESULTS_NONE = 0
KTM960X_VAL_MESSAGE_BOX_RESULTS_OK = 1
KTM960X_VAL_MESSAGE_BOX_RESULTS_CANCEL = 2
KTM960X_VAL_MESSAGE_BOX_RESULTS_YES = 3
KTM960X_VAL_MESSAGE_BOX_RESULTS_NO = 4
# - Defined values for
# attribute KTM960X_ATTR_MODULE_CALIBRATION_STATUS
# attribute KTM960X_ATTR_CALIBRATION_STATUS
KTM960X_VAL_CALIBRATION_STATUS_DUE = 1
KTM960X_VAL_CALIBRATION_STATUS_EXPIRED = 2
KTM960X_VAL_CALIBRATION_STATUS_INSTRUMENT_CALIBRATED = 0
KTM960X_VAL_CALIBRATION_STATUS_MODULES_CALIBRATED = 3
KTM960X_VAL_CALIBRATION_STATUS_NOT_CALIBRATED = 4
KTM960X_VAL_CALIBRATION_STATUS_NOT_SUBJECT_TO_CALIBRATION = 5
# - Defined values for
KTM960X_VAL_PEER_TO_PEER_PORT_ROLE_MASTER_PRODUCER = 0
KTM960X_VAL_PEER_TO_PEER_PORT_ROLE_MASTER_CONSUMER = 1
KTM960X_VAL_PEER_TO_PEER_PORT_ROLE_SLAVE_PRODUCER = 2
KTM960X_VAL_PEER_TO_PEER_PORT_ROLE_SLAVE_CONSUMER = 3
KTM960X_VAL_PEER_TO_PEER_PORT_ROLE_STREAMING_MASTER_PRODUCER = 4
KTM960X_VAL_PEER_TO_PEER_PORT_ROLE_STREAMING_MASTER_CONSUMER = 5
KTM960X_VAL_PEER_TO_PEER_PORT_ROLE_STREAMING_SLAVE_PRODUCER = 6
KTM960X_VAL_PEER_TO_PEER_PORT_ROLE_STREAMING_SLAVE_CONSUMER = 7
# - Defined values for
KTM960X_VAL_PEER_TO_PEER_DATA_FORMAT_OPAQUE = 0
KTM960X_VAL_PEER_TO_PEER_DATA_FORMAT_I8 = 1
KTM960X_VAL_PEER_TO_PEER_DATA_FORMAT_I16 = 2
KTM960X_VAL_PEER_TO_PEER_DATA_FORMAT_I32 = 3
KTM960X_VAL_PEER_TO_PEER_DATA_FORMAT_F32 = 4
KTM960X_VAL_PEER_TO_PEER_DATA_FORMAT_F64 = 5
KTM960X_VAL_PEER_TO_PEER_DATA_FORMAT_K_I32V1 = 101
KTM960X_VAL_PEER_TO_PEER_DATA_FORMAT_K_I24M8 = 102
# - Defined values for
KTM960X_VAL_DEVICE_SYNC_RESOURCES_FP_SYNC = 32768
KTM960X_VAL_DEVICE_SYNC_RESOURCES_NONE = 0
KTM960X_VAL_DEVICE_SYNC_RESOURCES_PXI_LBL6 = 2048
KTM960X_VAL_DEVICE_SYNC_RESOURCES_PXI_LBR6 = 1024
KTM960X_VAL_DEVICE_SYNC_RESOURCES_PXI_STAR = 512
KTM960X_VAL_DEVICE_SYNC_RESOURCES_PXI_TRIG0 = 1
KTM960X_VAL_DEVICE_SYNC_RESOURCES_PXI_TRIG1 = 2
KTM960X_VAL_DEVICE_SYNC_RESOURCES_PXI_TRIG2 = 4
KTM960X_VAL_DEVICE_SYNC_RESOURCES_PXI_TRIG3 = 8
KTM960X_VAL_DEVICE_SYNC_RESOURCES_PXI_TRIG4 = 16
KTM960X_VAL_DEVICE_SYNC_RESOURCES_PXI_TRIG5 = 32
KTM960X_VAL_DEVICE_SYNC_RESOURCES_PXI_TRIG6 = 64
KTM960X_VAL_DEVICE_SYNC_RESOURCES_PXI_TRIG7 = 128
KTM960X_VAL_DEVICE_SYNC_RESOURCES_PXIE_DSTARA = 4096
KTM960X_VAL_DEVICE_SYNC_RESOURCES_PXIE_DSTARB = 8192
KTM960X_VAL_DEVICE_SYNC_RESOURCES_PXIE_DSTARC = 16384
# - Defined values for
KTM960X_VAL_DEVICE_SYNC_STATE_ARM = 1
KTM960X_VAL_DEVICE_SYNC_STATE_IDLE = 0
KTM960X_VAL_DEVICE_SYNC_STATE_RUN = 3
KTM960X_VAL_DEVICE_SYNC_STATE_TRIGGER = 2
KTM960X_VAL_DEVICE_SYNC_STATE_UNKNOWN = 4
# - Defined values for
KTM960X_VAL_DEVICE_SYNC_ROLE_GROUP_MASTER = 2
KTM960X_VAL_DEVICE_SYNC_ROLE_LOCAL_MASTER = 4
KTM960X_VAL_DEVICE_SYNC_ROLE_OFF = 0
KTM960X_VAL_DEVICE_SYNC_ROLE_SLAVE = 3
KTM960X_VAL_DEVICE_SYNC_ROLE_SYSTEM_MASTER = 1
KTM960X_VAL_DEVICE_SYNC_ROLE_NOT_SUPPORTED = -1
# - Defined values for
KTM960X_VAL_ODI_LANE_RATE_12R5G = 1
KTM960X_VAL_ODI_LANE_RATE_14R1G = 2
# - Defined values for
KTM960X_VAL_ODI_DIRECTIONALITY_BIDIRECTIONAL = 1
KTM960X_VAL_ODI_DIRECTIONALITY_PRODUCER = 2
KTM960X_VAL_ODI_DIRECTIONALITY_CONSUMER = 3
KTM960X_VAL_ODI_DIRECTIONALITY_DUAL_UNIDIRECTIONAL = 4
# - Defined values for
KTM960X_VAL_ODI_FLOW_CONTROL_NONE = 1
KTM960X_VAL_ODI_FLOW_CONTROL_INBAND = 2
KTM960X_VAL_ODI_FLOW_CONTROL_INBAND_PER_CHANNEL = 3
KTM960X_VAL_ODI_FLOW_CONTROL_OUTOFBAND_1WIRE = 4
KTM960X_VAL_ODI_FLOW_CONTROL_OUTOFBAND_BACKPLANE_0 = 100
KTM960X_VAL_ODI_FLOW_CONTROL_OUTOFBAND_BACKPLANE_1 = 101
KTM960X_VAL_ODI_FLOW_CONTROL_OUTOFBAND_BACKPLANE_2 = 102
KTM960X_VAL_ODI_FLOW_CONTROL_OUTOFBAND_BACKPLANE_3 = 103
KTM960X_VAL_ODI_FLOW_CONTROL_OUTOFBAND_BACKPLANE_4 = 104
KTM960X_VAL_ODI_FLOW_CONTROL_OUTOFBAND_BACKPLANE_5 = 105
KTM960X_VAL_ODI_FLOW_CONTROL_OUTOFBAND_BACKPLANE_6 = 106
KTM960X_VAL_ODI_FLOW_CONTROL_OUTOFBAND_BACKPLANE_7 = 107
KTM960X_VAL_ODI_FLOW_CONTROL_OUTOFBAND_BACKPLANE_8 = 108
KTM960X_VAL_ODI_FLOW_CONTROL_OUTOFBAND_BACKPLANE_9 = 109
KTM960X_VAL_ODI_FLOW_CONTROL_OUTOFBAND_BACKPLANE_10 = 110
KTM960X_VAL_ODI_FLOW_CONTROL_OUTOFBAND_BACKPLANE_11 = 111
KTM960X_VAL_ODI_FLOW_CONTROL_OUTOFBAND_BACKPLANE_12 = 112
# - Defined values for
KTM960X_VAL_ODI_PORT_STATUS_ACTIVE = 1
KTM960X_VAL_ODI_PORT_STATUS_TX_READY = 2
KTM960X_VAL_ODI_PORT_STATUS_RX_READY = 4
KTM960X_VAL_ODI_PORT_STATUS_RX_LANE_ERROR = 8
KTM960X_VAL_ODI_PORT_STATUS_RX_BURST_MAX_ERROR = 16
KTM960X_VAL_ODI_PORT_STATUS_RX_CRC_ERROR = 32
KTM960X_VAL_ODI_PORT_STATUS_RX_OVERRUN = 64
KTM960X_VAL_ODI_PORT_STATUS_RX_FC_STATUS = 65536
KTM960X_VAL_ODI_PORT_STATUS_RX_FC_STATUS_0 = 131072
KTM960X_VAL_ODI_PORT_STATUS_RX_SIGNAL_LOSS = 128
KTM960X_VAL_ODI_PORT_STATUS_RX_SYNC_PENDING = 256
# - Defined values for
KTM960X_VAL_ODI_PACKET_FORMAT_NO_HEADER = 1
KTM960X_VAL_ODI_PACKET_FORMAT_VITA49_DATA = 2
KTM960X_VAL_ODI_PACKET_FORMAT_VITA49_WITH_CONTEXT = 3
KTM960X_VAL_ODI_PACKET_FORMAT_VITA49_ONCE = 1001
KTM960X_VAL_ODI_PACKET_FORMAT_VITA49_EXTENSION = 4
# - Defined values for
KTM960X_VAL_ODI_TIMESTAMP_FORMAT_NO_TIMESTAMP = 1
KTM960X_VAL_ODI_TIMESTAMP_FORMAT_GPS = 2
KTM960X_VAL_ODI_TIMESTAMP_FORMAT_RELATIVE = 3
KTM960X_VAL_ODI_TIMESTAMP_FORMAT_SAMPLE_COUNT = 4
KTM960X_VAL_ODI_TIMESTAMP_FORMAT_UTC = 5
# - Defined values for
KTM960X_VAL_ARB_DATA_FORMAT_ENUM_ARB_DATA_FORMATIQ24 = 10
KTM960X_VAL_ARB_DATA_FORMAT_ENUM_ARB_DATA_FORMATIQ32 = 2
KTM960X_VAL_ARB_DATA_FORMAT_ENUM_ARB_DATA_FORMATIQ64 = 3
KTM960X_VAL_ARB_DATA_FORMAT_ENUM_ARB_DATA_FORMAT_OPAQUE = 11
# - Defined values for
KTM960X_VAL_MARKER_ENUM_MARKER1 = 1
KTM960X_VAL_MARKER_ENUM_MARKER2 = 2
KTM960X_VAL_MARKER_ENUM_MARKER3 = 3
KTM960X_VAL_MARKER_ENUM_MARKER4 = 4
KTM960X_VAL_MARKER_ENUM_MARKER_NONE = 0
# - Defined values for
KTM960X_VAL_ARB_MEMORY_MODE_ENUM_ARB_MEMORY_MODE_AUTO = 0
KTM960X_VAL_ARB_MEMORY_MODE_ENUM_ARB_MEMORY_MODE_MANUAL = 1
# - Defined values for
KTM960X_VAL_BINARY_ARB_ENUM_BINARY_ARB_AUTO = 99
KTM960X_VAL_BINARY_ARB_ENUM_BINARY_ARBDP = 1
KTM960X_VAL_BINARY_ARB_ENUM_BINARY_ARBDP_PLUS_MARKERS = 0
KTM960X_VAL_BINARY_ARB_ENUM_BINARY_ARB_KEYSIGHT = 4
KTM960X_VAL_BINARY_ARB_ENUM_BINARY_ARB_SHORT = 3
KTM960X_VAL_BINARY_ARB_ENUM_BINARY_ARBSP = 2
# - Defined values for
# parameter FetchType in function KtM960x_MeasurementFetchArrayData
# parameter FetchType in function KtM960x_MeasurementFetchScalarData
# parameter FetchType in function KtM960x_MeasurementReadArrayData
# parameter FetchType in function KtM960x_MeasurementReadScalarData
# parameter FetchType in function KtM960x_MeasurementFetchLatestScalarData
KTM960X_VAL_MEASUREMENT_FETCH_TYPE_CURRENT = 2
KTM960X_VAL_MEASUREMENT_FETCH_TYPE_RESISTANCE = 3
KTM960X_VAL_MEASUREMENT_FETCH_TYPE_SOURCE = 6
KTM960X_VAL_MEASUREMENT_FETCH_TYPE_STATUS = 4
KTM960X_VAL_MEASUREMENT_FETCH_TYPE_TIME = 5
KTM960X_VAL_MEASUREMENT_FETCH_TYPE_VOLTAGE = 1
KTM960X_VAL_MEASUREMENT_FETCH_TYPE_ALL = 0
# - Defined values for
# attribute KTM960X_ATTR_MEASUREMENT_ARM_SOURCE
# attribute KTM960X_ATTR_MEASUREMENT_TRIGGER_SOURCE
# attribute KTM960X_ATTR_TRANSIENT_ARM_SOURCE
# attribute KTM960X_ATTR_TRANSIENT_TRIGGER_SOURCE
KTM960X_VAL_MEASUREMENT_TRIGGER_SOURCE_AINT = 0
KTM960X_VAL_MEASUREMENT_TRIGGER_SOURCE_BUS = 1
KTM960X_VAL_MEASUREMENT_TRIGGER_SOURCE_TIMER = 2
KTM960X_VAL_MEASUREMENT_TRIGGER_SOURCE_PXI0 = 3
KTM960X_VAL_MEASUREMENT_TRIGGER_SOURCE_PXI1 = 4
KTM960X_VAL_MEASUREMENT_TRIGGER_SOURCE_PXI2 = 5
KTM960X_VAL_MEASUREMENT_TRIGGER_SOURCE_PXI3 = 6
KTM960X_VAL_MEASUREMENT_TRIGGER_SOURCE_PXI4 = 7
KTM960X_VAL_MEASUREMENT_TRIGGER_SOURCE_PXI5 = 8
KTM960X_VAL_MEASUREMENT_TRIGGER_SOURCE_PXI6 = 9
KTM960X_VAL_MEASUREMENT_TRIGGER_SOURCE_PXI7 = 10
KTM960X_VAL_MEASUREMENT_TRIGGER_SOURCE_EXTERNAL1 = 11
KTM960X_VAL_MEASUREMENT_TRIGGER_SOURCE_EXTERNAL2 = 12
KTM960X_VAL_MEASUREMENT_TRIGGER_SOURCE_INTERNAL1 = 13
KTM960X_VAL_MEASUREMENT_TRIGGER_SOURCE_INTERNAL2 = 14
KTM960X_VAL_MEASUREMENT_TRIGGER_SOURCE_PEER1 = 15
KTM960X_VAL_MEASUREMENT_TRIGGER_SOURCE_PEER2 = 16
# - Defined values for
# parameter MeasureType in function KtM960x_MeasurementMeasure
# parameter Val in function KtM960x_MeasurementFunctionGetDisabled
# parameter Val in function KtM960x_MeasurementFunctionGetEnabled
# parameter MeasureType in function KtM960x_MeasurementFunctionGetState
# parameter MeasureType in function KtM960x_MeasurementFunctionSetDisabled
# parameter MeasureType in function KtM960x_MeasurementFunctionSetEnabled
KTM960X_VAL_MEASUREMENT_TYPE_CURRENT = 2
KTM960X_VAL_MEASUREMENT_TYPE_RESISTANCE = 3
KTM960X_VAL_MEASUREMENT_TYPE_VOLTAGE = 1
KTM960X_VAL_MEASUREMENT_TYPE_ALL = 0
# - Defined values for
# parameter Val in function KtM960x_MeasurementGetOutputTrigger
# parameter Triggers in function KtM960x_MeasurementSetOutputTrigger
# parameter Val in function KtM960x_MeasurementArmGetOutputTrigger
# parameter Triggers in function KtM960x_MeasurementArmSetOutputTrigger
# parameter Val in function KtM960x_MeasurementTriggerGetOutputTrigger
# parameter Triggers in function KtM960x_MeasurementTriggerSetOutputTrigger
# parameter Val in function KtM960x_TransientGetOutputTrigger
# parameter Triggers in function KtM960x_TransientSetOutputTrigger
# parameter Val in function KtM960x_TransientArmGetOutputTrigger
# parameter Triggers in function KtM960x_TransientArmSetOutputTrigger
# parameter Val in function KtM960x_TransientCurrentGetListOutputTrigger
# parameter Triggers in function KtM960x_TransientCurrentSetListOutputTrigger
# parameter Val in function KtM960x_TransientTriggerGetOutputTrigger
# parameter Triggers in function KtM960x_TransientTriggerSetOutputTrigger
# parameter Val in function KtM960x_TransientVoltageGetListOutputTrigger
# parameter Triggers in function KtM960x_TransientVoltageSetListOutputTrigger
KTM960X_VAL_OUTPUT_TRIGGER_SIGNAL_PXI0 = 0
KTM960X_VAL_OUTPUT_TRIGGER_SIGNAL_PXI1 = 1
KTM960X_VAL_OUTPUT_TRIGGER_SIGNAL_PXI2 = 2
KTM960X_VAL_OUTPUT_TRIGGER_SIGNAL_PXI3 = 3
KTM960X_VAL_OUTPUT_TRIGGER_SIGNAL_PXI4 = 4
KTM960X_VAL_OUTPUT_TRIGGER_SIGNAL_PXI5 = 5
KTM960X_VAL_OUTPUT_TRIGGER_SIGNAL_PXI6 = 6
KTM960X_VAL_OUTPUT_TRIGGER_SIGNAL_PXI7 = 7
KTM960X_VAL_OUTPUT_TRIGGER_SIGNAL_EXTERNAL1 = 8
KTM960X_VAL_OUTPUT_TRIGGER_SIGNAL_EXTERNAL2 = 9
KTM960X_VAL_OUTPUT_TRIGGER_SIGNAL_INTERNAL1 = 10
KTM960X_VAL_OUTPUT_TRIGGER_SIGNAL_INTERNAL2 = 11
KTM960X_VAL_OUTPUT_TRIGGER_SIGNAL_PEER1 = 12
KTM960X_VAL_OUTPUT_TRIGGER_SIGNAL_PEER2 = 13
# - Defined values for
# attribute KTM960X_ATTR_MEASUREMENT_ARM_BYPASS
# attribute KTM960X_ATTR_MEASUREMENT_TRIGGER_BYPASS
# attribute KTM960X_ATTR_TRANSIENT_ARM_BYPASS
# attribute KTM960X_ATTR_TRANSIENT_TRIGGER_BYPASS
KTM960X_VAL_BYPASS_OFF = 0
KTM960X_VAL_BYPASS_ONCE = 1
# - Defined values for
KTM960X_VAL_FORCE_RANGING_MODE_NORMAL = 0
KTM960X_VAL_FORCE_RANGING_MODE_SPEED = 1
KTM960X_VAL_FORCE_RANGING_MODE_RESOLUTION = 2
# - Defined values for
KTM960X_VAL_BUFFER_CONTROL_NEXT = 0
KTM960X_VAL_BUFFER_CONTROL_NEVER = 1
# - Defined values for
KTM960X_VAL_TRACE_OPERATION_MEAN = 0
KTM960X_VAL_TRACE_OPERATION_STANDARD_DEVIATION = 1
KTM960X_VAL_TRACE_OPERATION_MAXIMUM = 2
KTM960X_VAL_TRACE_OPERATION_MINIMUM = 3
KTM960X_VAL_TRACE_OPERATION_PEAK_TO_PEAK = 4
# - Defined values for
KTM960X_VAL_TIME_STAMP_FORMAT_ABSOLUTE = 0
KTM960X_VAL_TIME_STAMP_FORMAT_DELTA = 1
# - Defined values for
# attribute KTM960X_ATTR_OUTPUT_CURRENT_BASE_TYPE
# attribute KTM960X_ATTR_OUTPUT_VOLTAGE_BASE_TYPE
KTM960X_VAL_OUTPUT_BASE_TYPE_MANUAL = 0
KTM960X_VAL_OUTPUT_BASE_TYPE_IMMEDIATE = 1
KTM960X_VAL_OUTPUT_BASE_TYPE_TRIGGERED = 2
KTM960X_VAL_OUTPUT_BASE_TYPE_START = 3
KTM960X_VAL_OUTPUT_BASE_TYPE_STOP = 4
# - Defined values for
# attribute KTM960X_ATTR_OUTPUT_CURRENT_POST_TYPE
# attribute KTM960X_ATTR_OUTPUT_VOLTAGE_POST_TYPE
KTM960X_VAL_OUTPUT_POST_TYPE_TRIGGERED = 0
KTM960X_VAL_OUTPUT_POST_TYPE_START = 1
KTM960X_VAL_OUTPUT_POST_TYPE_STOP = 2
KTM960X_VAL_OUTPUT_POST_TYPE_BASE = 3
KTM960X_VAL_OUTPUT_POST_TYPE_MANUAL = 4
KTM960X_VAL_OUTPUT_POST_TYPE_IMMEDIATE = 5
# - Defined values for
# attribute KTM960X_ATTR_OUTPUT_OFF_CONDITION
KTM960X_VAL_OFF_CONDITION_ZERO = 0
KTM960X_VAL_OFF_CONDITION_HIZ = 1
KTM960X_VAL_OFF_CONDITION_NORMAL = 2
# - Defined values for
# attribute KTM960X_ATTR_OUTPUT_PRIORITY_MODE
KTM960X_VAL_PRIORITY_MODE_VOLTAGE = 0
KTM960X_VAL_PRIORITY_MODE_CURRENT = 1
# - Defined values for
# attribute KTM960X_ATTR_OUTPUT_SHAPE
KTM960X_VAL_SHAPE_MODE_DC = 0
KTM960X_VAL_SHAPE_MODE_PULSE = 1
# - Defined values for
# attribute KTM960X_ATTR_TRANSIENT_CURRENT_MODE
# attribute KTM960X_ATTR_TRANSIENT_VOLTAGE_MODE
KTM960X_VAL_OUTPUT_MODE_FIXED = 0
KTM960X_VAL_OUTPUT_MODE_LIST = 1
KTM960X_VAL_OUTPUT_MODE_SWEEP = 2
# - Defined values for
# attribute KTM960X_ATTR_TRANSIENT_SWEEP_MODE
KTM960X_VAL_SWEEP_MODE_SINGLE = 0
KTM960X_VAL_SWEEP_MODE_DOUBLE = 1
# - Defined values for
# attribute KTM960X_ATTR_TRANSIENT_SWEEP_OUTPUT_RANGING_MODE
KTM960X_VAL_OUTPUT_RANGING_MODE_BEST = 0
KTM960X_VAL_OUTPUT_RANGING_MODE_FIXED = 2
# - Defined values for
# attribute KTM960X_ATTR_TRANSIENT_SWEEP_OUTPUT_SCALE
KTM960X_VAL_OUTPUT_SCALE_LINEAR = 0
# - Defined values for
# attribute KTM960X_ATTR_TRANSIENT_SWEEP_DIRECTION
#
KTM960X_VAL_SWEEP_DIRECTION_UP = 0
KTM960X_VAL_SWEEP_DIRECTION_DOWN = 1
# - Defined values for
# attribute KTM960X_ATTR_OUTPUT_OPERATION_MODE
KTM960X_VAL_OUTPUT_OPERATION_MODE_STANDARD = 0
KTM960X_VAL_OUTPUT_OPERATION_MODE_POWER_SUPPLY = 1
# - Defined values for
# attribute KTM960X_ATTR_MEASUREMENT_ACQUISITION_MODE
KTM960X_VAL_ACQUISITION_MODE_NORMAL = 0
KTM960X_VAL_ACQUISITION_MODE_SAMPLING = 1
# - Defined values for
# attribute KTM960X_ATTR_MODULE_IO_EXTERNAL_EDGE_POSITION
# attribute KTM960X_ATTR_MODULE_IO_PXIE_EDGE_POSITION
KTM960X_VAL_IO_EDGE_POSITION_BEFORE = 0
KTM960X_VAL_IO_EDGE_POSITION_AFTER = 1
KTM960X_VAL_IO_EDGE_POSITION_BOTH = 2
# - Defined values for
# attribute KTM960X_ATTR_MODULE_IO_EXTERNAL_FUNCTION
# attribute KTM960X_ATTR_MODULE_IO_PXIE_FUNCTION
KTM960X_VAL_IO_FUNCTION_TRIGGER_OUTPUT = 0
KTM960X_VAL_IO_FUNCTION_TRIGGER_INPUT = 1
KTM960X_VAL_IO_FUNCTION_DIGITAL_OUTPUT = 2
# - Defined values for
# attribute KTM960X_ATTR_MODULE_IO_EXTERNAL_LEVEL
# attribute KTM960X_ATTR_MODULE_IO_PXIE_LEVEL
# parameter Val in function KtM960x_ModuleIOExternalRead
# parameter Val in function KtM960x_ModuleIOPxieRead
KTM960X_VAL_IO_LEVEL_HIGH = 0
KTM960X_VAL_IO_LEVEL_LOW = | |
postCellId="../AIBR/0/"/>
</projection>
<projection id="NC_SDQL_ALML_Acetylcholine" postsynapticPopulation="ALML" presynapticPopulation="SDQL" synapse="">
<connection id="0" preCellId="../SDQL/0/" postCellId="../ALML/0/"/>
</projection>
<projection id="NC_SDQL_AVAL_Acetylcholine" postsynapticPopulation="AVAL" presynapticPopulation="SDQL" synapse="">
<connection id="0" preCellId="../SDQL/0/" postCellId="../AVAL/0/"/>
</projection>
<projection id="NC_SDQL_AVAR_Acetylcholine" postsynapticPopulation="AVAR" presynapticPopulation="SDQL" synapse="">
<connection id="0" preCellId="../SDQL/0/" postCellId="../AVAR/0/"/>
</projection>
<projection id="NC_SDQL_AVEL_Acetylcholine" postsynapticPopulation="AVEL" presynapticPopulation="SDQL" synapse="">
<connection id="0" preCellId="../SDQL/0/" postCellId="../AVEL/0/"/>
</projection>
<projection id="NC_SDQL_FLPL_Acetylcholine" postsynapticPopulation="FLPL" presynapticPopulation="SDQL" synapse="">
<connection id="0" preCellId="../SDQL/0/" postCellId="../FLPL/0/"/>
</projection>
<projection id="NC_SDQL_RICR_Acetylcholine" postsynapticPopulation="RICR" presynapticPopulation="SDQL" synapse="">
<connection id="0" preCellId="../SDQL/0/" postCellId="../RICR/0/"/>
</projection>
<projection id="NC_SDQL_RIS_Acetylcholine" postsynapticPopulation="RIS" presynapticPopulation="SDQL" synapse="">
<connection id="0" preCellId="../SDQL/0/" postCellId="../RIS/0/"/>
</projection>
<projection id="NC_SDQL_RMFL_Acetylcholine" postsynapticPopulation="RMFL" presynapticPopulation="SDQL" synapse="">
<connection id="0" preCellId="../SDQL/0/" postCellId="../RMFL/0/"/>
</projection>
<projection id="NC_SDQL_SDQR_Generic_GJ" postsynapticPopulation="SDQR" presynapticPopulation="SDQL" synapse="">
<connection id="0" preCellId="../SDQL/0/" postCellId="../SDQR/0/"/>
</projection>
<projection id="NC_SDQR_ADLL_Acetylcholine" postsynapticPopulation="ADLL" presynapticPopulation="SDQR" synapse="">
<connection id="0" preCellId="../SDQR/0/" postCellId="../ADLL/0/"/>
</projection>
<projection id="NC_SDQR_AIBL_Acetylcholine" postsynapticPopulation="AIBL" presynapticPopulation="SDQR" synapse="">
<connection id="0" preCellId="../SDQR/0/" postCellId="../AIBL/0/"/>
</projection>
<projection id="NC_SDQR_AVAL_Generic_GJ" postsynapticPopulation="AVAL" presynapticPopulation="SDQR" synapse="">
<connection id="0" preCellId="../SDQR/0/" postCellId="../AVAL/0/"/>
</projection>
<projection id="NC_SDQR_AVAL_Acetylcholine" postsynapticPopulation="AVAL" presynapticPopulation="SDQR" synapse="">
<connection id="0" preCellId="../SDQR/0/" postCellId="../AVAL/0/"/>
</projection>
<projection id="NC_SDQR_AVBL_Generic_GJ" postsynapticPopulation="AVBL" presynapticPopulation="SDQR" synapse="">
<connection id="0" preCellId="../SDQR/0/" postCellId="../AVBL/0/"/>
</projection>
<projection id="NC_SDQR_AVBL_Acetylcholine" postsynapticPopulation="AVBL" presynapticPopulation="SDQR" synapse="">
<connection id="0" preCellId="../SDQR/0/" postCellId="../AVBL/0/"/>
</projection>
<projection id="NC_SDQR_AVBR_Acetylcholine" postsynapticPopulation="AVBR" presynapticPopulation="SDQR" synapse="">
<connection id="0" preCellId="../SDQR/0/" postCellId="../AVBR/0/"/>
</projection>
<projection id="NC_SDQR_DVA_Acetylcholine" postsynapticPopulation="DVA" presynapticPopulation="SDQR" synapse="">
<connection id="0" preCellId="../SDQR/0/" postCellId="../DVA/0/"/>
</projection>
<projection id="NC_SDQR_RICR_Acetylcholine" postsynapticPopulation="RICR" presynapticPopulation="SDQR" synapse="">
<connection id="0" preCellId="../SDQR/0/" postCellId="../RICR/0/"/>
</projection>
<projection id="NC_SDQR_RIVL_Generic_GJ" postsynapticPopulation="RIVL" presynapticPopulation="SDQR" synapse="">
<connection id="0" preCellId="../SDQR/0/" postCellId="../RIVL/0/"/>
</projection>
<projection id="NC_SDQR_RIVR_Generic_GJ" postsynapticPopulation="RIVR" presynapticPopulation="SDQR" synapse="">
<connection id="0" preCellId="../SDQR/0/" postCellId="../RIVR/0/"/>
</projection>
<projection id="NC_SDQR_RMHL_Acetylcholine" postsynapticPopulation="RMHL" presynapticPopulation="SDQR" synapse="">
<connection id="0" preCellId="../SDQR/0/" postCellId="../RMHL/0/"/>
</projection>
<projection id="NC_SDQR_RMHR_Acetylcholine" postsynapticPopulation="RMHR" presynapticPopulation="SDQR" synapse="">
<connection id="0" preCellId="../SDQR/0/" postCellId="../RMHR/0/"/>
</projection>
<projection id="NC_SDQR_SDQL_Generic_GJ" postsynapticPopulation="SDQL" presynapticPopulation="SDQR" synapse="">
<connection id="0" preCellId="../SDQR/0/" postCellId="../SDQL/0/"/>
</projection>
<projection id="NC_SDQR_SIBVL_Generic_GJ" postsynapticPopulation="SIBVL" presynapticPopulation="SDQR" synapse="">
<connection id="0" preCellId="../SDQR/0/" postCellId="../SIBVL/0/"/>
</projection>
<projection id="NC_SIADL_RIBL_Generic_GJ" postsynapticPopulation="RIBL" presynapticPopulation="SIADL" synapse="">
<connection id="0" preCellId="../SIADL/0/" postCellId="../RIBL/0/"/>
</projection>
<projection id="NC_SIADR_RIBR_Generic_GJ" postsynapticPopulation="RIBR" presynapticPopulation="SIADR" synapse="">
<connection id="0" preCellId="../SIADR/0/" postCellId="../RIBR/0/"/>
</projection>
<projection id="NC_SIAVL_RIBL_Generic_GJ" postsynapticPopulation="RIBL" presynapticPopulation="SIAVL" synapse="">
<connection id="0" preCellId="../SIAVL/0/" postCellId="../RIBL/0/"/>
</projection>
<projection id="NC_SIAVR_RIBR_Generic_GJ" postsynapticPopulation="RIBR" presynapticPopulation="SIAVR" synapse="">
<connection id="0" preCellId="../SIAVR/0/" postCellId="../RIBR/0/"/>
</projection>
<projection id="NC_SIBDL_RIBL_Generic_GJ" postsynapticPopulation="RIBL" presynapticPopulation="SIBDL" synapse="">
<connection id="0" preCellId="../SIBDL/0/" postCellId="../RIBL/0/"/>
</projection>
<projection id="NC_SIBDL_SIBVL_Generic_GJ" postsynapticPopulation="SIBVL" presynapticPopulation="SIBDL" synapse="">
<connection id="0" preCellId="../SIBDL/0/" postCellId="../SIBVL/0/"/>
</projection>
<projection id="NC_SIBDR_AIML_Generic_GJ" postsynapticPopulation="AIML" presynapticPopulation="SIBDR" synapse="">
<connection id="0" preCellId="../SIBDR/0/" postCellId="../AIML/0/"/>
</projection>
<projection id="NC_SIBDR_RIBR_Generic_GJ" postsynapticPopulation="RIBR" presynapticPopulation="SIBDR" synapse="">
<connection id="0" preCellId="../SIBDR/0/" postCellId="../RIBR/0/"/>
</projection>
<projection id="NC_SIBDR_SIBVR_Generic_GJ" postsynapticPopulation="SIBVR" presynapticPopulation="SIBDR" synapse="">
<connection id="0" preCellId="../SIBDR/0/" postCellId="../SIBVR/0/"/>
</projection>
<projection id="NC_SIBVL_AVBL_Generic_GJ" postsynapticPopulation="AVBL" presynapticPopulation="SIBVL" synapse="">
<connection id="0" preCellId="../SIBVL/0/" postCellId="../AVBL/0/"/>
</projection>
<projection id="NC_SIBVL_AVBR_Generic_GJ" postsynapticPopulation="AVBR" presynapticPopulation="SIBVL" synapse="">
<connection id="0" preCellId="../SIBVL/0/" postCellId="../AVBR/0/"/>
</projection>
<projection id="NC_SIBVL_RIBL_Generic_GJ" postsynapticPopulation="RIBL" presynapticPopulation="SIBVL" synapse="">
<connection id="0" preCellId="../SIBVL/0/" postCellId="../RIBL/0/"/>
</projection>
<projection id="NC_SIBVL_SDQR_Generic_GJ" postsynapticPopulation="SDQR" presynapticPopulation="SIBVL" synapse="">
<connection id="0" preCellId="../SIBVL/0/" postCellId="../SDQR/0/"/>
</projection>
<projection id="NC_SIBVL_SIBDL_Generic_GJ" postsynapticPopulation="SIBDL" presynapticPopulation="SIBVL" synapse="">
<connection id="0" preCellId="../SIBVL/0/" postCellId="../SIBDL/0/"/>
</projection>
<projection id="NC_SIBVR_RIBL_Generic_GJ" postsynapticPopulation="RIBL" presynapticPopulation="SIBVR" synapse="">
<connection id="0" preCellId="../SIBVR/0/" postCellId="../RIBL/0/"/>
</projection>
<projection id="NC_SIBVR_RIBR_Generic_GJ" postsynapticPopulation="RIBR" presynapticPopulation="SIBVR" synapse="">
<connection id="0" preCellId="../SIBVR/0/" postCellId="../RIBR/0/"/>
</projection>
<projection id="NC_SIBVR_RMHL_Generic_GJ" postsynapticPopulation="RMHL" presynapticPopulation="SIBVR" synapse="">
<connection id="0" preCellId="../SIBVR/0/" postCellId="../RMHL/0/"/>
</projection>
<projection id="NC_SIBVR_SIBDR_Generic_GJ" postsynapticPopulation="SIBDR" presynapticPopulation="SIBVR" synapse="">
<connection id="0" preCellId="../SIBVR/0/" postCellId="../SIBDR/0/"/>
</projection>
<projection id="NC_SMBDL_AVAR_Acetylcholine" postsynapticPopulation="AVAR" presynapticPopulation="SMBDL" synapse="">
<connection id="0" preCellId="../SMBDL/0/" postCellId="../AVAR/0/"/>
</projection>
<projection id="NC_SMBDL_AVKL_Generic_GJ" postsynapticPopulation="AVKL" presynapticPopulation="SMBDL" synapse="">
<connection id="0" preCellId="../SMBDL/0/" postCellId="../AVKL/0/"/>
</projection>
<projection id="NC_SMBDL_AVKR_Generic_GJ" postsynapticPopulation="AVKR" presynapticPopulation="SMBDL" synapse="">
<connection id="0" preCellId="../SMBDL/0/" postCellId="../AVKR/0/"/>
</projection>
<projection id="NC_SMBDL_RIBL_Generic_GJ" postsynapticPopulation="RIBL" presynapticPopulation="SMBDL" synapse="">
<connection id="0" preCellId="../SMBDL/0/" postCellId="../RIBL/0/"/>
</projection>
<projection id="NC_SMBDL_RMED_Acetylcholine" postsynapticPopulation="RMED" presynapticPopulation="SMBDL" synapse="">
<connection id="0" preCellId="../SMBDL/0/" postCellId="../RMED/0/"/>
</projection>
<projection id="NC_SMBDL_SAADL_Generic_GJ" postsynapticPopulation="SAADL" presynapticPopulation="SMBDL" synapse="">
<connection id="0" preCellId="../SMBDL/0/" postCellId="../SAADL/0/"/>
</projection>
<projection id="NC_SMBDL_SAAVR_Acetylcholine" postsynapticPopulation="SAAVR" presynapticPopulation="SMBDL" synapse="">
<connection id="0" preCellId="../SMBDL/0/" postCellId="../SAAVR/0/"/>
</projection>
<projection id="NC_SMBDR_ALNL_Generic_GJ" postsynapticPopulation="ALNL" presynapticPopulation="SMBDR" synapse="">
<connection id="0" preCellId="../SMBDR/0/" postCellId="../ALNL/0/"/>
</projection>
<projection id="NC_SMBDR_AVAL_Acetylcholine" postsynapticPopulation="AVAL" presynapticPopulation="SMBDR" synapse="">
<connection id="0" preCellId="../SMBDR/0/" postCellId="../AVAL/0/"/>
</projection>
<projection id="NC_SMBDR_AVKL_Generic_GJ" postsynapticPopulation="AVKL" presynapticPopulation="SMBDR" synapse="">
<connection id="0" preCellId="../SMBDR/0/" postCellId="../AVKL/0/"/>
</projection>
<projection id="NC_SMBDR_AVKR_Generic_GJ" postsynapticPopulation="AVKR" presynapticPopulation="SMBDR" synapse="">
<connection id="0" preCellId="../SMBDR/0/" postCellId="../AVKR/0/"/>
</projection>
<projection id="NC_SMBDR_RIBR_Generic_GJ" postsynapticPopulation="RIBR" presynapticPopulation="SMBDR" synapse="">
<connection id="0" preCellId="../SMBDR/0/" postCellId="../RIBR/0/"/>
</projection>
<projection id="NC_SMBDR_RMED_Acetylcholine" postsynapticPopulation="RMED" presynapticPopulation="SMBDR" synapse="">
<connection id="0" preCellId="../SMBDR/0/" postCellId="../RMED/0/"/>
</projection>
<projection id="NC_SMBDR_SAAVL_Acetylcholine" postsynapticPopulation="SAAVL" presynapticPopulation="SMBDR" synapse="">
<connection id="0" preCellId="../SMBDR/0/" postCellId="../SAAVL/0/"/>
</projection>
<projection id="NC_SMBVL_PLNL_Acetylcholine" postsynapticPopulation="PLNL" presynapticPopulation="SMBVL" synapse="">
<connection id="0" preCellId="../SMBVL/0/" postCellId="../PLNL/0/"/>
</projection>
<projection id="NC_SMBVL_RMEV_Acetylcholine" postsynapticPopulation="RMEV" presynapticPopulation="SMBVL" synapse="">
<connection id="0" preCellId="../SMBVL/0/" postCellId="../RMEV/0/"/>
</projection>
<projection id="NC_SMBVL_SAADL_Acetylcholine" postsynapticPopulation="SAADL" presynapticPopulation="SMBVL" synapse="">
<connection id="0" preCellId="../SMBVL/0/" postCellId="../SAADL/0/"/>
</projection>
<projection id="NC_SMBVL_SAAVR_Generic_GJ" postsynapticPopulation="SAAVR" presynapticPopulation="SMBVL" synapse="">
<connection id="0" preCellId="../SMBVL/0/" postCellId="../SAAVR/0/"/>
</projection>
<projection id="NC_SMBVR_AVKL_Generic_GJ" postsynapticPopulation="AVKL" presynapticPopulation="SMBVR" synapse="">
<connection id="0" preCellId="../SMBVR/0/" postCellId="../AVKL/0/"/>
</projection>
<projection id="NC_SMBVR_AVKR_Generic_GJ" postsynapticPopulation="AVKR" presynapticPopulation="SMBVR" synapse="">
<connection id="0" preCellId="../SMBVR/0/" postCellId="../AVKR/0/"/>
</projection>
<projection id="NC_SMBVR_RMEV_Acetylcholine" postsynapticPopulation="RMEV" presynapticPopulation="SMBVR" synapse="">
<connection id="0" preCellId="../SMBVR/0/" postCellId="../RMEV/0/"/>
</projection>
<projection id="NC_SMBVR_SAADR_Acetylcholine" postsynapticPopulation="SAADR" presynapticPopulation="SMBVR" synapse="">
<connection id="0" preCellId="../SMBVR/0/" postCellId="../SAADR/0/"/>
</projection>
<projection id="NC_SMBVR_SAAVL_Generic_GJ" postsynapticPopulation="SAAVL" presynapticPopulation="SMBVR" synapse="">
<connection id="0" preCellId="../SMBVR/0/" postCellId="../SAAVL/0/"/>
</projection>
<projection id="NC_SMDDL_RIAL_Acetylcholine" postsynapticPopulation="RIAL" presynapticPopulation="SMDDL" synapse="">
<connection id="0" preCellId="../SMDDL/0/" postCellId="../RIAL/0/"/>
</projection>
<projection id="NC_SMDDL_RIAR_Acetylcholine" postsynapticPopulation="RIAR" presynapticPopulation="SMDDL" synapse="">
<connection id="0" preCellId="../SMDDL/0/" postCellId="../RIAR/0/"/>
</projection>
<projection id="NC_SMDDL_RIBL_Generic_GJ" postsynapticPopulation="RIBL" presynapticPopulation="SMDDL" synapse="">
<connection id="0" preCellId="../SMDDL/0/" postCellId="../RIBL/0/"/>
</projection>
<projection id="NC_SMDDL_RIBR_Generic_GJ" postsynapticPopulation="RIBR" presynapticPopulation="SMDDL" synapse="">
<connection id="0" preCellId="../SMDDL/0/" postCellId="../RIBR/0/"/>
</projection>
<projection id="NC_SMDDL_RIS_Generic_GJ" postsynapticPopulation="RIS" presynapticPopulation="SMDDL" synapse="">
<connection id="0" preCellId="../SMDDL/0/" postCellId="../RIS/0/"/>
</projection>
<projection id="NC_SMDDL_RMDDL_Generic_GJ" postsynapticPopulation="RMDDL" presynapticPopulation="SMDDL" synapse="">
<connection id="0" preCellId="../SMDDL/0/" postCellId="../RMDDL/0/"/>
</projection>
<projection id="NC_SMDDL_SMDVR_Acetylcholine" postsynapticPopulation="SMDVR" presynapticPopulation="SMDDL" synapse="">
<connection id="0" preCellId="../SMDDL/0/" postCellId="../SMDVR/0/"/>
</projection>
<projection id="NC_SMDDR_RIAL_Acetylcholine" postsynapticPopulation="RIAL" presynapticPopulation="SMDDR" synapse="">
<connection id="0" preCellId="../SMDDR/0/" postCellId="../RIAL/0/"/>
</projection>
<projection id="NC_SMDDR_RIAR_Acetylcholine" postsynapticPopulation="RIAR" presynapticPopulation="SMDDR" synapse="">
<connection id="0" preCellId="../SMDDR/0/" postCellId="../RIAR/0/"/>
</projection>
<projection id="NC_SMDDR_RIBR_Generic_GJ" postsynapticPopulation="RIBR" presynapticPopulation="SMDDR" synapse="">
<connection id="0" preCellId="../SMDDR/0/" postCellId="../RIBR/0/"/>
</projection>
<projection id="NC_SMDDR_RIS_Generic_GJ" postsynapticPopulation="RIS" presynapticPopulation="SMDDR" synapse="">
<connection id="0" preCellId="../SMDDR/0/" postCellId="../RIS/0/"/>
</projection>
<projection id="NC_SMDDR_RMDDR_Generic_GJ" postsynapticPopulation="RMDDR" presynapticPopulation="SMDDR" synapse="">
<connection id="0" preCellId="../SMDDR/0/" postCellId="../RMDDR/0/"/>
</projection>
<projection id="NC_SMDDR_VD1_Generic_GJ" postsynapticPopulation="VD1" presynapticPopulation="SMDDR" synapse="">
<connection id="0" preCellId="../SMDDR/0/" postCellId="../VD1/0/"/>
</projection>
<projection id="NC_SMDVL_PVR_Acetylcholine" postsynapticPopulation="PVR" presynapticPopulation="SMDVL" synapse="">
<connection id="0" preCellId="../SMDVL/0/" postCellId="../PVR/0/"/>
</projection>
<projection id="NC_SMDVL_RIAL_Acetylcholine" postsynapticPopulation="RIAL" presynapticPopulation="SMDVL" synapse="">
<connection id="0" preCellId="../SMDVL/0/" postCellId="../RIAL/0/"/>
</projection>
<projection id="NC_SMDVL_RIAR_Acetylcholine" postsynapticPopulation="RIAR" presynapticPopulation="SMDVL" synapse="">
<connection id="0" preCellId="../SMDVL/0/" postCellId="../RIAR/0/"/>
</projection>
<projection id="NC_SMDVL_RIBR_Generic_GJ" postsynapticPopulation="RIBR" presynapticPopulation="SMDVL" synapse="">
<connection id="0" preCellId="../SMDVL/0/" postCellId="../RIBR/0/"/>
</projection>
<projection id="NC_SMDVL_RIS_Generic_GJ" postsynapticPopulation="RIS" presynapticPopulation="SMDVL" synapse="">
<connection id="0" preCellId="../SMDVL/0/" postCellId="../RIS/0/"/>
</projection>
<projection id="NC_SMDVL_RIVL_Generic_GJ" postsynapticPopulation="RIVL" presynapticPopulation="SMDVL" synapse="">
<connection id="0" preCellId="../SMDVL/0/" postCellId="../RIVL/0/"/>
</projection>
<projection id="NC_SMDVL_RIVL_Acetylcholine" postsynapticPopulation="RIVL" presynapticPopulation="SMDVL" synapse="">
<connection id="0" preCellId="../SMDVL/0/" postCellId="../RIVL/0/"/>
</projection>
<projection id="NC_SMDVL_RMDDR_Acetylcholine" postsynapticPopulation="RMDDR" presynapticPopulation="SMDVL" synapse="">
<connection id="0" preCellId="../SMDVL/0/" postCellId="../RMDDR/0/"/>
</projection>
<projection id="NC_SMDVL_RMDVL_Generic_GJ" postsynapticPopulation="RMDVL" presynapticPopulation="SMDVL" synapse="">
<connection id="0" preCellId="../SMDVL/0/" postCellId="../RMDVL/0/"/>
</projection>
<projection id="NC_SMDVL_SMDDR_Acetylcholine" postsynapticPopulation="SMDDR" presynapticPopulation="SMDVL" synapse="">
<connection id="0" preCellId="../SMDVL/0/" postCellId="../SMDDR/0/"/>
</projection>
<projection id="NC_SMDVL_SMDVR_Generic_GJ" postsynapticPopulation="SMDVR" presynapticPopulation="SMDVL" synapse="">
<connection id="0" preCellId="../SMDVL/0/" postCellId="../SMDVR/0/"/>
</projection>
<projection id="NC_SMDVR_RIAL_Acetylcholine" postsynapticPopulation="RIAL" presynapticPopulation="SMDVR" synapse="">
<connection id="0" preCellId="../SMDVR/0/" postCellId="../RIAL/0/"/>
</projection>
<projection id="NC_SMDVR_RIAR_Acetylcholine" postsynapticPopulation="RIAR" presynapticPopulation="SMDVR" synapse="">
<connection id="0" preCellId="../SMDVR/0/" postCellId="../RIAR/0/"/>
</projection>
<projection id="NC_SMDVR_RIBL_Generic_GJ" postsynapticPopulation="RIBL" presynapticPopulation="SMDVR" synapse="">
<connection id="0" preCellId="../SMDVR/0/" postCellId="../RIBL/0/"/>
</projection>
<projection id="NC_SMDVR_RIVR_Acetylcholine" postsynapticPopulation="RIVR" presynapticPopulation="SMDVR" synapse="">
<connection id="0" preCellId="../SMDVR/0/" postCellId="../RIVR/0/"/>
</projection>
<projection id="NC_SMDVR_RIVR_Generic_GJ" postsynapticPopulation="RIVR" presynapticPopulation="SMDVR" synapse="">
<connection id="0" preCellId="../SMDVR/0/" postCellId="../RIVR/0/"/>
</projection>
<projection id="NC_SMDVR_RMDDL_Acetylcholine" postsynapticPopulation="RMDDL" presynapticPopulation="SMDVR" synapse="">
<connection id="0" preCellId="../SMDVR/0/" postCellId="../RMDDL/0/"/>
</projection>
<projection id="NC_SMDVR_RMDVR_Generic_GJ" postsynapticPopulation="RMDVR" presynapticPopulation="SMDVR" synapse="">
<connection id="0" preCellId="../SMDVR/0/" postCellId="../RMDVR/0/"/>
</projection>
<projection id="NC_SMDVR_SMDDL_Acetylcholine" postsynapticPopulation="SMDDL" presynapticPopulation="SMDVR" synapse="">
<connection id="0" preCellId="../SMDVR/0/" postCellId="../SMDDL/0/"/>
</projection>
<projection id="NC_SMDVR_SMDVL_Generic_GJ" postsynapticPopulation="SMDVL" presynapticPopulation="SMDVR" synapse="">
<connection id="0" preCellId="../SMDVR/0/" postCellId="../SMDVL/0/"/>
</projection>
<projection id="NC_SMDVR_VB1_Generic_GJ" postsynapticPopulation="VB1" presynapticPopulation="SMDVR" synapse="">
<connection id="0" preCellId="../SMDVR/0/" postCellId="../VB1/0/"/>
</projection>
<projection id="NC_URADL_IL1DL_Acetylcholine" postsynapticPopulation="IL1DL" presynapticPopulation="URADL" synapse="">
<connection id="0" preCellId="../URADL/0/" postCellId="../IL1DL/0/"/>
</projection>
<projection id="NC_URADL_RIPL_Acetylcholine" postsynapticPopulation="RIPL" presynapticPopulation="URADL" synapse="">
<connection id="0" preCellId="../URADL/0/" postCellId="../RIPL/0/"/>
</projection>
<projection id="NC_URADL_RMEL_Acetylcholine" postsynapticPopulation="RMEL" presynapticPopulation="URADL" synapse="">
<connection id="0" preCellId="../URADL/0/" postCellId="../RMEL/0/"/>
</projection>
<projection id="NC_URADR_IL1DR_Acetylcholine" postsynapticPopulation="IL1DR" presynapticPopulation="URADR" synapse="">
<connection id="0" preCellId="../URADR/0/" postCellId="../IL1DR/0/"/>
</projection>
<projection id="NC_URADR_RIPR_Acetylcholine" postsynapticPopulation="RIPR" presynapticPopulation="URADR" synapse="">
<connection id="0" preCellId="../URADR/0/" postCellId="../RIPR/0/"/>
</projection>
<projection id="NC_URADR_RMDVR_Acetylcholine" postsynapticPopulation="RMDVR" presynapticPopulation="URADR" synapse="">
<connection id="0" preCellId="../URADR/0/" postCellId="../RMDVR/0/"/>
</projection>
<projection id="NC_URADR_RMED_Acetylcholine" postsynapticPopulation="RMED" presynapticPopulation="URADR" synapse="">
<connection id="0" preCellId="../URADR/0/" postCellId="../RMED/0/"/>
</projection>
<projection id="NC_URADR_RMER_Acetylcholine" postsynapticPopulation="RMER" presynapticPopulation="URADR" synapse="">
<connection id="0" preCellId="../URADR/0/" postCellId="../RMER/0/"/>
</projection>
<projection id="NC_URADR_URYDR_Acetylcholine" postsynapticPopulation="URYDR" presynapticPopulation="URADR" synapse="">
<connection id="0" preCellId="../URADR/0/" postCellId="../URYDR/0/"/>
</projection>
<projection id="NC_URAVL_RIPL_Acetylcholine" postsynapticPopulation="RIPL" presynapticPopulation="URAVL" synapse="">
<connection id="0" preCellId="../URAVL/0/" postCellId="../RIPL/0/"/>
</projection>
<projection id="NC_URAVL_RMEL_Acetylcholine" postsynapticPopulation="RMEL" presynapticPopulation="URAVL" synapse="">
<connection id="0" preCellId="../URAVL/0/" postCellId="../RMEL/0/"/>
</projection>
<projection id="NC_URAVL_RMER_Acetylcholine" postsynapticPopulation="RMER" presynapticPopulation="URAVL" synapse="">
<connection id="0" preCellId="../URAVL/0/" postCellId="../RMER/0/"/>
</projection>
<projection id="NC_URAVL_RMEV_Acetylcholine" postsynapticPopulation="RMEV" presynapticPopulation="URAVL" synapse="">
<connection id="0" preCellId="../URAVL/0/" postCellId="../RMEV/0/"/>
</projection>
<projection id="NC_URAVR_IL1R_Acetylcholine" postsynapticPopulation="IL1R" presynapticPopulation="URAVR" synapse="">
<connection id="0" preCellId="../URAVR/0/" postCellId="../IL1R/0/"/>
</projection>
<projection id="NC_URAVR_RIPR_Acetylcholine" postsynapticPopulation="RIPR" presynapticPopulation="URAVR" synapse="">
<connection id="0" preCellId="../URAVR/0/" postCellId="../RIPR/0/"/>
</projection>
<projection id="NC_URAVR_RMDVL_Acetylcholine" postsynapticPopulation="RMDVL" presynapticPopulation="URAVR" synapse="">
<connection id="0" preCellId="../URAVR/0/" postCellId="../RMDVL/0/"/>
</projection>
<projection id="NC_URAVR_RMER_Acetylcholine" postsynapticPopulation="RMER" presynapticPopulation="URAVR" synapse="">
<connection id="0" preCellId="../URAVR/0/" postCellId="../RMER/0/"/>
</projection>
<projection id="NC_URAVR_RMEV_Acetylcholine" postsynapticPopulation="RMEV" presynapticPopulation="URAVR" synapse="">
<connection id="0" preCellId="../URAVR/0/" postCellId="../RMEV/0/"/>
</projection>
<projection id="NC_URBL_AVBL_Acetylcholine" postsynapticPopulation="AVBL" presynapticPopulation="URBL" synapse="">
<connection id="0" preCellId="../URBL/0/" postCellId="../AVBL/0/"/>
</projection>
<projection id="NC_URBL_CEPDL_Acetylcholine" postsynapticPopulation="CEPDL" presynapticPopulation="URBL" synapse="">
<connection id="0" preCellId="../URBL/0/" postCellId="../CEPDL/0/"/>
</projection>
<projection id="NC_URBL_IL1L_Acetylcholine" postsynapticPopulation="IL1L" presynapticPopulation="URBL" synapse="">
<connection id="0" preCellId="../URBL/0/" postCellId="../IL1L/0/"/>
</projection>
<projection id="NC_URBL_OLQDL_Generic_GJ" postsynapticPopulation="OLQDL" presynapticPopulation="URBL" synapse="">
<connection id="0" preCellId="../URBL/0/" postCellId="../OLQDL/0/"/>
</projection>
<projection id="NC_URBL_OLQVL_Generic_GJ" postsynapticPopulation="OLQVL" presynapticPopulation="URBL" synapse="">
<connection id="0" preCellId="../URBL/0/" postCellId="../OLQVL/0/"/>
</projection>
<projection id="NC_URBL_RICR_Acetylcholine" postsynapticPopulation="RICR" presynapticPopulation="URBL" synapse="">
<connection id="0" preCellId="../URBL/0/" postCellId="../RICR/0/"/>
</projection>
<projection id="NC_URBL_RMDDR_Acetylcholine" postsynapticPopulation="RMDDR" presynapticPopulation="URBL" synapse="">
<connection id="0" preCellId="../URBL/0/" postCellId="../RMDDR/0/"/>
</projection>
<projection id="NC_URBL_SIAVL_Acetylcholine" postsynapticPopulation="SIAVL" presynapticPopulation="URBL" synapse="">
<connection id="0" preCellId="../URBL/0/" postCellId="../SIAVL/0/"/>
</projection>
<projection id="NC_URBL_SMBDR_Acetylcholine" postsynapticPopulation="SMBDR" presynapticPopulation="URBL" synapse="">
<connection id="0" preCellId="../URBL/0/" postCellId="../SMBDR/0/"/>
</projection>
<projection id="NC_URBL_URXL_Acetylcholine" postsynapticPopulation="URXL" presynapticPopulation="URBL" synapse="">
<connection id="0" preCellId="../URBL/0/" postCellId="../URXL/0/"/>
</projection>
<projection id="NC_URBR_ADAR_Acetylcholine" postsynapticPopulation="ADAR" presynapticPopulation="URBR" synapse="">
<connection id="0" preCellId="../URBR/0/" | |
import click
from collections import defaultdict
from copy import deepcopy
import io
import json
from langid.langid import LanguageIdentifier, model
import numpy as np
import os
import pickle
import random
import xml.sax
from .sequence_similarity import check_sequence_similarity, print_alignment_stats
from .database import load_alignments_from_sqlite, save_alignments_to_sqlite
from .xml_parser import clean_tei, convert_to_page_id, \
create_ocr_gt_id_mappings, extract_page_fulltext, TEIHandler
from qurator.sbb_ocr_postcorrection.data_structures import Corpus
from qurator.sbb_ocr_postcorrection.feature_extraction.encoding import add_padding
from qurator.sbb_ocr_postcorrection.helpers import add_seq_id_to_aligned_seq, \
align_context, combine_sequences_to_str, \
create_incremental_context_alignment, gather_aligned_sequences, \
get_file_paths, get_gt_path_subset, normalize_char_alignments, \
normalize_data_encoding, split_into_adjacent_parts, unsqueeze_corpus
from qurator.dinglehopper.align import align, seq_align
@click.command()
@click.argument('ocr-dir', type=click.Path(exists=True))
@click.argument('gt-dir', type=click.Path(exists=True))
@click.argument('out-dir', type=click.Path(exists=False))
def align_sequences(ocr_dir, gt_dir, out_dir):
    '''
    Align OCR and GT sequences page by page and dump the result as JSON.

    Both inputs are JSON dicts keyed by document id, then page id; OCR and
    GT must contain the same document/page ids in the same order (asserted
    below). Per page the output tuple is:
    (ocr, gt, character_error_rates, levenshtein_distances, min_distances,
     max_distances, similarity_encoding).
    \b
    Arguments:
        ocr-dir -- The absolute path to the OCR json file.
        gt-dir -- The absolute path to the GT json file.
        out-dir -- The absolute path to the aligned seq json file.
    '''
    # make paths absolute
    ocr_dir = os.path.abspath(ocr_dir)
    gt_dir = os.path.abspath(gt_dir)
    out_dir = os.path.abspath(out_dir)
    # Local switches; char_alignment additionally aligns character-by-character.
    print_doc_stats = True
    print_page_stats = False
    char_alignment = False
    with io.open(ocr_dir, mode='r') as f_in:
        ocr_data = json.load(f_in)
    with io.open(gt_dir, mode='r') as f_in:
        gt_data = json.load(f_in)
    # Unicode-normalize both sides so equal glyphs compare equal (NFC).
    ocr_data = normalize_data_encoding(ocr_data, form='NFC')
    gt_data = normalize_data_encoding(gt_data, form='NFC')
    ###################################
    #                                 #
    #  GT and OCR Sequence Alignment  #
    #                                 #
    ###################################
    total_similar_sequences = 0
    total_sequences = 0
    aligned_corpus = defaultdict(defaultdict)
    if char_alignment:
        # NOTE(review): collected but never written out below — confirm intent.
        char_aligned_corpus = defaultdict(defaultdict)
    print('\nSTART: Sequence Alignment')
    # zip over dicts iterates keys; OCR/GT must enumerate ids in lockstep.
    for ocr_doc_id, gt_doc_id in zip(ocr_data, gt_data):
        assert ocr_doc_id == gt_doc_id, 'OCR Doc ID and GT Doc ID are not identical: {} (OCR) | {} (GT).'.format(ocr_doc_id, gt_doc_id)
        if print_page_stats:
            print('\n\nDocument ID: {}'.format(ocr_doc_id))
        doc_similar_sequences = 0
        doc_sequences = 0
        aligned_doc = defaultdict(list)
        for ocr_page_id, gt_page_id in zip(ocr_data[ocr_doc_id], gt_data[gt_doc_id]):
            # pre-check if IDs are okay and pages contain text
            assert ocr_page_id == gt_page_id, 'OCR Page ID and GT Page ID are not identical: {} (OCR) / {} (GT).'.format(ocr_page_id, gt_page_id)
            gt_page_length = len(gt_data[gt_doc_id][gt_page_id])
            ocr_page_length = len(ocr_data[ocr_doc_id][ocr_page_id])
            if gt_page_length == 0 or ocr_page_length == 0:
                continue
            # sequence alignment and similarity check
            aligned_sequences = seq_align(ocr_data[ocr_doc_id][ocr_page_id], gt_data[gt_doc_id][gt_page_id])
            aligned_sequences_with_id = add_seq_id_to_aligned_seq(aligned_sequences)
            ocr, gt, character_error_rates, levenshtein_distances, min_distances, max_distances, similarity_encoding = check_sequence_similarity(aligned_sequences_with_id, similarity_range=(0.00, 0.10))
            assert len(ocr) == len(gt) == len(similarity_encoding), '# of OCR and GT sequences are not identical: {} (OCR) | {} (GT).'.format(ocr, gt)
            # some stats
            doc_sequences += gt_page_length
            total_sequences += gt_page_length
            num_similar_sequences = sum(similarity_encoding)
            doc_similar_sequences += num_similar_sequences
            total_similar_sequences += num_similar_sequences
            if print_page_stats:
                print_alignment_stats(ocr_page_id, gt_page_length, num_similar_sequences, scope='PAGE')
            # optional: char alignment
            if char_alignment:
                ocr_char_aligned = []
                gt_char_aligned = []
                for ocr_seq, gt_seq in zip(ocr, gt):
                    aligned_characters = align(ocr_seq, gt_seq)
                    ocr_char_aligned_seq, gt_char_aligned_seq = normalize_char_alignments(aligned_characters)
                    ocr_char_aligned.append(ocr_char_aligned_seq)
                    gt_char_aligned.append(gt_char_aligned_seq)
                aligned_doc[ocr_page_id] = [ocr_char_aligned, gt_char_aligned, character_error_rates, levenshtein_distances, min_distances, max_distances, similarity_encoding]
            else:
                aligned_doc[ocr_page_id] = (ocr, gt, character_error_rates, levenshtein_distances, min_distances, max_distances, similarity_encoding)
            # break
            #combined_ocr_seq, combined_gt_seq = combine_sequences_to_str(aligned_doc[ocr_page_id])
        if print_doc_stats:
            print_alignment_stats(ocr_doc_id, doc_sequences, doc_similar_sequences, scope='DOC')
        aligned_corpus[ocr_doc_id] = aligned_doc
    print('\nEND: Sequence Alignment')
    print_alignment_stats('DTA', total_sequences, total_similar_sequences, scope='CORPUS')
    with io.open(out_dir, mode='w') as f_out:
        json.dump(aligned_corpus, f_out)
################################################################################
@click.command()
@click.argument('in-dir', type=click.Path(exists=True))
@click.argument('out-dir', type=click.Path(exists=False))
def apply_sliding_window(in_dir, out_dir):
    '''
    Apply sliding window reformatting to aligned data.

    Re-runs the similarity check on the incrementally context-aligned
    corpus and writes the result as JSON. Pages that fail the similarity
    check are skipped and collected per document.
    \b
    Arguments:
        in-dir -- The absolute path to the aligned JSON data
        out-dir -- The absolute path to the aligned JSON data (sliding window)
    '''
    # make paths absolute
    in_dir = os.path.abspath(in_dir)
    out_dir = os.path.abspath(out_dir)
    with io.open(in_dir, mode='r') as f_in:
        aligned_corpus = json.load(f_in)
    # Only the context-aligned corpus is consumed here; the remaining return
    # values are per-page intermediates that are not needed downstream.
    aligned_corpus_context_aligned, _splitted_ocr, _splitted_gt, \
        _ctx_ocr, _ctx_gt = create_incremental_context_alignment(aligned_corpus)

    def generator(page):
        # Yield ((ocr_id, ocr_text), (gt_id, gt_text)) pairs for one page.
        for ocr_line, gt_line in zip(page[0], page[1]):
            yield ((ocr_line[0], ocr_line[1]), (gt_line[0], gt_line[1]))

    aligned_corpus_new = defaultdict(defaultdict)
    faulty_pages_total = {}
    for doc_id, doc_content in aligned_corpus_context_aligned.items():
        faulty_pages_doc = []
        print('Document ID: {}'.format(doc_id))
        aligned_doc = defaultdict(list)
        for page_id, page_content in doc_content.items():
            page_iterator = generator(page_content)
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; narrow to Exception.
            try:
                ocr, gt, character_error_rates, levenshtein_distances, min_distances, max_distances, similarity_encoding = check_sequence_similarity(page_iterator, similarity_range=(0.00, 0.10))
                aligned_doc[page_id] = (ocr, gt, character_error_rates, levenshtein_distances, min_distances, max_distances, similarity_encoding)
            except Exception:
                # Best effort: remember the page and continue with the rest.
                faulty_pages_doc.append(page_id)
        aligned_corpus_new[doc_id] = aligned_doc
        faulty_pages_total[doc_id] = faulty_pages_doc
    with io.open(out_dir, mode='w') as f_out:
        json.dump(aligned_corpus_new, f_out)
################################################################################
@click.command()
@click.argument('training-dir', type=click.Path(exists=True))
@click.argument('validation-dir', type=click.Path(exists=True))
@click.argument('testing-dir', type=click.Path(exists=True))
@click.argument('training-target-dir', type=click.Path(exists=True))
@click.argument('validation-target-dir', type=click.Path(exists=True))
@click.argument('testing-target-dir', type=click.Path(exists=True))
def create_detector_targets(training_dir, validation_dir, testing_dir,
                            training_target_dir, validation_target_dir,
                            testing_target_dir):
    '''
    Create error-detector targets and save them as .npy arrays.

    Target codes per position: 0 = padding, 1 = OCR matches GT,
    2 = OCR differs from GT.
    \b
    Arguments:
        training-dir -- The absolute path to the training alignments (DB).
        validation-dir -- The absolute path to the validation alignments (DB).
        testing-dir -- The absolute path to the testing alignments (DB).
        training-target-dir -- Output path for training targets (.npy).
        validation-target-dir -- Output path for validation targets (.npy).
        testing-target-dir -- Output path for testing targets (.npy).
    '''

    def _char_targets(alignments, max_length):
        '''Per-character targets (1=match, 2=mismatch), padded to max_length.

        The shorter of OCR/GT (columns 3 and 4 of an alignment row) is
        right-padded with spaces so both strings compare position-wise.
        '''
        targets = []
        for alignment in alignments:
            ocr = alignment[3]
            gt = alignment[4]
            if len(ocr) != len(gt):
                diff = abs(len(ocr) - len(gt))
                if len(ocr) < len(gt):
                    ocr += diff * ' '
                else:
                    gt += diff * ' '
            targets.append([1 if char_ocr == char_gt else 2
                            for char_ocr, char_gt in zip(ocr, gt)])
        return add_padding(targets, max_length)

    def _encoded_targets(encoded_ocr, encoded_gt):
        '''Targets from pre-encoded sequences: 0 on GT padding, 1/2 on (mis)match.'''
        targets = []
        for sequence_id in range(encoded_ocr.shape[0]):
            targets_sequence = []
            for encoding_ocr, encoding_gt in zip(encoded_ocr[sequence_id],
                                                 encoded_gt[sequence_id]):
                if encoding_gt == 0:
                    targets_sequence.append(0)  # GT padding position
                elif encoding_ocr == encoding_gt:
                    targets_sequence.append(1)
                else:
                    targets_sequence.append(2)
            targets.append(targets_sequence)
        return np.array(targets)

    # make paths absolute
    training_dir = os.path.abspath(training_dir)
    validation_dir = os.path.abspath(validation_dir)
    testing_dir = os.path.abspath(testing_dir)
    training_target_dir = os.path.abspath(training_target_dir)
    validation_target_dir = os.path.abspath(validation_target_dir)
    testing_target_dir = os.path.abspath(testing_target_dir)
    # 1 = character-level targets from the sqlite alignments; >1 = targets
    # from the pre-encoded wordpiece .npy features below.
    max_wordpiece_length = 1
    if max_wordpiece_length > 1:
        # BUG FIX: `home_dir` was never defined, so this branch raised
        # NameError. NOTE(review): these hard-coded feature paths are
        # machine-specific and should become CLI options.
        home_dir = os.path.expanduser('~')
        encoded_training_ocr_path = home_dir + '/Qurator/used_data/features/dta/encoded_training_ocr_sliding_window_3_charge2_170920.npy'
        encoded_training_gt_path = home_dir + '/Qurator/used_data/features/dta/encoded_training_gt_sliding_window_3_charge2_170920.npy'
        encoded_testing_ocr_path = home_dir + '/Qurator/used_data/features/dta/encoded_testing_ocr_sliding_window_3_2charges_170920.npy'
        encoded_testing_gt_path = home_dir + '/Qurator/used_data/features/dta/encoded_testing_gt_sliding_window_3_2charges_170920.npy'
        encoded_validation_ocr_path = home_dir + '/Qurator/used_data/features/dta/encoded_validation_ocr_sliding_window_3_2charges_small_170920.npy'
        encoded_validation_gt_path = home_dir + '/Qurator/used_data/features/dta/encoded_validation_gt_sliding_window_3_2charges_small_170920.npy'
        encoded_training_ocr = np.load(encoded_training_ocr_path)
        encoded_training_gt = np.load(encoded_training_gt_path)
        encoded_testing_ocr = np.load(encoded_testing_ocr_path)
        encoded_testing_gt = np.load(encoded_testing_gt_path)
        encoded_validation_ocr = np.load(encoded_validation_ocr_path)
        encoded_validation_gt = np.load(encoded_validation_gt_path)
    else:
        alignments_training, _, _ = load_alignments_from_sqlite(path=training_dir, size='total')
        alignments_testing, _, _ = load_alignments_from_sqlite(path=testing_dir, size='total')
        alignments_validation, _, _ = load_alignments_from_sqlite(path=validation_dir, size='total')
    if max_wordpiece_length == 1:
        max_length = 100
        np.save(training_target_dir, _char_targets(alignments_training, max_length))
        np.save(testing_target_dir, _char_targets(alignments_testing, max_length))
        np.save(validation_target_dir, _char_targets(alignments_validation, max_length))
    else:
        np.save(training_target_dir, _encoded_targets(encoded_training_ocr, encoded_training_gt))
        np.save(testing_target_dir, _encoded_targets(encoded_testing_ocr, encoded_testing_gt))
        np.save(validation_target_dir, _encoded_targets(encoded_validation_ocr, encoded_validation_gt))
################################################################################
@click.command()
@click.argument('in-dir', type=click.Path(exists=True))
@click.argument('out-dir', type=click.Path(exists=False))
@click.option('--target-lang', default='de', help='The target language, i.e. the language to be kept.')
def filter_language(in_dir, out_dir, target_lang):
    '''
    Apply language filter to aligned data.

    Classifies the GT side of every aligned sequence with langid and keeps
    only sequences confidently (> 0.999) identified as `target_lang`;
    the survivors are written to a sqlite DB.
    \b
    Arguments:
        in-dir -- The absolute path to the aligned data (JSON)
        out-dir -- The absolute path to the filtered data (DB)
    '''
    # make paths absolute
    in_dir = os.path.abspath(in_dir)
    out_dir = os.path.abspath(out_dir)
    with io.open(in_dir, mode='r') as f_in:
        aligned_corpus = json.load(f_in)
    # Flatten the nested doc/page structure into the sqlite row format.
    corpus = Corpus()
    for doc_id, doc in aligned_corpus.items():
        corpus.add_doc(doc_id, doc)
    corpus.convert_to_sqlite_format()
    # norm_probs=True yields calibrated probabilities for the threshold below.
    identifier = LanguageIdentifier.from_modelstring(model, norm_probs=True)
    filtered_data = []
    for i, alignment in enumerate(corpus.aligned_sequences):
        gt_seq = alignment[4]  # GT is taken as it is supposed to contain fewer errors than the OCR
        lang, prob = identifier.classify(gt_seq)
        if lang == target_lang and prob > 0.999:
            filtered_data.append(alignment)
        if i % 10000 == 0:
            print('Language-filtered files: {}'.format(i))
    # BUG FIX: this previously printed len(unfiltered_data), but
    # `unfiltered_data` was never defined (its creation was commented out),
    # raising NameError. Report the pre-filter corpus size instead.
    print('Non-filtered data: {}'.format(len(corpus.aligned_sequences)))
    print('Filtered data: {}'.format(len(filtered_data)))
    save_alignments_to_sqlite(filtered_data, path=out_dir, append=False)
################################################################################
@click.command()
@click.argument('ocr-dir', type=click.Path(exists=True))
@click.argument('gt-dir', type=click.Path(exists=True))
@click.argument('out-dir', type=click.Path(exists=False))
#@<EMAIL>.argument('out-ocr-dir', type=click.Path(exists=True))
#<EMAIL>.argument('out-gt-dir', type=click.Path(exists=True))
def parse_xml(ocr_dir, gt_dir, out_dir):
'''
Parse OCR and GT XML and save respective JSON files to output | |
output = self.view_builder.basic(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_with_project_id(self):
expected_server = {
"server": {
"id": self.uuid,
"name": "test_server",
"links": [
{
"rel": "self",
"href": self.self_link,
},
{
"rel": "bookmark",
"href": self.bookmark_link,
},
],
}
}
output = self.view_builder.basic(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
    def test_build_server_detail(self):
        """The detail view returns the full server document for an ACTIVE
        instance: identity, timestamps, image/flavor refs, addresses with
        extended-IP attributes, and all OS-EXT-* extension fields."""
        image_bookmark = "http://localhost/fake/images/5"
        flavor_bookmark = "http://localhost/fake/flavors/1"
        expected_server = {
            "server": {
                "id": self.uuid,
                "user_id": "fake_user",
                "tenant_id": "fake_project",
                "updated": "2010-11-11T11:00:00Z",
                "created": "2010-10-10T12:00:00Z",
                "progress": 0,
                "name": "test_server",
                "status": "ACTIVE",
                "hostId": '',
                "image": {
                    "id": "5",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": image_bookmark,
                        },
                    ],
                },
                "flavor": {
                    "id": "1",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": flavor_bookmark,
                        },
                    ],
                },
                # Addresses carry per-IP type and MAC via the OS-EXT-IPS*
                # extensions; 'test1' has two NICs (aa:.. and bb:..).
                "addresses": {
                    'test1': [
                        {'version': 4, 'addr': '192.168.1.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
                        {'version': 6, 'addr': '2001:db8:0:1::1',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
                        {'version': 4, 'addr': '192.168.2.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'}
                    ],
                    'test2': [
                        {'version': 4, 'addr': '192.168.3.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'cc:cc:cc:cc:cc:cc'},
                    ]
                },
                "metadata": {},
                "links": [
                    {
                        "rel": "self",
                        "href": self.self_link,
                    },
                    {
                        "rel": "bookmark",
                        "href": self.bookmark_link,
                    },
                ],
                "OS-DCF:diskConfig": "MANUAL",
                "accessIPv4": '',
                "accessIPv6": '',
                "OS-EXT-AZ:availability_zone": "nova",
                "config_drive": None,
                "OS-EXT-SRV-ATTR:host": None,
                "OS-EXT-SRV-ATTR:hypervisor_hostname": None,
                "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
                "key_name": '',
                "OS-SRV-USG:launched_at": None,
                "OS-SRV-USG:terminated_at": None,
                "security_groups": [{'name': 'fake-0-0'},
                                    {'name': 'fake-0-1'}],
                "OS-EXT-STS:task_state": None,
                "OS-EXT-STS:vm_state": vm_states.ACTIVE,
                "OS-EXT-STS:power_state": 1,
                "os-extended-volumes:volumes_attached": [
                    {'id': 'some_volume_1'},
                    {'id': 'some_volume_2'},
                ]
            }
        }
        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output, matchers.DictMatches(expected_server))
    def test_build_server_detail_with_fault(self):
        """An ERROR'd instance with a fault exposes a 'fault' section
        (code/created/message/details) and status ERROR; note there is
        no 'progress' key in the error view."""
        self.instance['vm_state'] = vm_states.ERROR
        self.instance['fault'] = fake_instance.fake_fault_obj(
            self.request.context, self.uuid)
        image_bookmark = "http://localhost/fake/images/5"
        flavor_bookmark = "http://localhost/fake/flavors/1"
        expected_server = {
            "server": {
                "id": self.uuid,
                "user_id": "fake_user",
                "tenant_id": "fake_project",
                "updated": "2010-11-11T11:00:00Z",
                "created": "2010-10-10T12:00:00Z",
                "name": "test_server",
                "status": "ERROR",
                "hostId": '',
                "image": {
                    "id": "5",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": image_bookmark,
                        },
                    ],
                },
                "flavor": {
                    "id": "1",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": flavor_bookmark,
                        },
                    ],
                },
                "addresses": {
                    'test1': [
                        {'version': 4, 'addr': '192.168.1.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
                        {'version': 6, 'addr': '2001:db8:0:1::1',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
                        {'version': 4, 'addr': '192.168.2.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'}
                    ],
                    'test2': [
                        {'version': 4, 'addr': '192.168.3.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'cc:cc:cc:cc:cc:cc'},
                    ]
                },
                "metadata": {},
                "links": [
                    {
                        "rel": "self",
                        "href": self.self_link,
                    },
                    {
                        "rel": "bookmark",
                        "href": self.bookmark_link,
                    },
                ],
                # Default fake fault: 404 with full details visible.
                "fault": {
                    "code": 404,
                    "created": "2010-10-10T12:00:00Z",
                    "message": "HTTPNotFound",
                    "details": "Stock details for test",
                },
                "OS-DCF:diskConfig": "MANUAL",
                "accessIPv4": '',
                "accessIPv6": '',
                "OS-EXT-AZ:availability_zone": "nova",
                "config_drive": None,
                "OS-EXT-SRV-ATTR:host": None,
                "OS-EXT-SRV-ATTR:hypervisor_hostname": None,
                "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
                "key_name": '',
                "OS-SRV-USG:launched_at": None,
                "OS-SRV-USG:terminated_at": None,
                "security_groups": [{'name': 'fake-0-0'},
                                    {'name': 'fake-0-1'}],
                "OS-EXT-STS:task_state": None,
                "OS-EXT-STS:vm_state": vm_states.ERROR,
                "OS-EXT-STS:power_state": 1,
                "os-extended-volumes:volumes_attached": [
                    {'id': 'some_volume_1'},
                    {'id': 'some_volume_2'},
                ]
            }
        }
        self.request.context = context.RequestContext('fake', 'fake')
        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail_with_fault_that_has_been_deleted(self):
self.instance['deleted'] = 1
self.instance['vm_state'] = vm_states.ERROR
fault = fake_instance.fake_fault_obj(self.request.context,
self.uuid, code=500,
message="No valid host was found")
self.instance['fault'] = fault
expected_fault = {"code": 500,
"created": "2010-10-10T12:00:00Z",
"message": "No valid host was found"}
self.request.context = context.RequestContext('fake', 'fake')
output = self.view_builder.show(self.request, self.instance)
# Regardless of vm_state deleted servers should be DELETED
self.assertEqual("DELETED", output['server']['status'])
self.assertThat(output['server']['fault'],
matchers.DictMatches(expected_fault))
@mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid')
def test_build_server_detail_with_fault_no_instance_mapping(self,
mock_im):
self.instance['vm_state'] = vm_states.ERROR
mock_im.side_effect = exception.InstanceMappingNotFound(uuid='foo')
self.request.context = context.RequestContext('fake', 'fake')
self.view_builder.show(self.request, self.instance)
mock_im.assert_called_once_with(mock.ANY, self.uuid)
@mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid')
def test_build_server_detail_with_fault_loaded(self, mock_im):
self.instance['vm_state'] = vm_states.ERROR
fault = fake_instance.fake_fault_obj(self.request.context,
self.uuid, code=500,
message="No valid host was found")
self.instance['fault'] = fault
self.request.context = context.RequestContext('fake', 'fake')
self.view_builder.show(self.request, self.instance)
self.assertFalse(mock_im.called)
def test_build_server_detail_with_fault_no_details_not_admin(self):
self.instance['vm_state'] = vm_states.ERROR
self.instance['fault'] = fake_instance.fake_fault_obj(
self.request.context,
self.uuid,
code=500,
message='Error')
expected_fault = {"code": 500,
"created": "2010-10-10T12:00:00Z",
"message": "Error"}
self.request.context = context.RequestContext('fake', 'fake')
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output['server']['fault'],
matchers.DictMatches(expected_fault))
def test_build_server_detail_with_fault_admin(self):
self.instance['vm_state'] = vm_states.ERROR
self.instance['fault'] = fake_instance.fake_fault_obj(
self.request.context,
self.uuid,
code=500,
message='Error')
expected_fault = {"code": 500,
"created": "2010-10-10T12:00:00Z",
"message": "Error",
'details': 'Stock details for test'}
self.request.environ['nova.context'].is_admin = True
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output['server']['fault'],
matchers.DictMatches(expected_fault))
def test_build_server_detail_with_fault_no_details_admin(self):
self.instance['vm_state'] = vm_states.ERROR
self.instance['fault'] = fake_instance.fake_fault_obj(
self.request.context,
self.uuid,
code=500,
message='Error',
details='')
expected_fault = {"code": 500,
"created": "2010-10-10T12:00:00Z",
"message": "Error"}
self.request.environ['nova.context'].is_admin = True
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output['server']['fault'],
matchers.DictMatches(expected_fault))
def test_build_server_detail_with_fault_but_active(self):
self.instance['vm_state'] = vm_states.ACTIVE
self.instance['progress'] = 100
self.instance['fault'] = fake_instance.fake_fault_obj(
self.request.context, self.uuid)
output = self.view_builder.show(self.request, self.instance)
self.assertNotIn('fault', output['server'])
    def test_build_server_detail_active_status(self):
        """An ACTIVE instance at progress 100 is shown with status ACTIVE
        and the progress value passed straight through."""
        # set the power state of the instance to running
        self.instance['vm_state'] = vm_states.ACTIVE
        self.instance['progress'] = 100
        image_bookmark = "http://localhost/fake/images/5"
        flavor_bookmark = "http://localhost/fake/flavors/1"
        expected_server = {
            "server": {
                "id": self.uuid,
                "user_id": "fake_user",
                "tenant_id": "fake_project",
                "updated": "2010-11-11T11:00:00Z",
                "created": "2010-10-10T12:00:00Z",
                "progress": 100,
                "name": "test_server",
                "status": "ACTIVE",
                "hostId": '',
                "image": {
                    "id": "5",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": image_bookmark,
                        },
                    ],
                },
                "flavor": {
                    "id": "1",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": flavor_bookmark,
                        },
                    ],
                },
                "addresses": {
                    'test1': [
                        {'version': 4, 'addr': '192.168.1.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
                        {'version': 6, 'addr': '2001:db8:0:1::1',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
                        {'version': 4, 'addr': '192.168.2.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'}
                    ],
                    'test2': [
                        {'version': 4, 'addr': '192.168.3.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'cc:cc:cc:cc:cc:cc'},
                    ]
                },
                "metadata": {},
                "links": [
                    {
                        "rel": "self",
                        "href": self.self_link,
                    },
                    {
                        "rel": "bookmark",
                        "href": self.bookmark_link,
                    },
                ],
                "OS-DCF:diskConfig": "MANUAL",
                "accessIPv4": '',
                "accessIPv6": '',
                "OS-EXT-AZ:availability_zone": "nova",
                "config_drive": None,
                "OS-EXT-SRV-ATTR:host": None,
                "OS-EXT-SRV-ATTR:hypervisor_hostname": None,
                "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
                "key_name": '',
                "OS-SRV-USG:launched_at": None,
                "OS-SRV-USG:terminated_at": None,
                "security_groups": [{'name': 'fake-0-0'},
                                    {'name': 'fake-0-1'}],
                "OS-EXT-STS:task_state": None,
                "OS-EXT-STS:vm_state": vm_states.ACTIVE,
                "OS-EXT-STS:power_state": 1,
                "os-extended-volumes:volumes_attached": [
                    {'id': 'some_volume_1'},
                    {'id': 'some_volume_2'},
                ]
            }
        }
        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output, matchers.DictMatches(expected_server))
    def test_build_server_detail_with_metadata(self):
        """Server detail should surface instance metadata as a flat
        key/value mapping; every other field matches the plain detail view.
        """
        metadata = []
        metadata.append(models.InstanceMetadata(key="Open", value="Stack"))
        # Flatten the InstanceMetadata rows into the {key: value} dict the
        # view is expected to echo back under "metadata".
        metadata = nova_utils.metadata_to_dict(metadata)
        self.instance['metadata'] = metadata
        image_bookmark = "http://localhost/fake/images/5"
        flavor_bookmark = "http://localhost/fake/flavors/1"
        expected_server = {
            "server": {
                "id": self.uuid,
                "user_id": "fake_user",
                "tenant_id": "fake_project",
                "updated": "2010-11-11T11:00:00Z",
                "created": "2010-10-10T12:00:00Z",
                "progress": 0,
                "name": "test_server",
                "status": "ACTIVE",
                "hostId": '',
                "image": {
                    "id": "5",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": image_bookmark,
                        },
                    ],
                },
                "flavor": {
                    "id": "1",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": flavor_bookmark,
                        },
                    ],
                },
                "addresses": {
                    'test1': [
                        {'version': 4, 'addr': '192.168.1.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
                        {'version': 6, 'addr': '2001:db8:0:1::1',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
                        {'version': 4, 'addr': '192.168.2.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'}
                    ],
                    'test2': [
                        {'version': 4, 'addr': '192.168.3.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'cc:cc:cc:cc:cc:cc'},
                    ]
                },
                # The one field this test is really about.
                "metadata": {"Open": "Stack"},
                "links": [
                    {
                        "rel": "self",
                        "href": self.self_link,
                    },
                    {
                        "rel": "bookmark",
                        "href": self.bookmark_link,
                    },
                ],
                "OS-DCF:diskConfig": "MANUAL",
                "accessIPv4": '',
                "accessIPv6": '',
                "OS-EXT-AZ:availability_zone": "nova",
                "config_drive": None,
                "OS-EXT-SRV-ATTR:host": None,
                "OS-EXT-SRV-ATTR:hypervisor_hostname": None,
                "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
                "key_name": '',
                "OS-SRV-USG:launched_at": None,
                "OS-SRV-USG:terminated_at": None,
                "security_groups": [{'name': 'fake-0-0'},
                                    {'name': 'fake-0-1'}],
                "OS-EXT-STS:task_state": None,
                "OS-EXT-STS:vm_state": vm_states.ACTIVE,
                "OS-EXT-STS:power_state": 1,
                "os-extended-volumes:volumes_attached": [
                    {'id': 'some_volume_1'},
                    {'id': 'some_volume_2'},
                ]
            }
        }
        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output, matchers.DictMatches(expected_server))
class ServersViewBuilderTestV269(ServersViewBuilderTest):
"""Server ViewBuilder test for microversion 2.69
The intent here is simply to verify that when showing server details
after microversion 2.69 the response could have missing keys for those
servers from the down cells.
"""
wsgi_api_version = '2.69'
    def setUp(self):
        """Build a fresh ViewBuilder and request context, and stub the
        API-version check so any microversion in [2.1, 2.69] is supported."""
        super(ServersViewBuilderTestV269, self).setUp()
        self.view_builder = views.servers.ViewBuilder()
        self.ctxt = context.RequestContext('fake', 'fake')
        def fake_is_supported(req, min_version="2.1", max_version="2.69"):
            # Supported iff min_version <= request version <= max_version.
            return (fakes.api_version.APIVersionRequest(max_version) >=
                    req.api_version_request >=
                    fakes.api_version.APIVersionRequest(min_version))
        self.stub_out('nova.api.openstack.api_version_request.is_supported',
                      fake_is_supported)
    def req(self, url, use_admin_context=False):
        """Return a blank HTTPRequest pinned to this class's microversion."""
        return fakes.HTTPRequest.blank(url,
                                       use_admin_context=use_admin_context,
                                       version=self.wsgi_api_version)
def test_get_server_list_detail_with_down_cells(self):
# Fake out 1 partially constructued instance and one full instance.
self.instances = [
self.instance,
objects.Instance(
context=self.ctxt,
uuid=uuids.fake1,
project_id='fake',
created_at=datetime.datetime(1955, 11, 5)
)
]
req = self.req('/fake/servers/detail')
output = self.view_builder.detail(req, self.instances, True)
self.assertEqual(2, len(output['servers']))
image_bookmark = "http://localhost/fake/images/5"
expected = {
"servers": [{
"id": self.uuid,
"user_id": "fake_user",
"tenant_id": "fake_project",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": 0,
"name": "test_server",
"status": "ACTIVE",
"hostId": '',
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": image_bookmark,
},
],
},
"flavor": {
'disk': 1,
'ephemeral': 1,
'vcpus': 1,
'ram': 256,
'original_name': 'flavor1',
'extra_specs': {},
'swap': 0
},
"addresses": {
'test1': [
{'version': 4, 'addr': '192.168.1.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': '2001:db8:0:1::1',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 4, 'addr': '192.168.2.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'}
],
'test2': [
{'version': 4, 'addr': '192.168.3.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'cc:cc:cc:cc:cc:cc'},
]
},
"metadata": {},
"tags": [],
"links": [
{
"rel": "self",
"href": self.self_link,
},
{
"rel": "bookmark",
"href": self.bookmark_link,
},
],
"OS-DCF:diskConfig": "MANUAL",
"OS-EXT-SRV-ATTR:root_device_name": None,
"accessIPv4": '',
"accessIPv6": '',
"host_status": '',
"OS-EXT-SRV-ATTR:user_data": None,
"trusted_image_certificates": None,
"OS-EXT-AZ:availability_zone": "nova",
"OS-EXT-SRV-ATTR:kernel_id": '',
"OS-EXT-SRV-ATTR:reservation_id": '',
"config_drive": None,
"OS-EXT-SRV-ATTR:host": None,
"OS-EXT-SRV-ATTR:hypervisor_hostname": None,
"OS-EXT-SRV-ATTR:hostname": 'test_server',
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"key_name": '',
"locked": False,
"description": None,
"OS-SRV-USG:launched_at": None,
"OS-SRV-USG:terminated_at": None,
"security_groups": [{'name': 'fake-0-0'},
{'name': 'fake-0-1'}],
"OS-EXT-STS:task_state": None,
"OS-EXT-STS:vm_state": vm_states.ACTIVE,
"OS-EXT-STS:power_state": 1,
"OS-EXT-SRV-ATTR:launch_index": 0,
"OS-EXT-SRV-ATTR:ramdisk_id": '',
"os-extended-volumes:volumes_attached": [
{'id': 'some_volume_1', 'delete_on_termination': True},
{'id': 'some_volume_2', 'delete_on_termination': False},
]
},
{
'created': '1955-11-05T00:00:00Z',
'id': uuids.fake1,
'tenant_id': 'fake',
"status": "UNKNOWN",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/fake/servers/%s" %
uuids.fake1,
},
{
"rel": "bookmark",
"href": "http://localhost/fake/servers/%s" %
uuids.fake1,
| |
the same length as the element_orbital_pairs
legend (bool): Determines if the legend should be included or not.
linewidth (float): Line width of the plain band structure plotted in the background
band_color (string): Color of the plain band structure
figsize (list / tuple): Desired size of the image in inches (width, height)
erange (list / tuple): Range of energy to show in the plot [low, high]
kpath (str): High symmetry k-point path of band structure calculation
Due to the nature of the KPOINTS file for HSE calculations this
information is a required input for proper labeling of the figure
for HSE calculations. This information is extracted from the KPOINTS
files for non-HSE calculations. (G is automaticall converted to \\Gamma)
n (int): Number of points between each high symmetry points.
This is also only required for HSE calculations. This number should be
known by the user, as it was used to generate the KPOINTS file.
fontsize (float): Font size of the text in the figure.
save (bool): Determines whether to automatically save the figure or not. If not
the figure and axis are return for further manipulation.
Returns:
If save == True, this function will return nothing and directly save the image as
the output name. If save == False, the function will return the matplotlib figure
and axis for further editing.
"""
band = Band(
folder=folder,
spin=spin,
projected=True,
hse=hse,
kpath=kpath,
n=n,
)
fig = plt.figure(figsize=(figsize), dpi=400)
ax = fig.add_subplot(111)
_figure_setup(ax=ax, fontsize=fontsize, ylim=[erange[0], erange[1]])
band.plot_element_orbitals(
ax=ax,
element_orbital_pairs=element_orbital_pairs,
scale_factor=scale_factor,
color_list=color_list,
legend=legend,
linewidth=linewidth,
band_color=band_color,
)
plt.tight_layout(pad=0.2)
if save:
plt.savefig(output)
else:
return fig, ax
def band_element_spd(
    folder,
    elements,
    order=None,
    output='band_element_spd.png',
    spin='up',
    scale_factor=6,
    color_dict=None,
    legend=True,
    linewidth=0.75,
    band_color='black',
    figsize=(4, 3),
    erange=None,
    hse=False,
    kpath=None,
    n=None,
    fontsize=7,
    save=True,
):
    """
    This function generates a s, p, d projected band structure on specific elements.

    Parameters:
        folder (str): This is the folder that contains the VASP files
        output (str): File name of the resulting plot.
        spin (str): Choose which spin direction to parse. ('up' or 'down')
        scale_factor (float): Factor to scale weights. This changes the size of the
            points in the scatter plot
        elements (list): List of element symbols to project onto
        order (list): This determines the order in which the points are plotted on the
            graph. Defaults to ['s', 'p', 'd']. This is an option because sometimes
            certain orbitals can be hidden under others because they have a larger
            weight. For example, if the weights of the d orbitals are greater than
            that of the s orbitals, it might be smart to choose ['d', 'p', 's'] as
            the order so the s orbitals are plotted over the d orbitals.
        color_dict (dict[str][str]): This option allow the colors of the s, p, and d
            orbitals to be specified. Should be in the form of:
            {'s': <s color>, 'p': <p color>, 'd': <d color>}
        legend (bool): Determines if the legend should be included or not.
        linewidth (float): Line width of the plain band structure plotted in the background
        band_color (string): Color of the plain band structure
        figsize (list / tuple): Desired size of the image in inches (width, height)
        erange (list / tuple): Range of energy to show in the plot [low, high].
            Defaults to [-6, 6].
        hse (bool): Whether the calculation used HSE.
        kpath (str): High symmetry k-point path of band structure calculation
            Due to the nature of the KPOINTS file for HSE calculations this
            information is a required input for proper labeling of the figure
            for HSE calculations. This information is extracted from the KPOINTS
            files for non-HSE calculations. (G is automatically converted to \\Gamma)
        n (int): Number of points between each high symmetry points.
            This is also only required for HSE calculations. This number should be
            known by the user, as it was used to generate the KPOINTS file.
        fontsize (float): Font size of the text in the figure.
        save (bool): Determines whether to automatically save the figure or not. If not
            the figure and axis are returned for further manipulation.

    Returns:
        If save == True, this function will return nothing and directly save the image as
        the output name. If save == False, the function will return the matplotlib figure
        and axis for further editing.
    """
    # The old signature used mutable defaults (order=['s','p','d'],
    # erange=[-6, 6]), which are shared across calls and could be mutated by
    # downstream code. Build fresh per-call copies instead.
    if order is None:
        order = ['s', 'p', 'd']
    if erange is None:
        erange = [-6, 6]
    band = Band(
        folder=folder,
        spin=spin,
        projected=True,
        hse=hse,
        kpath=kpath,
        n=n,
    )
    fig = plt.figure(figsize=figsize, dpi=400)
    ax = fig.add_subplot(111)
    _figure_setup(ax=ax, fontsize=fontsize, ylim=[erange[0], erange[1]])
    band.plot_element_spd(
        ax=ax,
        elements=elements,
        order=order,
        scale_factor=scale_factor,
        color_dict=color_dict,
        legend=legend,
        linewidth=linewidth,
        band_color=band_color,
    )
    plt.tight_layout(pad=0.2)
    if save:
        plt.savefig(output)
    else:
        return fig, ax
def band_plain_spin_polarized(
    folder,
    output='band_plain_sp.png',
    up_color='black',
    down_color='red',
    linewidth=1.25,
    up_linestyle='-',
    down_linestyle='-',
    figsize=(4, 3),
    erange=None,
    hse=False,
    kpath=None,
    n=None,
    fontsize=7,
    save=True,
):
    """
    This function generates a plain spin polarized band structure.

    Parameters:
        folder (str): This is the folder that contains the VASP files
        output (str): File name of the resulting plot.
        up_color (str): Color of the spin-up lines
        down_color (str): Color of the spin-down lines
        linewidth (float): Line width of the band structure lines
        up_linestyle (str): Line style of the spin-up bands
        down_linestyle (str): Line style of the spin-down bands
        figsize (list / tuple): Desired size of the image in inches (width, height)
        erange (list / tuple): Range of energy to show in the plot [low, high].
            Defaults to [-6, 6].
        hse (bool): Whether the calculation used HSE.
        kpath (str): High symmetry k-point path of band structure calculation
            Due to the nature of the KPOINTS file for HSE calculations this
            information is a required input for proper labeling of the figure
            for HSE calculations. This information is extracted from the KPOINTS
            files for non-HSE calculations. (G is automatically converted to \\Gamma)
        n (int): Number of points between each high symmetry points.
            This is also only required for HSE calculations. This number should be
            known by the user, as it was used to generate the KPOINTS file.
        fontsize (float): Font size of the text in the figure.
        save (bool): Determines whether to automatically save the figure or not. If not
            the figure and axis are returned for further manipulation.

    Returns:
        If save == True, this function will return nothing and directly save the image as
        the output name. If save == False, the function will return the matplotlib figure
        and axis for further editing.
    """
    # Avoid the shared mutable default (erange=[-6, 6]) from the old
    # signature; create a fresh list per call.
    if erange is None:
        erange = [-6, 6]
    # Spin-polarized data is parsed as two independent band sets.
    band_up = Band(
        folder=folder,
        spin='up',
        hse=hse,
        kpath=kpath,
        n=n,
    )
    band_down = Band(
        folder=folder,
        spin='down',
        hse=hse,
        kpath=kpath,
        n=n,
    )
    fig = plt.figure(figsize=figsize, dpi=400)
    ax = fig.add_subplot(111)
    _figure_setup(ax=ax, fontsize=fontsize, ylim=[erange[0], erange[1]])
    band_up.plot_plain(
        ax=ax,
        color=up_color,
        linewidth=linewidth,
        linestyle=up_linestyle,
    )
    band_down.plot_plain(
        ax=ax,
        color=down_color,
        linewidth=linewidth,
        linestyle=down_linestyle,
    )
    # Proxy artists so the legend shows one entry per spin channel.
    legend_lines = [
        plt.Line2D(
            [0],
            [0],
            color=up_color,
            linestyle=up_linestyle
        ),
        plt.Line2D(
            [0],
            [0],
            color=down_color,
            linestyle=down_linestyle
        )
    ]
    legend_labels = ['$\\uparrow$', '$\\downarrow$']
    ax.legend(
        legend_lines,
        legend_labels,
        ncol=1,
        loc='upper left',
        fontsize=fontsize,
        bbox_to_anchor=(1, 1),
        borderaxespad=0,
        frameon=False,
        handletextpad=0.1,
    )
    plt.tight_layout(pad=0.2)
    if save:
        plt.savefig(output)
    else:
        return fig, ax
def band_spd_spin_polarized(
folder,
output='band_spd_sp.png',
scale_factor=2,
order=['s', 'p', 'd'],
color_dict=None,
legend=True,
linewidth=0.75,
band_color='black',
unprojected_band_color='gray',
unprojected_linewidth=0.6,
fontsize=7,
annotations=['$\\uparrow$ ', '$\\downarrow$ '],
annotation_xy=(0.02, 0.98),
figsize=(4, 3),
erange=[-6, 6],
stack='vertical',
hse=False,
kpath=None,
n=None,
save=True,
):
"""
This function generates a spin polarized s, p, d projected band structure. This will plot two plots
stacked on top or eachother or next to eachother. The top or left plot will project on the
spin up bands and the bottom or right plot will project onto the spin down bands.
Parameters:
folder (str): This is the folder that contains the VASP files
output (str): File name of the resulting plot.
scale_factor (float): Factor to scale weights. This changes the size of the
points in the scatter plot
order (list): This determines the order in which the points are plotted on the
graph. This is an option because sometimes certain orbitals can be hidden
under others because they have a larger weight. For example, if the
weights of the d orbitals are greater than that of the s orbitals, it
might be smart to choose ['d', 'p', 's'] as the order so the s orbitals are
plotted over the d orbitals.
color_dict (dict[str][str]): This option allow the colors of the s, p, and d
orbitals to be specified. | |
request.user
bmrr =0.0
if user.is_authenticated:
bmrr = bmr.objects.filter(us=user).order_by('-id')[0]
bmrobjlist = bmr.objects.filter(us=user)
bmrlist = []
bmrdate = []
for i in bmrobjlist:
bmrlist.append(i.bmr)
bmrdate.append(i.date)
if request.method=="POST":
weight_metric = request.POST.get("weight-metric")
weight_imperial = request.POST.get("weight-imperial")
if weight_metric:
weight = float(request.POST.get("weight-metric"))
height = float(request.POST.get("height-metric"))
age = int(request.POST.get("age-metric"))
gender = str(request.POST.get("gender-metric"))
status = str(request.POST.get("status-metric"))
elif weight_imperial:
weight = float(request.POST.get("weight-imperial"))/2.205
height = (float(request.POST.get("feet"))*30.48 + float(request.POST.get("inches"))*2.54)/100
age = int(request.POST.get("age-imperial"))
gender = str(request.POST.get("gender-imperial"))
status = str(request.POST.get("status-imperial"))
cont = bmrmain(weight,height,age,gender,status)
bmrr = cont
user = request.user
bmr.objects.create(us=user,bmr=round(bmrr),date=datetime.date.today())
user.weight = weight
user.height = height
user.age = age
user.gender = gender
user.status = status
user.save()
return redirect('bmrcal')
parms = {
'title':headtitle,
'bmr':bmrr,
'bmrlist':json.dumps(bmrlist),
'bmrdate':json.dumps(bmrdate,indent=4, sort_keys=True, default=str),
}
return render(request,'bmrmain.html',parms)
@api_view(['GET'])
@permission_classes((IsAuthenticated, ))
def foodapi(request):
    """Return the full food catalogue, serialized, to authenticated callers."""
    if request.method == 'GET':
        serialized = foodSerializer(food.objects.all(), many=True)
        return Response(serialized.data)
# def callfoodapi(request):
# resp = requests.get('http://127.0.0.1:8000/api/food/',headers={'Authorization':'Token <PASSWORD>'})
# data = resp.json()
# return JsonResponse(data,safe=False)
# @api_view(['POST',])
# @permission_classes([])
# def registration_view(request):
# if request.method == "POST":
# serializer = RegistrationSerializer(data=request.data)
# data = {}
# if serializer.is_valid():
# user = serializer.save()
# data['response'] = "Succesfully registered a new user"
# data['mobno'] = user.mobno
# token = Token.objects.get(user=user).key
# data['token'] = token
# else:
# data = serializer.errors
# return Response(data)
def sendsms(mobno, otp):
    """Send *otp* to *mobno* through the Fast2SMS bulk API.

    Returns the provider's ``return`` flag from the JSON response
    (True when the message was accepted, False otherwise).
    """
    url = "https://www.fast2sms.com/dev/bulkV2"
    payload = "message=KOWI OTP-" + str(otp) + "&language=english&route=q&numbers=" + str(mobno)
    headers = {
        # NOTE(review): the API key is redacted here; it should be loaded
        # from configuration, not hard-coded in source.
        'authorization': "<KEY>",
        'Content-Type': "application/x-www-form-urlencoded",
        'Cache-Control': "no-cache",
    }
    # A timeout keeps a hung SMS gateway from blocking the request thread
    # indefinitely (the old call had none).
    response = requests.request("POST", url, data=payload, headers=headers,
                                timeout=10)
    r = response.json()
    state = r['return']
    return state
def generateOTP():
    """Return a random 6-digit one-time password as a string.

    Uses the ``secrets`` module: OTPs are security sensitive and the
    previous ``random.random()`` source is predictable, which is unsuitable
    for authentication tokens.
    """
    import secrets  # function-scope import keeps the module import block untouched
    return "".join(secrets.choice("0123456789") for _ in range(6))
@api_view(['POST',])
@permission_classes([])
def reg_view(request):
    """Start registration: validate the payload and SMS an OTP.

    On success the pending registration is parked in ``otpstore`` until the
    OTP is confirmed by ``otp_verify``. Responds with ``{'status': True}``
    when the OTP was dispatched, ``{'status': False}`` when the SMS gateway
    reported failure, or the serializer errors on invalid input.
    """
    if request.method == 'POST':
        serializer = RegSerializer(data=request.data)
        data = {}
        if serializer.is_valid():
            username = serializer.validated_data['username']
            mobno = serializer.validated_data['mobno']
            password = serializer.validated_data['password']
            gender = serializer.validated_data['gender']
            otp = generateOTP()
            # Truthiness test instead of the old `state == True` comparison.
            if sendsms(mobno, otp):
                otpstore.objects.create(mobno=mobno, username=username,
                                        passw=password, otp=otp, gender=gender)
                data['status'] = True
            else:
                data['status'] = False
        else:
            data = serializer.errors
        return Response(data)
@api_view(['POST',])
@permission_classes([])
def otp_verify(request):
    """Finish registration: check the submitted OTP and create the user.

    Compares the OTP against the newest pending ``otpstore`` entry for the
    mobile number; on success creates the ``MyUser``, returns its auth
    token, and purges the pending entries.
    """
    if request.method == "POST":
        serializer = otpSerializer(data=request.data)
        data = {}
        if serializer.is_valid():
            mobno = serializer.validated_data['mobno']
            otp = serializer.validated_data['otp']
            try:
                # Newest pending OTP wins if several were requested.
                getobj = otpstore.objects.filter(mobno=mobno).order_by('-id')[0]
            except IndexError:
                # The old bare `except:` fell through and then raised
                # NameError on the unbound `getobj`; report and stop instead.
                data['response'] = 'Error'
                data['status'] = False
                return Response(data)
            if otp == getobj.otp:
                user = MyUser(username=getobj.username, mobno=mobno,
                              gender=getobj.gender)
                # Hash the password captured at registration time.
                user.set_password(getobj.passw)
                user.save()
                data['response'] = "Successfully registered a new user"
                data['status'] = True
                token = Token.objects.get(user=user).key
                data['token'] = token
                # Consume every pending OTP for this number.
                for i in otpstore.objects.filter(mobno=mobno):
                    i.delete()
            else:
                data['response'] = "Incorrect Otp"
                data['status'] = False
        else:
            data = serializer.errors
        return Response(data)
@api_view(['POST',])
@permission_classes([])
def login_view(request):
    """Start an OTP login: SMS a fresh OTP to the submitted mobile number.

    Responds with ``{'status': True}`` when the OTP was dispatched,
    ``{'status': False}`` on gateway failure, or the serializer errors.
    """
    if request.method == "POST":
        serializer = loginSerializer(data=request.data)
        data = {}
        if serializer.is_valid():
            mobno = serializer.validated_data['mobno']
            # (Removed the stray debug `print(mobno)` that leaked phone
            # numbers to the server log.)
            otp = generateOTP()
            if sendsms(mobno, otp):
                otpstore.objects.create(mobno=mobno, otp=otp)
                data['status'] = True
            else:
                data['status'] = False
        else:
            data = serializer.errors
        return Response(data)
@api_view(['POST',])
@permission_classes([])
def otp_loginverify(request):
    """Finish an OTP login: validate the OTP and return the user's token."""
    if request.method == "POST":
        serializer = otpSerializer(data=request.data)
        data = {}
        if serializer.is_valid():
            mobno = serializer.validated_data['mobno']
            otp = serializer.validated_data['otp']
            try:
                # Newest pending OTP wins if several were requested.
                getobj = otpstore.objects.filter(mobno=mobno).order_by('-id')[0]
            except IndexError:
                # The old bare `except:` fell through and then raised
                # NameError on the unbound `getobj`; report and stop instead.
                data['response'] = 'Error'
                data['status'] = False
                return Response(data)
            if otp == getobj.otp:
                user = MyUser.objects.get(mobno=mobno)
                data['response'] = "Successful Login"
                data['status'] = True
                token = Token.objects.get(user=user).key
                data['token'] = token
                # Consume every pending OTP for this number.
                for i in otpstore.objects.filter(mobno=mobno):
                    i.delete()
            else:
                data['response'] = "Incorrect Otp"
                data['status'] = False
        else:
            data = serializer.errors
        return Response(data)
@api_view(['POST','PUT','GET',])
@permission_classes((IsAuthenticated, ))
def profile_view(request):
    """Read (GET) or update (POST/PUT) the authenticated user's profile.

    Updates apply the submitted fields through the serializer and answer
    with a success message, or with the validation errors.
    """
    current_user = request.user
    if request.method == 'GET':
        return Response(ProfileSerializer(current_user).data)
    if request.method in ("POST", "PUT"):
        incoming = ProfileSerializer(data=request.data, many=False)
        if incoming.is_valid():
            incoming.update(current_user, incoming.validated_data)
            payload = {'response': "Succesfully Updated!"}
        else:
            payload = incoming.errors
        return Response(payload)
@api_view(['GET',])
@permission_classes((IsAuthenticated, ))
def dietallapi(request):
    """Return the authenticated user's full weekly diet plan.

    Response shape (unchanged from the original copy-pasted version)::

        {'response': 'Successfull',
         <day>: {'preworkout': {1: name, 2: name, ...},
                 'postworkout': {...}, 'lunch': {...},
                 'snacks': {...}, 'dinner': {...},
                 'remarks': <str>}, ...}
    """
    if request.method == "GET":
        user = request.user
        data = {}
        data['response'] = "Successfull"
        # The five meal slots were previously five hand-unrolled loops with
        # separate counters; one loop over the slot names is equivalent.
        for diet in user.diets.all():
            day = {}
            for meal in ("preworkout", "postworkout", "lunch", "snacks",
                         "dinner"):
                day[meal] = {
                    pos: entry.fooditem.name
                    for pos, entry in enumerate(getattr(diet, meal).all(),
                                                start=1)
                }
            day["remarks"] = diet.remarks
            data[diet.day] = day
        return Response(data)
def lookcustomer(request, id):
    """Render the customer-lookup page for the staff member with *id*.

    Redirects to the employee login with an error message when the user
    does not exist or the requester is not the matching active staff
    account.
    """
    try:
        userr = MyUser.objects.get(id=id)
    except ObjectDoesNotExist:
        # Bug fix: messages.error() requires the request as its first
        # argument; the old `messages.error("No User Found")` raised a
        # TypeError instead of flashing the message.
        messages.error(request, "No User Found")
        return redirect('elogin')
    user = request.user
    # Only the active staff account whose id matches may view this page.
    if user.is_authenticated and user.is_staff and user.is_active and user.id == id:
        emp = employeecontrol.objects.get(id=userr)
        emp_type = emp.employeetype
        alotted_users = emp.alloted.all()
        parms = {
            'title': "Lookup Customers | KOWI",
            'emp_type': emp_type,
            'alotted': alotted_users,
        }
        return render(request, 'lookcustomer.html', parms)
    else:
        messages.error(request, "Login First")
        return redirect('elogin')
def exercised(request,date):
    """Weekly exercise-log page anchored at *date* ('YYYY-MM-DD').

    Renders the seven-day strip starting at *date*, the user's planned
    exercises for the current weekday with their sets/reps records, the
    exercises already logged today, and a completion percentage.  POST
    actions: 'cal' jumps the calendar, 'work' logs one exercise with
    quantities, 'exsave' syncs the checkbox state.  Staff accounts get a
    404 page; anonymous users are redirected to login.
    """
    title = "Exercise | Lifestyles"
    user = request.user
    if user.is_authenticated:
        if user.is_staff != True:
            count = 0
            # Anchor date for the 7-day strip shown on the page.
            df = datetime.datetime.strptime(date,'%Y-%m-%d')
            number_of_days = 7
            date_list = []
            unsliced = []
            week_list = []
            shortweek = ['MON','TUE','WED','THU','FRI','SAT','SUN']
            for day in range(number_of_days):
                a_date = (df + datetime.timedelta(days = day)).isoformat()
                unsliced.append(a_date[0:10])   # 'YYYY-MM-DD'
                date_list.append(a_date[8:10])  # 'DD'
            for day in range(number_of_days):
                tmp = date_list[day]
                # NOTE(review): the weekday label is derived from the
                # day-of-month string alone, not from the full date — this
                # looks fragile; confirm it matches the intended layout.
                tm = datetime.datetime.strptime(date_list[day],'%d')
                fm = tm.weekday()
                if fm == 6:
                    fm = 0
                else:
                    fm = fm+1
                week_list.append(shortweek[fm])
            currday = df.weekday()
            currweek = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']
            curday = currweek[currday]
            shortday = shortweek[currday]
            # flag becomes True when no fitness plan exists for this weekday.
            flag = False
            try:
                exe = user.fitness.get(day=curday)
            except ObjectDoesNotExist:
                flag = True
            if flag == False:
                exena = exe.exercisename.all()
                # Per-exercise sets/reps record, "" when none exists yet.
                exenaquant = []
                for i in exena:
                    try:
                        quant = quantyrepssets.objects.get(Q(user=user) & Q(exername=i) & Q(day=curday))
                        exenaquant.append(quant)
                    except ObjectDoesNotExist:
                        exenaquant.append("")
                # One log row per user per date; created lazily on first view.
                try:
                    log = exlogs.objects.get(Q(date=df) & Q(us=user))
                except ObjectDoesNotExist:
                    log = exlogs.objects.create(us=user,date=df)
                # Planned exercises not yet logged today.
                free = []
                exlog = log.exercisename.all()
                for i in exena:
                    if i not in exlog:
                        free.append(i)
                # Quantities for each logged exercise; count tracks how many
                # logged items belong to today's plan.
                logquant = []
                for i in exlog:
                    if i in exena:
                        count+=1
                    try:
                        quant = quantyrepssets.objects.get(Q(user=user) & Q(exername=i) & Q(day=curday))
                        logquant.append(quant)
                    except ObjectDoesNotExist:
                        logquant.append("")
                # Completion percentage: 100 when the whole plan is logged,
                # otherwise scaled down by the remaining count.
                if count != 0:
                    if exena.count() != count:
                        count = int(100/(count+1))
                    else:
                        count = int(100)
                if request.method == "POST":
                    if 'cal' in request.POST:
                        # Calendar jump: rebuild the widget's MM?DD?YYYY
                        # value as YYYY-MM-DD and reload on that date.
                        inc = request.POST['incoming']
                        year = inc[-4:]
                        month = inc[0:2]
                        da = inc[3:5]
                        ur = year+"-"+month+"-"+da
                        return redirect('exercise',ur)
                    if 'work' in request.POST:
                        # Log one exercise together with its sets/reps.
                        fo = request.POST['tags']
                        qu = request.POST['quan']
                        qua = request.POST['quans']
                        item = exercise.objects.get(id=fo)
                        if item not in exlog:
                            log.exercisename.add(item)
                            try:
                                quant = quantyrepssets.objects.get(Q(user=user) & Q(exername=item) & Q(day=curday))
                            except ObjectDoesNotExist:
                                quant = quantyrepssets.objects.create(user=user,exername=item,day=curday)
                            quant.quantsets += int(qu)
                            quant.quantreps += int(qua)
                            quant.save()
                            messages.success(request,"Exercise Log Updated")
                            return redirect('exercise',date)
                        else:
                            # Already logged today: just bump the quantities.
                            quant = quantyrepssets.objects.get(Q(user=user) & Q(exername=item) & Q(day=curday))
                            quant.quantsets += int(qu)
                            quant.quantreps += int(qua)
                            quant.save()
                            messages.success(request,"Quantity Updated")
                            return redirect('exercise',date)
                    if 'exsave' in request.POST:
                        # Checkbox sync: checked items get added to the log;
                        # boxes absent from POST (unchecked) raise
                        # MultiValueDictKeyError and ad-hoc items are removed.
                        l = list(set(chain(free,exlog)))
                        for meal in l:
                            try:
                                checker = request.POST[str(meal.id)]
                                if checker == "on":
                                    if meal not in exlog:
                                        log.exercisename.add(meal)
                                    messages.success(request,"Exercise Logs Updated")
                                    log.save()
                            except MultiValueDictKeyError:
                                if meal not in exena:
                                    quant = quantyrepssets.objects.get(Q(user=user) & Q(exername=meal) & Q(day=curday))
                                    quant.delete()
                                log.exercisename.remove(meal)
                                messages.success(request,"Exercise Logs Erased!")
                                log.save()
                        return redirect('exercise',date)
                parms = {
                    'title':title,
                    'day':curday,
                    'exercises':exercise.objects.all(),
                    'exercise':zip(exena,exenaquant),
                    'date':datetime.date.today(),
                    'free':zip(free,exenaquant),
                    'exlog':zip(exlog,logquant),
                    'count':count,
                    'week_list':zip(week_list,date_list,unsliced),
                }
            else:
                # No plan for today: only calendar navigation is available.
                if request.method == "POST":
                    if 'cal' in request.POST:
                        inc = request.POST['incoming']
                        year = inc[-4:]
                        month = inc[0:2]
                        da = inc[3:5]
                        ur = year+"-"+month+"-"+da
                        return redirect('foodplans',ur)
                parms = {
                    'title':title,
                    'day':curday,
                    'date':datetime.date.today(),
                    'week_list':zip(week_list,date_list,unsliced),
                }
            return render(request,'Exercises.html',parms)
        else:
            # Staff accounts have no personal exercise page.
            return render(request,'404.html')
    else:
        return redirect('login')
def update(pre, log, part):
    """Return the entries of *pre* missing from one meal slot of *log*.

    *part* selects the slot: 1=preworkout, 2=postworkout, 3=lunch,
    4=snacks, anything else=dinner.  Order of *pre* is preserved.
    """
    slot_names = {1: "preworkout", 2: "postworkout", 3: "lunch", 4: "snacks"}
    logged = getattr(log, slot_names.get(part, "dinner")).all()
    return [entry for entry in pre if entry not in logged]
def foodquantityreturn(user, mealer, listt, curday):
    """Look up the user's recorded quantity for each diet entry in *listt*.

    The result aligns 1:1 with *listt*: entries without a matching
    ``quantuser`` row (same food item, meal and day) contribute an empty
    string so templates can zip() the two lists safely.
    """
    def lookup(entry):
        try:
            return quantuser.objects.get(
                Q(user=user) & Q(foodit=entry.fooditem) &
                Q(meal=mealer) & Q(day=curday))
        except ObjectDoesNotExist:
            return ""
    return [lookup(entry) for entry in listt]
def rescale(values, new_min=0, new_max=100):
    """Linearly map *values* onto the range [new_min, new_max].

    The smallest input maps to ``new_min`` and the largest to ``new_max``.
    The previous implementation divided by zero when every input was equal;
    such degenerate inputs now all map to ``new_min``.

    Raises ValueError (from min()/max()) when *values* is empty.
    """
    old_min, old_max = min(values), max(values)
    span = old_max - old_min
    if span == 0:
        # No spread to rescale — pin everything to the lower bound.
        return [new_min for _ in values]
    scale = (new_max - new_min) / span
    return [scale * (v - old_min) + new_min for v in values]
def foodplans(request,date):
title = "Food Plans | KOWI Lifestyles"
user = request.user
if user.is_authenticated:
if user.is_staff != True:
foog = food.objects.all()
df = datetime.datetime.strptime(date,'%Y-%m-%d')
number_of_days = 7
date_list = []
unsliced = []
week_list = []
shortweek = ['MON','TUE','WED','THU','FRI','SAT','SUN']
for day in range(number_of_days):
a_date = (df + datetime.timedelta(days = day)).isoformat()
unsliced.append(a_date[0:10])
date_list.append(a_date[8:10])
for day in range(number_of_days):
tmp = date_list[day]
tm = datetime.datetime.strptime(date_list[day],'%d')
fm = tm.weekday()
if fm == 6:
fm = 0
else:
fm = fm+1
week_list.append(shortweek[fm])
currday = df.weekday()
currweek = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']
curday = currweek[currday]
shortday = shortweek[currday]
flag = False
try:
diet = user.diets.get(day=curday)
except ObjectDoesNotExist:
flag = True
pre = diet.preworkout.all()
post = diet.postworkout.all()
lunch = diet.lunch.all()
snacks = diet.snacks.all()
dinner = diet.dinner.all()
try:
logg = logs.objects.get(Q(date=df) & | |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.example.library_v1.services.library_service import LibraryServiceAsyncClient
from google.example.library_v1.services.library_service import LibraryServiceClient
from google.example.library_v1.services.library_service import pagers
from google.example.library_v1.services.library_service import transports
from google.example.library_v1.services.library_service.transports.base import _GOOGLE_AUTH_VERSION
from google.example.library_v1.types import library
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
# Skip marker for tests that exercise google-auth < 1.25.0 behavior.
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
    packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
    reason="This test requires google-auth < 1.25.0",
)
# Skip marker for tests that exercise google-auth >= 1.25.0 behavior.
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
    packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
    reason="This test requires google-auth >= 1.25.0",
)
def client_cert_source_callback():
    """Stub mTLS client-certificate source returning placeholder bytes."""
    cert, key = b"cert bytes", b"key bytes"
    return cert, key
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a stand-in default endpoint for mTLS endpoint testing.

    Localhost defaults are replaced with "foo.googleapis.com" so the client
    can derive a distinct mTLS endpoint; other defaults pass through.
    """
    endpoint = client.DEFAULT_ENDPOINT
    if "localhost" not in endpoint:
        return endpoint
    return "foo.googleapis.com"
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint inserts ``.mtls`` for googleapis hosts,
    leaves already-mtls and non-Google hosts unchanged, and maps None to
    None."""
    cases = [
        ("example.googleapis.com", "example.mtls.googleapis.com"),
        ("example.mtls.googleapis.com", "example.mtls.googleapis.com"),
        ("example.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("example.mtls.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("api.example.com", "api.example.com"),
    ]
    assert LibraryServiceClient._get_default_mtls_endpoint(None) is None
    for endpoint, expected in cases:
        assert LibraryServiceClient._get_default_mtls_endpoint(endpoint) == expected
@pytest.mark.parametrize("client_class", [
    LibraryServiceClient,
    LibraryServiceAsyncClient,
])
def test_library_service_client_from_service_account_info(client_class):
    """from_service_account_info() should build a client carrying the
    credentials produced by the service-account factory and targeting the
    default host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == 'library-example.googleapis.com:443'
@pytest.mark.parametrize("transport_class,transport_name", [
    (transports.LibraryServiceGrpcTransport, "grpc"),
    (transports.LibraryServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_library_service_client_service_account_always_use_jwt(transport_class, transport_name):
    """always_use_jwt_access=True must enable the self-signed JWT path on the
    service-account credentials; False must leave it untouched."""
    for jwt_flag in (True, False):
        with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
            sa_creds = service_account.Credentials(None, None, None)
            transport_class(credentials=sa_creds, always_use_jwt_access=jwt_flag)
            if jwt_flag:
                use_jwt.assert_called_once_with(True)
            else:
                use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [
    LibraryServiceClient,
    LibraryServiceAsyncClient,
])
def test_library_service_client_from_service_account_file(client_class):
    """Both file-based constructors (file and json alias) wire the factory
    credentials into the transport and target the default host."""
    fake_creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
        factory.return_value = fake_creds
        for constructor in (client_class.from_service_account_file,
                            client_class.from_service_account_json):
            client = constructor("dummy/file/path.json")
            assert client.transport._credentials == fake_creds
            assert isinstance(client, client_class)
        assert client.transport._host == 'library-example.googleapis.com:443'
def test_library_service_client_get_transport_class():
    """get_transport_class returns a known transport by default and the gRPC
    transport when asked for by name."""
    default_transport = LibraryServiceClient.get_transport_class()
    assert default_transport in [
        transports.LibraryServiceGrpcTransport,
    ]
    named_transport = LibraryServiceClient.get_transport_class("grpc")
    assert named_transport == transports.LibraryServiceGrpcTransport
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (LibraryServiceClient, transports.LibraryServiceGrpcTransport, "grpc"),
    (LibraryServiceAsyncClient, transports.LibraryServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
@mock.patch.object(LibraryServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(LibraryServiceClient))
@mock.patch.object(LibraryServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(LibraryServiceAsyncClient))
def test_library_service_client_client_options(client_class, transport_class, transport_name):
    """Verify how client_options and GOOGLE_API_USE_MTLS_ENDPOINT drive
    transport construction: explicit transport objects/names, a custom
    api_endpoint, the three env values ("never"/"always"/invalid), an invalid
    GOOGLE_API_USE_CLIENT_CERTIFICATE value, and quota_project_id forwarding.
    """
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(LibraryServiceClient, 'get_transport_class') as gtc:
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials()
        )
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(LibraryServiceClient, 'get_transport_class') as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never": the plain default endpoint must be used.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always": the mTLS endpoint must be used even without a client cert.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class()
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
        with pytest.raises(ValueError):
            client = client_class()
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
    (LibraryServiceClient, transports.LibraryServiceGrpcTransport, "grpc", "true"),
    (LibraryServiceAsyncClient, transports.LibraryServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"),
    (LibraryServiceClient, transports.LibraryServiceGrpcTransport, "grpc", "false"),
    (LibraryServiceAsyncClient, transports.LibraryServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"),
])
@mock.patch.object(LibraryServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(LibraryServiceClient))
@mock.patch.object(LibraryServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(LibraryServiceAsyncClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_library_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
    """With GOOGLE_API_USE_MTLS_ENDPOINT=auto, the endpoint switches to mTLS
    only when GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" AND a client cert is
    available (explicit client_cert_source or ADC-provided)."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch.object(transport_class, '__init__') as patched:
            with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
                with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
                    # NOTE(review): `client` here is still the instance leaked
                    # from the previous with-block; the new client is only
                    # constructed below. Works because DEFAULT_*_ENDPOINT are
                    # class-level attributes -- confirm this is intentional.
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class()
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch.object(transport_class, '__init__') as patched:
            with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
                patched.return_value = None
                client = client_class()
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (LibraryServiceClient, transports.LibraryServiceGrpcTransport, "grpc"),
    (LibraryServiceAsyncClient, transports.LibraryServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_library_service_client_client_options_scopes(client_class, transport_class, transport_name):
    """User-supplied OAuth scopes are forwarded verbatim to the transport."""
    scope_options = client_options.ClientOptions(
        scopes=["1", "2"],
    )
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=scope_options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (LibraryServiceClient, transports.LibraryServiceGrpcTransport, "grpc"),
    (LibraryServiceAsyncClient, transports.LibraryServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_library_service_client_client_options_credentials_file(client_class, transport_class, transport_name):
    """A credentials_file option is forwarded verbatim to the transport."""
    file_options = client_options.ClientOptions(
        credentials_file="credentials.json"
    )
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=file_options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_library_service_client_client_options_from_dict():
    """A plain dict for client_options must behave like a ClientOptions object."""
    transport_path = 'google.example.library_v1.services.library_service.transports.LibraryServiceGrpcTransport.__init__'
    with mock.patch(transport_path) as grpc_transport:
        grpc_transport.return_value = None
        client = LibraryServiceClient(
            client_options={'api_endpoint': 'squid.clam.whelk'}
        )
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_create_shelf(transport: str = 'grpc', request_type=library.CreateShelfRequest):
    """CreateShelf: the gRPC stub must be called exactly once with the request,
    and the mocked Shelf fields must surface on the returned response."""
    client = LibraryServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_shelf),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = library.Shelf(
            name='name_value',
            theme='theme_value',
        )
        response = client.create_shelf(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == library.CreateShelfRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, library.Shelf)
    assert response.name == 'name_value'
    assert response.theme == 'theme_value'
def test_create_shelf_from_dict():
    """Re-run the CreateShelf test with a dict-typed request."""
    test_create_shelf(request_type=dict)
def test_create_shelf_empty_call():
    """Calling create_shelf() with no arguments must still hit the stub with a
    default-constructed CreateShelfRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = LibraryServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_shelf),
            '__call__') as call:
        client.create_shelf()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == library.CreateShelfRequest()
@pytest.mark.asyncio
async def test_create_shelf_async(transport: str = 'grpc_asyncio', request_type=library.CreateShelfRequest):
client = LibraryServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_shelf),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(library.Shelf(
name='name_value',
theme='theme_value',
))
response = await client.create_shelf(request)
# Establish that the underlying gRPC stub | |
['AMAPA', '/AP'],
['AMAZONAS', '/AM'],
['BAHIA', '/BA'],
['CEARA', '/CE'],
['DISTRITO FEDERAL', '/DF'],
['ESPIRITO SANTO', '/ES'],
['GOIAS', '/GO'],
['MARANHAO', '/MA'],
['MATO GROSSO DO SUL', '/MS'],
['MATO GROSSO', '/MT'],
['MINAS GERAIS', '/MG'],
['PARAIBA', '/PB'],
['PARANA', '/PR'],
['PERNAMBUCO', '/PE'],
['PIAUI', '/PI'],
['RIO DE JANEIRO', '/RJ'],
['RIO GRANDE DO NORTE', '/RN'],
['RIO GRANDE DO SUL', '/RS'],
['RONDONIA', '/RO'],
['RORAIMA', '/RR'],
['SANTA CATARINA', '/SC'],
['SAO PAULO', '/SP'],
['SERGIPE', '/SE'],
['PARA', '/PA'],
['TOCANTINS', '/TO']
]
for item in estados:
string = string.replace(item[1],item[0])
return string
def siglas():
    """Return the list of the 27 Brazilian state (UF) abbreviations, in the
    fixed order used throughout this scraper."""
    ufs = ('AC AL AP AM BA CE DF ES GO MA MS MT MG PB PR PE PI '
           'RJ RN RS RO RR SC SP SE PA TO')
    return ufs.split()
# cleanup helper functions (funções de limpeza)
def limpar(fonte):
    """Generic cleanup of a scraped HTML text fragment.

    Collapses newlines/double spaces, strips leading quote/angle-bracket
    residue, removes tabs and '/#' markers, and trims surrounding spaces and
    hyphens.

    Fix: the original repeated ``lstrip(' ')`` six times and ``strip(' ')``
    five times; ``lstrip``/``strip`` already remove *all* matching leading or
    trailing characters in one call, so the duplicates were pure redundancy
    and have been removed.  The remaining call order is preserved exactly
    (e.g. strip spaces, then hyphens, then spaces again), since it is
    order-sensitive.

    :param fonte: raw string scraped from an HTML page
    :return: cleaned string
    """
    fonte = fonte.replace('\n', ' ')
    # Two passes: collapses up to quadruple spaces into a single one,
    # matching the original behavior (not a full whitespace collapse).
    fonte = fonte.replace('  ', ' ')
    fonte = fonte.replace('  ', ' ')
    fonte = fonte.lstrip(' ')
    fonte = fonte.lstrip('"')
    fonte = fonte.lstrip('>')
    fonte = fonte.replace('  ', ' ')
    fonte = fonte.replace('\t', '')
    fonte = fonte.replace('/#', '')
    fonte = fonte.strip(' ')
    fonte = fonte.strip('-')
    fonte = fonte.strip(' ')
    return fonte
def limpar_numero(numero):
    """Strip the red-bold HTML markup from a case number and left-pad it with
    zeros to at least four digits (longer numbers pass through unchanged)."""
    numero = numero.replace('<FONT COLOR=RED><B>', '')
    numero = numero.replace('</B></FONT>', '')
    return numero.rjust(4, '0')
def limpar_classe(string):
    """Normalize a case-class string: generic cleanup via limpar() and
    collapse the many scraped/OCR variants of the ADI class name, plus ADPF.

    Bug fix: the original fell off the end without a ``return``, so every
    caller received ``None``; it now returns the cleaned string like its
    sibling helpers (limpar, limpar_numero, limpar_decisao).

    NOTE(review): the 'PACAO...'/'sACAO...' replacements are dead code --
    the earlier bare 'CAO DIRETA...' replacement already rewrites their
    substring. Order preserved to keep behavior identical.
    """
    string = limpar(string)
    string = string.replace('ACAO DIRETA DE INCONSTITUCIONALIDADE','ADI')
    string = string.replace('ACAO DIRETA DE INCONSTITUCI0NALIDADE','ADI')
    string = string.replace('7CAO DIRETA DE INCONSTITUCIONALIDADE','ADI')
    string = string.replace('01CAO DIRETA DE INCONSTITUCIONALIDADE','ADI')
    string = string.replace('CAO DIRETA DE INCONSTITUCIONALIDADE','ADI')
    string = string.replace('PACAO DIRETA DE INCONSTITUCIONALIDADE','ADI')
    string = string.replace('sACAO DIRETA DE INCONSTITUCIONALIDADE','ADI')
    string = string.replace('ARGUICAO DE DESCUMPRIMENTO DE PRECEITO FUNDAMENTAL','ADPF')
    return string
def limpar_cln(string):
    """Normalize a class-name string: uppercase, de-accent, and canonicalize
    the '(MED. LIMINAR)' qualifier spellings.

    Bug fix: the original fell off the end without a ``return``, so every
    caller received ``None``; it now returns the cleaned string like its
    sibling helpers.
    """
    string = string.upper()
    string = remover_acentos(string)
    string = string.replace('( MED', '(MED')
    string = string.replace('E(MED', 'E (MED')
    string = string.replace('(LIMINAR)', '(MED. LIMINAR)')
    string = string.replace('E MED.', 'E (MED')
    string = string.replace('CAUTELAR', 'LIMINAR')
    return string
def limpar_decisao(string):
    """Normalize a decision string: drop newlines/tabs, remove triple-space
    runs, collapse double spaces, uppercase and strip accents."""
    # Same substitution order as before -- it is order-sensitive.
    for alvo, novo in (('\n', ''), ('\t', ''), ('   ', ''), ('  ', ' ')):
        string = string.replace(alvo, novo)
    return remover_acentos(string.upper())
def limpar_arquivo(nomedoarquivo):
    """Truncate *nomedoarquivo* to zero bytes (creating it if missing)."""
    # Opening in 'w' mode truncates; nothing needs to be written.
    with open(nomedoarquivo, mode='w', encoding="utf-8", newline=''):
        pass
def write_csv_header(nomedoarquivo, string_campos):
    """Ensure *nomedoarquivo* (in the current working directory) starts with
    the given comma-separated header.

    If the file exists and its first 100 characters already contain the first
    field name, nothing is done.  Otherwise the file is (re)created with just
    the header row.

    Fixes: the original duplicated the create-and-write logic in two branches
    and left file handles open on exceptions (plus a needless 'r+' mode for a
    pure read); both branches now share one write path guarded by `with`.

    NOTE(review): as before, an existing file *without* the header is
    truncated, discarding its contents -- confirm this data loss is intended.

    :param nomedoarquivo: CSV file name, looked up via os.listdir()
    :param string_campos: comma-separated field names (trailing newline ok)
    """
    string_campos = string_campos.replace('\n', '')
    lista_de_campos = string_campos.split(',')
    precisa_cabecalho = True
    if nomedoarquivo in os.listdir():
        with open(nomedoarquivo, mode='r',
                  encoding="utf-8", newline='') as arquivoaberto:
            html = arquivoaberto.read()
        if lista_de_campos[0] in html[:100]:
            precisa_cabecalho = False
    if precisa_cabecalho:
        with open(nomedoarquivo, mode='w',
                  encoding="utf-8", newline='') as arquivoaberto:
            csv.writer(arquivoaberto, delimiter=',').writerow(lista_de_campos)
def esperar(segundos, ciclos, variavel0):
    """Throttle helper: sleep *segundos* seconds whenever the loop counter
    *variavel0* is a non-zero multiple of *ciclos* (logging the pause)."""
    if variavel0 != 0 and variavel0 % ciclos == 0:
        print('espera ' + str(variavel0))
        time.sleep(segundos)
def write_csv_line(nomedoarquivo, dados):
    """Append a single CSV row *dados* to *nomedoarquivo*; an empty list is
    silently ignored."""
    if dados != []:
        with open(nomedoarquivo, mode='a+',
                  encoding="utf-8", newline='') as saida:
            escritor = csv.writer(saida, delimiter=',', quotechar='"')
            escritor.writerow(dados)
def write_csv_lines(nomedoarquivo, dados):
    """Append multiple CSV rows *dados* to *nomedoarquivo*; an empty list is
    silently ignored."""
    if dados != []:
        with open(nomedoarquivo, mode='a+',
                  encoding="utf-8", newline='') as saida:
            escritor = csv.writer(saida, delimiter=',', quotechar='"')
            escritor.writerows(dados)
def extrai_acordaos_da_string (arquivo_a_extrair, path): # path needs a trailing separator (double backslash on Windows)
    """Parse a saved STF search-results HTML file and extract every published
    collegiate ruling (acordao) it contains.

    :param arquivo_a_extrair: file name expected inside *path*
    :param path: directory path, with trailing separator, holding the file
    :return: tuple ``(filename, adi_decisao, all_rulings, principal, agravo,
        embargos, qo, outros)``; ``([], 'NA', [], [], [], [], [], [])`` when
        the file is absent from *path*.

    NOTE(review): the per-type buckets (acordaos_adi, acordaos_emb, ...) are
    reset at the top of every loop iteration, so at most the *last* ruling
    survives in each bucket; also acordao_tipo is set to 'EMBARGOS'/'AGRAVO'
    but compared against 'EMB'/'AGR' below, so those buckets never fill.
    Confirm whether this is intended before reusing the return values.
    """
    if arquivo_a_extrair in os.listdir(path):
        nome_do_arquivo = str(path+arquivo_a_extrair)
        acordaos = carregar_arquivo (nome_do_arquivo)
        # print (arquivo_a_extrair)
        # Number of rulings, as announced by the results page itself.
        n_acordaos = extrair(acordaos,'Documentos encontrados: ','</td>')
        acordaos_publicados = []
        adi_decisao = 'NA'
        acordaos_publicados = []
        acordaos_adi = []
        acordaos_agr = []
        acordaos_emb = []
        acordaos_qo = []
        acordaos_outros = []
        if "Nenhum registro encontrado" in acordaos:
            decisao_colegiada = []
        else:
            decisao_colegiada = []
            for decisoes in range (int(n_acordaos)):
                # Per-iteration reset of buckets and fields (see NOTE above).
                acordaos_adi = []
                acordaos_emb = []
                acordaos_agr = []
                acordaos_qo = []
                acordaos_outros = []
                lista_processos_citados = []
                lista_procesoss_citados_com_tema = []
                acordao_tipo = 'NA'
                processo_juris = 'NA'
                relator_juris = 'NA'
                data_acordao = 'NA'
                orgao_julgador_acordao = 'NA'
                publicacao_acordao = 'NA'
                ementa = 'NA'
                decisao_juris = 'NA'
                legislacao = 'NA'
                observacao = 'NA'
                doutrina = 'NA'
                acordaos = acordaos.replace ('/n/n','/n')
                acordaos = acordaos.replace ('/t','')
                # Case identifier: text after the QueryString HTML comment marker.
                processo_juris = extrair(acordaos,'''<!-- Término do trecho que passa informações para o QueryString (Pesquisa Simultânea de Jurisprudência) --''', '<br />').upper()
                processo_juris = processo_juris.replace('AÇÃO DIRETA DE INCONSTITUCIONALIDADE', 'ADI')
                processo_juris = processo_juris.replace('ACAO DIRETA DE INCONSTITUCIONALIDADE', 'ADI')
                if 'ADI' in arquivo_a_extrair:
                    processo_juris = processo_juris.replace('AÇÃO DECLARATÓRIA DE CONSTITUCIONALIDADE', 'ADI')
                processo_juris = processo_juris.replace('MEDIDA CAUTELAR', 'MC')
                processo_juris = processo_juris.replace('\n', '')
                processo_juris = processo_juris.replace('\t', '')
                processo_juris = processo_juris.replace('>', '')
                processo_juris = processo_juris.replace('REFERENDO NA MC','MC (REFERENDO)')
                processo_juris = processo_juris.replace('REFERENDO NOS EMB.DECL.','EMB.DECL. (REFERENDO)')
                processo_juris = processo_juris.replace('REFERENDO NO AG.REG.','AG.REG (REFERENDO)')
                processo_juris = processo_juris.replace('SEGUNDOS ','')
                processo_juris = processo_juris.replace('SEGUNDO','')
                processo_juris = processo_juris.replace('TERCEIROS','')
                # Classify the ruling by the identifier prefix.
                acordao_tipo = 'NA'
                if processo_juris[0:3] == "MC ":
                    acordao_tipo = "MC"
                elif processo_juris[0:3] == "EMB":
                    acordao_tipo = 'EMBARGOS'
                elif processo_juris[0:2] == "AG":
                    acordao_tipo = 'AGRAVO'
                elif processo_juris[0:3] == "QUE":
                    acordao_tipo = 'QO'
                elif processo_juris[0:3] == "ADI":
                    acordao_tipo = 'PRINCIPAL'
                else:
                    acordao_tipo = 'OUTROS'
                # Rapporteur name, without the "MIN." honorific or accents.
                relator_juris = extrair(acordaos, 'Relator(a):  ', '<br />').upper()
                relator_juris = relator_juris.lstrip(' ')
                relator_juris = relator_juris.lstrip('MIN.')
                relator_juris = relator_juris.lstrip(' ')
                relator_juris = remover_acentos(relator_juris)
                data_acordao = extrair(acordaos, 'Julgamento: ', ' ')
                data_acordao = data_acordao.replace('\t','')
                orgao_julgador_acordao = extrair(acordaos, 'Órgão Julgador: ', '<br />')
                publicacao_acordao = extrair (acordaos, '''<PRE><span style='font-family:tahoma, verdana, arial, sans-serif;font-size:1.1 em;font-weight:bold'>''', '</PRE>')
                ementa = extrair (acordaos, '''<p><div style="line-height: 150%;text-align: justify;">''', '</div>')
                decisao_juris = extrair (acordaos, '''<p><div style="text-align:justify; color: #385260; font-weight: normal; font-size: 11px">''', '</div>')
                legislacao = extrair (acordaos, '''Legislação</strong></p>''', '</PRE>')
                legislacao = legislacao.replace('\t','')
                legislacao = legislacao.replace('\n','')
                observacao = extrair (acordaos, '''<p><strong>Observação</strong></p>''', '</PRE>')
                # The "Observação" section lists cited rulings; scrub the
                # chamber tags and <a href> markup, then split into
                # "(theme) case, case, ..." groups.
                if 'Acórdão(s) citado(s)' in acordaos and 'Nenhum registro encontrado' not in acordaos and 'AGUARDANDO INDEXAÇÃO' not in acordaos:
                    observacao = observacao.replace ('(s)','')
                    observacao = observacao.replace ('(2ªT)','')
                    observacao = observacao.replace ('(1ªT)','')
                    observacao = observacao.replace ('(TP)','')
                    n_cit = observacao.count('href')
                    for links in range (n_cit):
                        inicio = observacao.find ('href')
                        fim = observacao.find ('>',inicio)
                        retirar = observacao[inicio:fim]
                        observacao = observacao.replace(retirar,'')
                    observacao = observacao.replace('\n','')
                    observacao = observacao.replace('<a>','')
                    observacao = observacao.replace('<a >','')
                    observacao = observacao.replace('</a>','')
                    observacao = observacao.replace(' ,',',')
                    observacao = observacao.replace(' .','.')
                    observacao = observacao.replace('.','')
                    observacao = observacao.split('(')[1:]
                    if observacao != [] and 'Número de páginas' in observacao[-1]:
                        observacao[-1] = extrair (observacao[-1],'','Número de páginas')
                    lista_processos_citados = []
                    lista_procesoss_citados_com_tema = []
                    for obs in range (len(observacao)):
                        elemento = observacao[obs]
                        elemento = elemento.split(')')
                        tema = [elemento.pop(0)]
                        elemento = str(elemento).split(',')
                        for item in range (len(elemento)):
                            processo_citado = elemento[item]
                            processo_citado = processo_citado.lstrip('[')
                            processo_citado = processo_citado.lstrip("]")
                            processo_citado = processo_citado.lstrip(' ')
                            processo_citado = processo_citado.lstrip("'")
                            processo_citado_e_tema = processo_citado + ',' + str(tema)
                            lista_processos_citados.append(processo_citado)
                            lista_procesoss_citados_com_tema.append(processo_citado_e_tema)
                doutrina = extrair(acordaos, '''<p><strong>Doutrina</strong></p>''', '</PRE>')
                decisao_colegiada = [arquivo_a_extrair, acordao_tipo, processo_juris, relator_juris, data_acordao, orgao_julgador_acordao, publicacao_acordao, ementa, decisao_juris, legislacao, observacao, lista_processos_citados, lista_procesoss_citados_com_tema, doutrina]
                # print (decisao_colegiada)
                acordaos_publicados.append(decisao_colegiada)
                if acordao_tipo == 'PRINCIPAL':
                    acordaos_adi.append(decisao_colegiada)
                    adi_decisao = decisao_juris
                if acordao_tipo == 'EMB':
                    acordaos_emb.append(decisao_colegiada)
                if acordao_tipo == 'AGR':
                    acordaos_agr.append(decisao_colegiada)
                if acordao_tipo == 'QO':
                    acordaos_qo.append(decisao_colegiada)
                if acordao_tipo == 'OUTROS':
                    acordaos_outros.append(decisao_colegiada)
                # Advance past this ruling so the next iteration parses the
                # following QueryString marker.
                recortar = acordaos.find('<!-- Término do trecho que passa informações para o QueryString (Pesquisa Simultânea de Jurisprudência) --')
                acordaos = acordaos[recortar+len('<!-- Término do trecho que passa informações para o QueryString (Pesquisa Simultânea de Jurisprudência) --'):]
        return (arquivo_a_extrair,
                adi_decisao,
                acordaos_publicados,
                acordaos_adi,
                acordaos_agr,
                acordaos_emb,
                acordaos_qo,
                acordaos_outros)
    else:
        return ([], 'NA', [], [], [], [], [], [])
def extrai_mono_da_string (arquivo_a_extrair, path): # usar duas contra-barras depois do nome
if arquivo_a_extrair in os.listdir(path):
nome_do_arquivo = str(path+arquivo_a_extrair)
monocraticas = carregar_arquivo (nome_do_arquivo)
# print (arquivo_a_extrair)
# n_monocraticas = extrair(monocraticas,'Documentos encontrados: ','</td>')
n_monocraticas = monocraticas.count('img src="imagem/bt_imprimirpopup.gif" alt="Imprimir" style="position:relative;left:490px;top:-38px;margin-bottom:-55px;')
adi_decisao_mono = 'NA'
monocraticas_publicadas = []
monocraticas_adi = []
monocraticas_agr = []
monocraticas_emb = []
monocraticas_qo = []
monocraticas_outros = []
monocraticas_amicus = []
monocraticas_mc = []
monocraticas_publicadas = []
processo_juris = 'NA'
if "Nenhum registro encontrado" in monocraticas:
decisao_monocratica = []
else:
decisao_monocratica = []
for decisoes in range (int(n_monocraticas)):
monocraticas_adi = []
monocraticas_emb = []
monocraticas_agr = []
monocraticas_qo = []
monocraticas_outros = []
acordao_tipo = 'NA'
processo_juris = 'NA'
relator_juris = 'NA'
data_acordao = 'NA'
orgao_julgador_acordao = 'NA'
decisao_juris = 'NA'
legislacao = 'NA'
observacao = 'NA'
monocraticas = monocraticas.replace ('/n/n','/n')
monocraticas = monocraticas.replace ('/t','')
processo_juris = extrair(monocraticas,'''<img src="imagem/bt_imprimirpopup.gif" alt="Imprimir" style="position:relative;left:490px;top:-38px;margin-bottom:-55px;" />''', '<br />').upper()
processo_juris = processo_juris.replace('AÇÃO DIRETA DE INCONSTITUCIONALIDADE', 'ADI')
processo_juris = processo_juris.replace('ACAO DIRETA DE INCONSTITUCIONALIDADE', 'ADI')
processo_juris = processo_juris.replace('MEDIDA CAUTELAR', 'MC')
processo_juris = processo_juris.replace('\n', '')
processo_juris = processo_juris.replace('\t', '')
processo_juris = processo_juris.split('<STRONG>')[1]
acordao_tipo = 'na'
if "AMICUS" in processo_juris:
moocratica_tipo = "AMICUS"
elif 'MC' in processo_juris or 'CAUT' in processo_juris:
moocratica_tipo = "CAUT"
elif 'EMB.' in processo_juris:
moocratica_tipo = 'EMB'
elif 'AG.REG' in processo_juris:
moocratica_tipo = 'AGR'
elif 'ORDEM' in processo_juris or 'QO' in processo_juris:
moocratica_tipo = 'QO'
elif processo_juris[0:3] == | |
# gh_stars: 1-10
#!/usr/bin/env python3
###############################################################################
# Program: EPI-ClusT.py
# Type: Python Script
# Version: 1.0
# Author: <NAME>
# Description:  Empirical Phylogeny Informed Cluster Tool for identifying
# distance thresholds and defining clusters in phylogenetic trees
# License: MIT
###############################################################################
from queue import Queue
from treeswift import read_tree_newick
import PySimpleGUI as sg
import os
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
import math
import statistics
import numpy
NUM_THRESH = 1000 # number of thresholds to calculate genetic distance over
# cut out the current node's subtree (by setting all nodes' DELETED to True) and return list of leaves
def cut(node):
cluster = list()
descendants = Queue()
descendants.put(node)
while not descendants.empty():
descendant = descendants.get()
if descendant.DELETED:
continue
descendant.DELETED = True
descendant.left_dist = 0
descendant.right_dist = 0
descendant.edge_length = 0
if descendant.is_leaf():
cluster.append(str(descendant))
else:
for c in descendant.children:
descendants.put(c)
return cluster
# initialize properties of input tree and return set containing taxa of leaves
def prep(tree, support):
tree.resolve_polytomies()
tree.suppress_unifurcations()
leaves = set()
for node in tree.traverse_postorder():
if node.edge_length is None:
node.edge_length = 0
node.DELETED = False
if node.is_leaf():
leaves.add(str(node))
else:
try:
node.confidence = float(str(node))
except:
node.confidence = 100. # give edges without support values support 100
if node.confidence < support: # don't allow low-support edges
node.edge_length = float('inf')
return leaves
# split leaves into minimum number of clusters such that the maximum leaf pairwise distance is below some threshold
def min_clusters_threshold_max(tree,threshold,support):
    """Split the tree's leaves into the minimum number of clusters such that
    no cluster's maximum pairwise leaf distance exceeds *threshold*.

    Post-order sweep: each node tracks the farthest surviving leaf below its
    left and right child (left_dist/right_dist).  When the two sides together
    would exceed *threshold*, the deeper side's subtree is cut out as its own
    cluster.  Leaves never cut end up together in one final cluster.

    :param tree: treeswift tree (mutated in place via prep()/cut())
    :param threshold: maximum allowed within-cluster leaf pairwise distance
    :param support: minimum edge support; weaker edges are made infinite
    :return: list of clusters, each a list of leaf labels
    """
    leaves = prep(tree,support)
    clusters = list()
    for node in tree.traverse_postorder():
        # if I've already been handled, ignore me
        if node.DELETED:
            continue
        # find my undeleted max distances to leaf
        if node.is_leaf():
            node.left_dist = 0; node.right_dist = 0
        else:
            children = list(node.children)
            # both subtrees already removed: remove me too
            if children[0].DELETED and children[1].DELETED:
                cut(node); continue
            if children[0].DELETED:
                node.left_dist = 0
            else:
                node.left_dist = max(children[0].left_dist,children[0].right_dist) + children[0].edge_length
            if children[1].DELETED:
                node.right_dist = 0
            else:
                node.right_dist = max(children[1].left_dist,children[1].right_dist) + children[1].edge_length
        # if my kids are screwing things up, cut out the longer one
        # (leaves have left_dist == right_dist == 0, so they never trigger this)
        if node.left_dist + node.right_dist > threshold:
            if node.left_dist > node.right_dist:
                cluster = cut(children[0])
                node.left_dist = 0
            else:
                cluster = cut(children[1])
                node.right_dist = 0
            # add cluster
            if len(cluster) != 0:
                clusters.append(cluster)
                for leaf in cluster:
                    leaves.remove(leaf)
    # add all remaining leaves to a single cluster
    if len(leaves) != 0:
        clusters.append(list(leaves))
    return clusters
# min_clusters_threshold_max, but all clusters must define a clade
def min_clusters_threshold_max_clade(tree, threshold, support):
    """Variant of min_clusters_threshold_max in which every emitted cluster
    must form a clade: when a node's two sides together exceed *threshold*,
    BOTH child subtrees are cut out as separate clusters (instead of only
    the deeper one).

    :param tree: treeswift tree (mutated in place via prep()/cut())
    :param threshold: maximum allowed within-cluster leaf pairwise distance
    :param support: minimum edge support; weaker edges are made infinite
    :return: list of clusters, each a list of leaf labels
    """
    leaves = prep(tree, support)
    clusters = list()
    for node in tree.traverse_postorder():
        # if I've already been handled, ignore me
        if node.DELETED:
            continue
        # find my undeleted max distances to leaf
        if node.is_leaf():
            node.left_dist = 0
            node.right_dist = 0
        else:
            children = list(node.children)
            # both subtrees already removed: remove me too
            if children[0].DELETED and children[1].DELETED:
                cut(node)
                continue
            if children[0].DELETED:
                node.left_dist = 0
            else:
                node.left_dist = max(children[0].left_dist, children[0].right_dist) + children[0].edge_length
            if children[1].DELETED:
                node.right_dist = 0
            else:
                node.right_dist = max(children[1].left_dist, children[1].right_dist) + children[1].edge_length
        # if my kids are screwing things up, cut both
        # (leaves have left_dist == right_dist == 0, so they never trigger this)
        if node.left_dist + node.right_dist > threshold:
            cluster_l = cut(children[0])
            node.left_dist = 0
            cluster_r = cut(children[1])
            node.right_dist = 0
            # add cluster
            for cluster in (cluster_l, cluster_r):
                if len(cluster) != 0:
                    clusters.append(cluster)
                    for leaf in cluster:
                        leaves.remove(leaf)
    # add all remaining leaves to a single cluster
    if len(leaves) != 0:
        clusters.append(list(leaves))
    return clusters
# pick the threshold between 0 and "distance threshold" that maximizes number of (non-singleton) clusters
def auto_cluster(method, tree, threshold, support, display_fig):
    """Scan NUM_THRESH+1 evenly spaced thresholds in [0, *threshold*] and
    return the clustering (computed by *method*) at the threshold that
    maximizes the number of non-singleton clusters.

    Bug fix: the original recomputed ``best_t`` on *every* loop iteration
    (the assignment sat outside the ``if``), so the final clustering was
    always built at the last threshold scanned; the recorded winner
    (``raw_t``) was never used.  ``best_t`` is now derived from ``raw_t``
    after the scan.

    :param method: clustering function ``(tree, threshold, support) -> clusters``
    :param tree: treeswift tree; deep-copied per scan step since *method* mutates it
    :param threshold: upper bound of the distance-threshold scan
    :param support: support cutoff applied to the final clustering only
        (the scan itself runs with support disabled, -inf)
    :param display_fig: when True, dump the scan data to a text file and
        draw the clusters-vs-threshold bar chart on figure 2
    :return: best clustering (list of leaf-label lists)
    """
    from copy import deepcopy
    support_scan = float('-inf')  # support filtering is deferred to the final run
    if display_fig is True:
        distfile = open("EPI-ClusT_PlotData_NumClusters_by_DistanceThreshold.txt", 'w')
        distfile.write("Distance\tNumClusters\n")
    thresholds = [i*threshold/NUM_THRESH for i in range(NUM_THRESH+1)]
    best_num = -1
    raw_t = -1
    xs = []
    ys = []
    for i, t in enumerate(thresholds):
        sg.OneLineProgressMeter('EPI-ClusT', i+1, len(thresholds)-1, 'key', 'Computing best genetic distance threshold...', orientation='h')
        clusters = method(deepcopy(tree), t, support_scan)
        num_non_singleton = len([c for c in clusters if len(c) > 1])
        if display_fig is True:
            distfile.write("%s\t%s\n" % (t, num_non_singleton))
            xs.append(float(t))
            ys.append(int(num_non_singleton))
        if num_non_singleton > best_num:
            best_num = num_non_singleton
            raw_t = t
    best_t = float(round(raw_t, 3))
    # Re-cluster at the winning threshold, this time honoring the support cutoff.
    best = method(deepcopy(tree), best_t, support)
    # NOTE(review): `outfile` is assumed to be a module-level file handle
    # opened elsewhere in the script -- confirm before using this standalone.
    outfile.write("Genetic Distance Uperbound: %s\n" % threshold)
    outfile.write("Best Distance Threshold: %s\n" % best_t)
    if display_fig is True:
        distfile.close()
        plt.figure(2)
        plt.bar(xs, ys, width=0.001)
        plt.ylabel('Number of Clusters')
        plt.xlabel('Genetic Distance Threshold')
    return best
# plot distance histogram
def gen_hist(tree, display_fig):
    """Collect all pairwise leaf distances from ``tree`` and draw their
    histogram on matplotlib figure 1.

    Parameters: ``tree`` must provide ``distance_matrix(leaf_labels=True)``;
    ``display_fig`` is kept for interface compatibility (it only guarded
    commented-out data-export code).

    Returns (counts, bin_edges) as produced by ``plt.hist``.

    Bug fix: the original called ``plt.hist`` three times (once to draw,
    once for the counts, once for the bins), recomputing and overplotting
    the same histogram; it is now drawn exactly once.
    """
    pw_dists = []
    distance_matrix = tree.distance_matrix(leaf_labels=True)
    distance_matrix_keys = list(distance_matrix.keys())
    for i, u in enumerate(distance_matrix_keys):
        sg.OneLineProgressMeter('EPI-ClusT', i + 1, len(distance_matrix_keys) - 1, 'key',
                                'Analyzing pairwise distances...', orientation='h')
        for v in distance_matrix[u].keys():
            pw_dists.append(distance_matrix[u][v])
    # heuristic bin count: sqrt(N) rounded up to the next multiple of 10
    bin_size = int(math.ceil(math.sqrt(len(pw_dists)) / 10.0)) * 10
    plt.figure(1)
    histarray, binsarray = plt.hist(pw_dists, bins=bin_size)[:2]
    plt.ylabel('Count')
    plt.xlabel('Sample Pairwise Genetic Distance')
    return histarray, binsarray
# generate edge list to visualize clusters in gephi
def generate_edge_list(tree, cluster_members):
    """Write a Gephi-compatible tab-separated edge list for the clusters.

    For a 2-sample cluster the pair is written directly; otherwise each
    sample is linked to its nearest later-indexed neighbour (by tree
    distance) within the same cluster.

    Fixes: the file is now opened with ``with`` so it is closed even on
    error, and the local handle no longer shadows the module-level
    ``outfile`` results file.
    """
    outname = "EPI-ClusT_Network_Diagram_Edge_List.txt"
    with open(outname, 'w') as edge_file:
        edge_file.write("Source\tTarget\n")
        distance_matrix = tree.distance_matrix(leaf_labels=True)
        for clustered_samples in cluster_members.values():
            if len(clustered_samples) == 2:
                edge_file.write("%s\t%s\n" % (clustered_samples[0], clustered_samples[1]))
                continue
            for i, id1 in enumerate(clustered_samples):
                dist = 1000  # sentinel larger than any expected distance
                edge_to = ''
                for id2 in clustered_samples[i + 1:]:
                    if distance_matrix[id1][id2] < dist:
                        dist = distance_matrix[id1][id2]
                        edge_to = id2
                if edge_to != '':
                    edge_file.write('%s\t%s\n' % (edge_to, id1))
if __name__ == "__main__":
# Render GUI window
passingfile = False
passingdist = False
passingsupp = False
window = ''
while passingfile is False or passingdist is False or passingsupp is False:
if window != '':
window.Close()
layout = [
[sg.Text("EPI-ClusT", font=('Helvetica', 24, 'bold'))],
[sg.Text("Empirical Phylogeny Informed Cluster Tool", font=('Helvetica', 16))],
[sg.Text("Written By: <NAME>, Johns Hopkins University\n", font=('Helvetica', 12))],
[sg.Text('Newick Tree File*:', font=('Helvetica', 13)), sg.InputText(font=('Helvetica 13'), key='infilename'), sg.FileBrowse(font=('Helvetica 13'))],
[sg.Text('Output Filename*:', font=('Helvetica', 13)), sg.InputText(font=('Helvetica 13'), default_text='EPI-ClusT_Results.txt', text_color='gray', key='outfilename')],
[sg.Text('Genetic Distance Threshold (optional):', font=('Helvetica 13')), sg.InputText(font=('Helvetica 13'), key='dist'), sg.Checkbox('Compute Best Distance Threshold', font=('Helvetica 13'), default=False, key='df')],
[sg.Text('Support Threshold (optional):', font=('Helvetica 13')), sg.InputText(font=('Helvetica 13'), key='support')],
[sg.Checkbox('Plot Clusters Histogram', font=('Helvetica 13'), default=False, key='plothist'), sg.Checkbox('Export Network Edge List', font=('Helvetica 13'), default=False, key='edge'), sg.Checkbox('Rooted Tree: Use Clade Support', font=('Helvetica 13'), default=False, key='rooted')],
[sg.OK('Analyze', font=('Helvetica', 13), size=(10, 2))]]
window = sg.Window('EPI-ClusT', layout)
event, values = window.Read()
# parse user arguments
if os.path.exists(values['infilename']) is not True:
sg.Popup("Error: Input tree not found.", font=('Helvetica', 13, 'bold'))
passingfile = False
else:
passingfile = True
try:
float(values['dist'])
if float(values['dist']) > 1 or float(values['dist']) < 0:
sg.Popup("Error: Genetic distance threshold must be between 0 and 1.", font=('Helvetica', 13, 'bold'))
passingdist = False
else:
passingdist = True
except ValueError:
if values['df'] is not True:
sg.Popup("Error: Genetic distance threshold must be between 0 and 1 or 'Compute Best Distance Threshold' must be selected.", font=('Helvetica', 13, 'bold'))
passingdist = False
else:
passingdist = True
if values['support'] != '':
try:
float(values['support'])
if float(values['support']) > 1 or float(values['support']) < 0:
sg.Popup("Error: Support threshold must be between 0 and 1.", font=('Helvetica', 13, 'bold'))
passingsupp = False
else:
passingsupp = True
except ValueError:
sg.Popup("Error: Support threshold must be between 0 and 1.", font=('Helvetica', 13, 'bold'))
passingsupp = False
else:
passingsupp = True
infile = open(values['infilename'], 'r')
outfile = open(values['outfilename'], 'w')
if values['support'] == '':
values['support'] = '-inf'
trees = list()
for line in infile:
if isinstance(line, bytes):
l = line.decode().strip()
else:
l = line.strip()
trees.append(read_tree_newick(l))
# run algorithm
outfile.write("** EPI-ClusT Results **\n")
outfile.write("Input File: %s\n" % values['infilename'])
outfile.write("Support Threshold: %s\n" % values['support'])
for t, tree in enumerate(trees):
if values['df'] is True:
gen_hist(tree, True)
plt.show(block=False)
# plot pairwise distances
visable = False
if values['plothist'] is True:
visable = True
if values['df'] is False:
outfile.write("Genetic Distance Threshold: %s\n" % values['dist'])
if values['rooted'] is True:
clusters = min_clusters_threshold_max_clade(tree, float(values['dist']), float(values['support']))
else:
clusters = min_clusters_threshold_max(tree, float(values['dist']), float(values['support']))
else:
d = float(sg.PopupGetText("Enter distance upperbound:\nThe best genetic distance up through this threshold will be computed.\nIf you are unsure, click 'Ok' to use the default upperbound of 0.10.",title='Enter Distance Upperbound',default_text="0.10", font=('Helvetica', 13)))
if values['rooted'] is True:
clusters = auto_cluster(min_clusters_threshold_max_clade, tree, float(d), float(values['support']), visable)
else:
clusters = auto_cluster(min_clusters_threshold_max, tree, float(d), float(values['support']), visable)
cluster_num = 1
clust_members = {}
for cluster in clusters:
if len(cluster) > 1:
for l in cluster:
if cluster_num in clust_members:
samplenames = clust_members[cluster_num]
samplenames.append(l)
clust_members[cluster_num] = samplenames
else:
samplenames = [l]
clust_members[cluster_num] = samplenames
cluster_num += 1
totalclusters = clust_members
cluster_num -= 1
outfile.write('Found %s clusters\n\n' % | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Train a neural network to classify image patches as street / no street.
Take (patch size)x(patch size) (3 color) patches and classify the center pixel.
If loss doesn't change after the first iterations, you have to re-run the
training.
"""
from __future__ import print_function
# import inspect
import imp
import sys
import os
import logging
import scipy
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import OneHotEncoder
from . import utils
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.DEBUG,
stream=sys.stdout)
def main(hypes_file):
    """
    Train a neural network with patches of patch_size x patch_size.

    (As given via the module network_path taken from the hypes file.)

    Parameters
    ----------
    hypes_file : str
        Path to a JSON file; must provide hypes['segmenter']
        ('network_path', 'fully', 'flatten'), hypes['data'] ('train',
        'serialization') and hypes['training'] ('batchsize', 'stride',
        'one_hot_encoding').
    """
    hypes = utils.load_hypes(hypes_file)
    print(hypes)
    network_path = hypes['segmenter']['network_path']
    train_images_json = hypes['data']['train']
    image_batch_size = hypes['training']['batchsize']
    assert image_batch_size >= 1
    assert hypes['training']['stride'] >= 1
    # Load (or read back from the npz cache) raw images + per-pixel labels.
    t = load_data_raw_images(hypes,
                             serialization_path=hypes['data']['serialization'],
                             images_json_path=train_images_json)
    features, labels = t
    logging.info("len(features)=%i", len(features))
    logging.info("features.shape=%s", features[0].shape)
    logging.info("labels.shape=%s", labels[0].shape)
    assert len(features) > 0
    # Rough memory estimate: boxed-int size times total element counts.
    mem_size = (sys.getsizeof(42) * len(features) * features[0].size +
                sys.getsizeof(42) * len(labels) * labels[0].size)
    logging.info("Loaded %i data images with their labels (approx %s)",
                 len(features),
                 utils.sizeof_fmt(mem_size))
    class_dict = utils.count_classes(labels)
    logging.info("Classes (abs): %s", class_dict)
    # Relative class frequencies (useful to spot class imbalance).
    class_dict_rel = {}
    total = sum(count for _, count in class_dict.items())
    for item, count in class_dict.items():
        class_dict_rel[item] = float(count) / total
    # Visual sanity check: first image, its mask, and the overlay.
    fig = plt.figure()
    ax1 = fig.add_subplot(2, 2, 1)
    ax2 = fig.add_subplot(2, 2, 2)
    ax3 = fig.add_subplot(2, 2, 3)
    ax1.imshow(scipy.misc.toimage(features[0]))
    ax2.imshow(scipy.misc.toimage(labels[0]))
    ax3.imshow(scipy.misc.toimage(features[0]))
    ax3.imshow(labels[0], cmap='jet', alpha=0.5)
    plt.show()
    logging.info("Classes (rel): %s", class_dict_rel)
    logging.info("## Network: %s", network_path)
    # Load the network definition module from an arbitrary file path.
    network = imp.load_source('sst.network', network_path)
    logging.info("Fully network: %s", str(hypes['segmenter']['fully']))
    # nn_params['code'] = inspect.getsource(network)
    if hypes["training"]["one_hot_encoding"]:
        label_enc = OneHotEncoder(sparse=False)
        label_enc.fit([[i] for i in range(2)])  # len(hypes['classes'])
    # get_features is only called so that the network can be properly
    # generated; it is not used for training here.
    labeled_patches = get_patches(hypes,
                                  features[:1],
                                  labels[:1],
                                  stride=hypes['training']['stride'])
    feats, _ = get_features(hypes, labeled_patches)
    net1 = network.generate_nnet(feats)
    # Generate training data and run training, one image batch at a time.
    for from_img in range(0, len(features), image_batch_size):
        to_img = from_img + image_batch_size
        logging.info("Training on batch %i - %i of %i total",
                     from_img,
                     to_img,
                     len(features))
        labeled_patches = get_patches(hypes,
                                      features[from_img:to_img],
                                      labels[from_img:to_img],
                                      stride=hypes['training']['stride'])
        if hypes["training"]["one_hot_encoding"]:
            # OneHotEncoder wants a column vector of class ids.
            labeled_patches[1] = np.reshape(labeled_patches[1], (-1, 1))
            labeled_patches[1] = label_enc.transform(labeled_patches[1])
        if hypes['segmenter']['flatten']:
            # Flatten each patch to a 1D feature vector.
            new_l = []
            for el in labeled_patches[0]:
                new_l.append(el.flatten())
            new_l = np.array(new_l)
            labeled_patches = (new_l, labeled_patches[1])
        logging.info(("labeled_patches[0].shape: %s , "
                      "labeled_patches[1].shape: %s"),
                     labeled_patches[0].shape,
                     labeled_patches[1].shape)
        net1 = train_nnet(hypes, labeled_patches, net1)
    network.serialize_model(hypes, net1)
def load_data_raw_images(hypes,
                         serialization_path='data.pickle',
                         images_json_path='data.json'):
    """
    Load color images (3 channels) and labels (as images).

    On the first run the raw image/mask files are read, mask colors are
    translated to class indices, and everything is cached as
    ``serialization_path + ".npz"``; later runs load that cache directly.

    Parameters
    ----------
    hypes : dict
        Needs hypes['classes'], each entry with a 'colors' list (a color
        is either the string 'default' or an RGB list/tuple).
    serialization_path : str
        Base path of the npz cache file.
    images_json_path : str
        Path to a JSON which is a list of dicts {'raw': path, 'mask': path}.

    Returns
    -------
    tuple : (featurevector list, label list)
    """
    logging.info("Start loading data...")
    data_source = serialization_path + ".npz"
    if not os.path.exists(data_source):
        # build lists of files which will be read
        train_filelist = utils.get_labeled_filelist(images_json_path)
        files_data, files_gt = [], []
        for train_el in train_filelist:
            file_data, file_gt = train_el['raw'], train_el['mask']
            files_data.append(file_data)
            files_gt.append(file_gt)
        # read files (data first); one dot per image as progress output
        print("Start reading images: ", end='')
        colored_image_features = []
        for img_path in files_data:
            print('.', end='')
            ac = utils.load_color_image_features(img_path)
            # if(ac.shape[0] == 188):  # TODO: Why is this skipped?
            colored_image_features.append(ac)
        print('')
        xs_colored = np.array(colored_image_features, copy=False)
        logging.info("Get dictionary to translate colors to classes...")
        # Map each RGB tuple to its class index; 'default' marks the
        # fallback class for colors not listed anywhere.
        col_to_class = {}
        default_class = 0
        for i, cl in enumerate(hypes['classes']):
            for color in cl['colors']:
                if color == 'default':
                    default_class = i
                else:
                    if isinstance(color, list):
                        color = tuple(color)
                    col_to_class[color] = i
        # read grayscale groundtruth
        logging.info("Read groundtruth...")
        defaulted_colors = set()
        yl = []
        for f in files_gt:
            img = scipy.misc.imread(f, mode='RGB')
            new_img = np.zeros((img.shape[0], img.shape[1]), dtype=int)
            # Translate every pixel color to a class index (slow pure-Python
            # loop; unknown colors are recorded and fall back to default).
            for i, row in enumerate(img):
                for j, pixel in enumerate(row):
                    pixel = tuple(pixel)
                    if pixel in col_to_class:
                        new_img[i][j] = col_to_class[pixel]
                    else:
                        defaulted_colors.add(pixel)
                        new_img[i][j] = default_class
            print(" %s" % f)
            yl.append(new_img)
        # yl = np.array(yl, dtype=int)  # Images can have different dimensions
        logging.info("Those colors were defaulted: %s", defaulted_colors)
        assert len(xs_colored) == len(yl), "len(xs_colored) != len(yl)"
        for i, (X, y) in enumerate(zip(xs_colored, yl), start=1):
            logging.info("Get labels (%i/%i)...", i, len(yl))
            # scipy.misc.imshow(X)
            # scipy.misc.imshow(y)
            assert X.shape[:2] == y.shape, \
                ("X.shape[1:]=%s and y.shape=%s" %
                 (X.shape[:2], y.shape))
            # NOTE(review): these asserts require every mask to contain both
            # class 0 and class 1, i.e. strictly binary labels -- confirm
            # this is intended before adding more classes.
            assert min(y.flatten()) == 0.0, \
                ("min(y)=%s" % str(min(y.flatten())))
            assert max(y.flatten()) == 1.0, \
                ("max(y)=%s" % str(max(y.flatten())))
        np.savez(serialization_path, xs_colored, yl)
    else:
        logging.info("!! Loaded pickled data" + "!" * 80)
        logging.info("Data source: %s", data_source)
        logging.info("This implies same test / training split as before.")
        npzfile = np.load(data_source)
        xs_colored = npzfile['arr_0']
        yl = npzfile['arr_1']
    return (xs_colored, yl)
def get_patches(hypes, xs, ys, stride):
    """
    Get a list of tuples (patch, label).

    Where label is int (1=street, 0=no street) and patch is a 2D-array of
    floats.

    Parameters
    ----------
    hypes : dict
        All relevant parameters of the model (e.g. patch_size and fully)
    xs : list
        Each element is an image with 3 channels (RGB), but normalized to
        [-1, 1]
    ys : list
        Each element is a label image of class indices
    stride : int
        The smaller this value, the more patches will be created.

    Returns
    -------
    tuple : (patches, labels)
        Two numpy arrays of the same length; labels are float (flattened
        label patches) in 'fully' mode, otherwise the int center-pixel class.
    """
    patch_size = hypes['segmenter']['patch_size']
    fully = hypes['segmenter']['fully']
    assert stride >= 1, "Stride must be at least 1"
    assert patch_size >= 1, "Patch size has to be >= 1"
    assert patch_size % 2 == 1, "Patch size should be odd"
    assert xs[0].shape[0] >= patch_size and xs[0].shape[1] >= patch_size, \
        ("Patch is too big for this image: img.shape = %s" % str(xs[0].shape))
    logging.info("Get patches of size: %i", patch_size)
    patches, labels = [], []
    # Bug fix: use integer division. (patch_size - 1) / 2 is a float on
    # Python 3 and range() rejects floats (TypeError). Also hoisted out of
    # the loop since it does not depend on the image.
    px_left_patchcenter = (patch_size - 1) // 2
    for X, y in zip(xs, ys):
        start_x = px_left_patchcenter
        end_x = X.shape[0] - px_left_patchcenter
        start_y = start_x
        end_y = X.shape[1] - px_left_patchcenter
        for patch_center_x in range(start_x, end_x + 1, stride):
            for patch_center_y in range(start_y, end_y + 1, stride):
                # Get patch from original image
                x_new = X[patch_center_x - px_left_patchcenter:
                          patch_center_x + px_left_patchcenter + 1,
                          patch_center_y - px_left_patchcenter:
                          patch_center_y + px_left_patchcenter + 1,
                          :]
                if x_new.shape != (patch_size, patch_size, 3):
                    # Patch was at the right / bottom border
                    print("Skip patch of shape %s" % str(x_new.shape))
                    continue
                if fully:
                    # Get labels of the whole patch and flatten to 1D
                    label_patch = y[patch_center_x - px_left_patchcenter:
                                    patch_center_x + px_left_patchcenter + 1,
                                    patch_center_y - px_left_patchcenter:
                                    patch_center_y + px_left_patchcenter + 1]
                    labels.append(label_patch.flatten())
                else:
                    # Classify only the center pixel
                    labels.append(y[patch_center_x][patch_center_y])
                patches.append(x_new)
    assert len(patches) == len(labels), "len(patches) != len(labels)"
    logging.info("%i patches were generated.", len(patches))
    logging.info("Data before make_equal: %i", len(labels))
    if 'make_equal' in hypes['training'] and hypes['training']['make_equal']:
        # Optional class balancing via the project helper.
        patches, labels = utils.make_equal(patches, labels)
    logging.info("Data after make_equal: %i", len(labels))
    if fully:
        return [np.array(patches, dtype=np.float32),
                np.array(labels, dtype=np.float32)]  # fully needs float labels
    else:
        return [np.array(patches, dtype=np.float32),
                np.array(labels, dtype=np.int32)]
def get_features(hypes, labeled_patches):
    """
    Get ready-to-use features from labeled patches.

    Parameters
    ----------
    hypes : dict
    labeled_patches : tuple (patches, labels)

    Returns
    -------
    tuple (feats, y)
        list of feature vectors and list of labels
    """
    feats, y = labeled_patches[0], labeled_patches[1]
    if not hypes['segmenter']['fully']:
        # Log how many examples each label value has.
        counter = {}
        for label in y:
            if not isinstance(label, int) and not isinstance(label, np.int32):
                # non-scalar labels become hashable tuples of ints
                label = tuple([int(el) for el in list(label)])
            counter[label] = counter.get(label, 0) + 1
        logging.info("Label distribution: %s", counter)
        logging.info("Feature vectors: %i", len(y))
    if not hypes['segmenter']['flatten']:
        # reorder channels: (H, W, 3) -> (3, H, W) per patch
        feats = np.array(
            [[patch[:, :, 0], patch[:, :, 1], patch[:, :, 2]] for patch in feats],
            dtype=np.float32)
    return (feats, y)
def train_nnet(hypes, labeled_patches, net1):
    """
    Train a neural network classifier on the patches.

    Parameters
    ----------
    hypes : dict
    labeled_patches : tuple (patches, labels)
    net1 : model object

    Returns
    -------
    trained classifier
    """
    feats, y = get_features(hypes, labeled_patches)
    # debug output of what is fed into the model
    for tag, value in (("y.shape", str(y.shape)),
                       ("feats type", type(feats)),
                       ("feats.shape", str(feats.shape))):
        print("##### %s: %s" % (tag, value))
    net1.fit(feats, y)
    return net1
def get_parser():
    """Build and return the command-line argument parser."""
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
    arg_parser = ArgumentParser(description=__doc__,
                                formatter_class=ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument(
        "--hypes",
        dest="hypes_file",
        type=str,
        required=True,
        help=("path to a JSON file with "
              "contains 'data' (with 'train' and 'test') as "
              "well as 'classes' (with 'colors' for each)"))
    return arg_parser
if __name__ == '__main__':
args = | |
import re
from random import randint as r

FACE_MOVES = ["U", "D", "F", "B", "L", "R"]
SLICE_MOVES = ["M", "E", "S"]
WEDGE_MOVES = ["u", "d", "f", "b", "l", "r", "Uw", "Dw", "Fw", "Bw", "Lw", "Rw"]
ROTATIONS = ["x", "y", "z"]
# Maps a move letter to the index of the face (U,L,F,R,B,D), slice axis
# (E,M,S) or rotation axis (x,y,z) it acts on.
move_dict = {"U": 0, "L": 1, "F": 2, "R": 3, "B": 4, "D": 5, "E": 0, "M": 1, "S": 2, "x": 0, "y": 1, "z": 2, "u": 0,
             "d": 1, "f": 2, "b": 3, "l": 4, "r": 5}
ALL_MOVES = FACE_MOVES + SLICE_MOVES + WEDGE_MOVES + ROTATIONS
# A move token is a letter optionally followed by exactly one of: ', 2, 3
# or 2'. Bug fix: the previous character-class pattern ['|2|3]{0,2} also
# accepted junk such as "U22", "U''" and "U|".
regex_str = "^(" + "|".join(ALL_MOVES) + ")(?:2'|'|2|3)?$"
MOVE_REGEX = re.compile(regex_str)
FACES = {'U': 0, 'L': 1, 'F': 2, 'R': 3, 'B': 4, 'D': 5}
class Algorithm(object):
    """A parsed sequence of Move objects with an HTM-style move count.

    Face and wedge turns count one move each; slice turns count two.
    """

    def __init__(self, alg):
        assert (valid_alg(alg))
        self.move_count = 0
        self.moves = []
        for token in alg.split():
            parsed = Move(token)
            if parsed.letter in FACE_MOVES or parsed.letter in WEDGE_MOVES:
                self.move_count += 1
            elif parsed.letter in SLICE_MOVES:
                # a slice turn is equivalent to two face turns
                self.move_count += 2
            self.moves.append(parsed)

    def __repr__(self):
        return " ".join(str(m) for m in self.moves)

    def num_moves(self):
        """Total move count of this algorithm."""
        return self.move_count

    def solution(self, sol):
        """True when applying this algorithm followed by `sol` solves a cube."""
        scratch = Cube()
        scratch.apply_alg(self)
        scratch.apply_alg(sol)
        return scratch.solved()

    def invert(self):
        """Return a new Algorithm that undoes this one."""
        reversed_tokens = [m.invert() for m in reversed(self.moves)]
        return Algorithm(" ".join(reversed_tokens))
class Move(object):
    """One move token, e.g. "U", "R'", "Fw2", "R2'".

    Attributes:
        letter: face/slice/rotation letter (lowercase for wedge moves).
        num: number of clockwise quarter turns to apply (1, 2 or 3).

    Bug fix: the original never set ``num`` for the suffix "3" (which
    MOVE_REGEX explicitly accepts), leaving the attribute unset and
    crashing later with AttributeError; unknown suffixes now fail fast
    with KeyError at construction instead.
    """

    # Quarter turns for each recognised suffix ("3" behaves like "'").
    _SUFFIX_TURNS = {"": 1, "'": 3, "2": 2, "3": 3, "2'": 2}

    def __init__(self, move):
        if len(move) == 1:
            self.letter = move
            self.num = 1
        elif len(move) == 2:
            self.letter = move[:1]
            rest = move[1:]
            if rest == "w":
                # "Uw" style wedge move -> canonical lowercase letter
                self.letter = self.letter.lower()
                rest = ""
            self.num = self._SUFFIX_TURNS[rest]
        else:
            rest = move[1:]
            if rest == "2'":
                # e.g. "R2'": a double turn written with inverse notation
                self.letter = move[:1]
                self.num = 2
            else:
                # "Uw'", "Uw2", ... -> wedge move with a suffix
                self.letter = move[:1].lower()
                self.num = self._SUFFIX_TURNS[move[2:]]

    def __repr__(self):
        if self.num == 2:
            return self.letter + "2"
        if self.num == 3:
            return self.letter + "'"
        return self.letter

    def invert(self):
        """Return the inverse move as a string token."""
        if self.num == 1:
            return self.letter + "'"
        if self.num == 2:
            return self.letter + "2"
        return self.letter
class Cube(object):
"""
A 3-dimensional array representation of a Rubik's cube. Act on it by calling
c.apply_alg(alg) where alg is an Algorithm object. Additionally, check if it
is solved with c.solved().
"""
def __init__(self):
self.cube = [[[i for _ in range(3)] for _ in range(3)] for i in range(6)]
def __repr__(self):
return str(self.cube)
def solved(self):
for face in range(6):
if len(set(self.cube[face][0]).union(set(self.cube[face][1])).union(set(self.cube[face][2]))) > 1:
return False
return True
##All the f2L stuff concerns last slot
def f2l_solved(self):
if self.slot_corner_solved() and self.slot_edge_solved():
return True
return False
def ll_oriented(self):
face = FACES['U']
if len(set(self.cube[face][0]).union(set(self.cube[face][1])).union(set(self.cube[face][2]))) > 1:
return False
return True
def ll_edges_oriented(self):
face = FACES['U']
correct_color = self.cube[face][1][1]
if self.cube[face][0][1] != correct_color:
return False
if self.cube[face][1][0] != correct_color:
return False
if self.cube[face][1][2] != correct_color:
return False
if self.cube[face][2][1] != correct_color:
return False
return True
def last_5_edges_oriented(self):
if self.ll_edges_oriented():
return True
face = FACES['U']
correct_color = self.cube[face][1][1]
counter = 0
if self.cube[face][0][1] == correct_color:
counter += 1
if self.cube[face][1][0] == correct_color:
counter += 1
if self.cube[face][1][2] == correct_color:
counter += 1
if self.cube[face][2][1] == correct_color:
counter += 1
if counter == 3 and self.cube[FACES['F']][1][2] == correct_color:
return True;
return False
def slot_corner_solved(self):
if not self.f2l_minus_1_solved:
return False
face = FACES['F']
if self.cube[face][1][1] != self.cube[face][2][2]:
return False
face = FACES['R']
if self.cube[face][1][1] != self.cube[face][2][0]:
return False
face = FACES['D']
if self.cube[face][1][1] != self.cube[face][0][2]:
return False
return True
def three_move_insert_exists(self):
if self.join_insert():
return True
result = False
for i in range(4):
self.apply_alg(Algorithm('R U\' R\''))
if self.f2l_solved():
result = True
self.apply_alg(Algorithm('R U R\' U'))
return result
def join_insert(self):
result = False
for i in range(4):
self.apply_alg(Algorithm('R U R\''))
if self.f2l_solved():
result = True
self.apply_alg(Algorithm('R U\' R\' U'))
return result
def slot_edge_solved(self):
if not self.f2l_minus_1_solved():
return False
face = FACES['F']
if self.cube[face][1][1] != self.cube[face][1][2]:
return False
face = FACES['R']
if self.cube[face][1][1] != self.cube[face][1][0]:
return False
return True
    def f2l_minus_1_solved(self):
        """True when the first two layers are solved except the FR slot.

        Checks sticker by sticker: the D face minus its slot-corner
        position, the full bottom two rows of B and L, and the bottom two
        rows of F and R minus the stickers belonging to the open
        front-right slot.
        """
        face = FACES['D']
        correct_color = self.cube[face][1][1]
        # D face: rows 1-2 plus the two non-slot stickers of row 0
        # (position [0][2] is the slot corner and is allowed to differ).
        if not self.bottom_two_rows_correct_color(face):
            return False
        if self.cube[face][0][0] != correct_color:
            return False
        if self.cube[face][0][1] != correct_color:
            return False
        # B and L are untouched by the open slot: full bottom two rows.
        if not self.bottom_two_rows_correct_color(FACES['B']):
            return False
        if not self.bottom_two_rows_correct_color(FACES['L']):
            return False
        # F face: bottom two rows except the slot's right-hand column.
        face = FACES['F']
        correct_color = self.cube[face][1][1]
        if self.cube[face][1][0] != correct_color:
            return False
        if self.cube[face][2][0] != correct_color:
            return False
        if self.cube[face][2][1] != correct_color:
            return False
        # R face: bottom two rows except the slot's left-hand column.
        face = FACES['R']
        correct_color = self.cube[face][1][1]
        if self.cube[face][1][2] != correct_color:
            return False
        if self.cube[face][2][1] != correct_color:
            return False
        if self.cube[face][2][2] != correct_color:
            return False
        return True
def bottom_two_rows_correct_color(self, face):
correct_color = self.cube[face][1][1]
for i in range(3):
if self.cube[face][1][i] != correct_color:
return False
for i in range(3):
if self.cube[face][2][i] != correct_color:
return False
return True
def dump(self):
for face in range(6):
print
'---'
print
self.cube[face]
print
'----'
    def _cycle_stickers(self, *args):
        """Cyclically shift single stickers along the given positions.

        Each arg is a [face, row, col] index triple; the sticker at
        args[i-1] moves to args[i], and the last position's sticker wraps
        around to args[0].
        """
        # save the sticker that will be overwritten last (the wrap-around)
        t = self.cube[args[len(args) - 1][0]][args[len(args) - 1][1]][args[len(args) - 1][2]]
        loop = reversed(range(len(args)))
        # walk backwards so each sticker is copied before being overwritten
        for i in loop:
            if i > 0:
                self.cube[args[i][0]][args[i][1]][args[i][2]] = self.cube[args[i - 1][0]][args[i - 1][1]][
                    args[i - 1][2]]
        self.cube[args[0][0]][args[0][1]][args[0][2]] = t
    def _cycle_rows(self, *args):
        """Cyclically shift whole sticker rows along the given positions.

        Each arg is a [face, row] index pair; the row at args[i-1] moves to
        args[i], and the last position's row wraps around to args[0].
        """
        # save the row that will be overwritten last (the wrap-around)
        t = self.cube[args[len(args) - 1][0]][args[len(args) - 1][1]]
        loop = reversed(range(len(args)))
        # walk backwards so each row is copied before being overwritten
        for i in loop:
            if i > 0:
                self.cube[args[i][0]][args[i][1]] = self.cube[args[i - 1][0]][args[i - 1][1]]
        self.cube[args[0][0]][args[0][1]] = t
    def _rotate_face(self, face):
        """Apply one clockwise quarter turn of `face` (0..5 = U,L,F,R,B,D).

        First rotates the 8 outer stickers of the face itself, then cycles
        the adjacent sticker strips on the four neighbouring faces. Index
        triples are [face, row, col]; index pairs are [face, row].
        """
        # rotate the stickers on the face: corners first, then edges
        self._cycle_stickers([face, 0, 0], [face, 0, 2], [face, 2, 2], [face, 2, 0])
        self._cycle_stickers([face, 0, 1], [face, 1, 2], [face, 2, 1], [face, 1, 0])
        # U: cycle the top rows of B, R, F, L
        if face == 0:
            self._cycle_rows([4, 0], [3, 0], [2, 0], [1, 0])
        # L: cycle the left columns of U, F, D and the right column of B
        elif face == 1:
            self._cycle_stickers([0, 0, 0], [2, 0, 0], [5, 0, 0], [4, 2, 2])
            self._cycle_stickers([0, 1, 0], [2, 1, 0], [5, 1, 0], [4, 1, 2])
            self._cycle_stickers([0, 2, 0], [2, 2, 0], [5, 2, 0], [4, 0, 2])
        # F: cycle U bottom row, R left column, D top row, L right column
        elif face == 2:
            self._cycle_stickers([0, 2, 0], [3, 0, 0], [5, 0, 2], [1, 2, 2])
            self._cycle_stickers([0, 2, 1], [3, 1, 0], [5, 0, 1], [1, 1, 2])
            self._cycle_stickers([0, 2, 2], [3, 2, 0], [5, 0, 0], [1, 0, 2])
        # R: cycle the right columns of U, F, D and the left column of B
        elif face == 3:
            self._cycle_stickers([0, 2, 2], [4, 0, 0], [5, 2, 2], [2, 2, 2])
            self._cycle_stickers([0, 1, 2], [4, 1, 0], [5, 1, 2], [2, 1, 2])
            self._cycle_stickers([0, 0, 2], [4, 2, 0], [5, 0, 2], [2, 0, 2])
        # B: cycle U top row, L left column, D bottom row, R right column
        elif face == 4:
            self._cycle_stickers([0, 0, 0], [1, 2, 0], [5, 2, 2], [3, 0, 2])
            self._cycle_stickers([0, 0, 1], [1, 1, 0], [5, 2, 1], [3, 1, 2])
            self._cycle_stickers([0, 0, 2], [1, 0, 0], [5, 2, 0], [3, 2, 2])
        # D: cycle the bottom rows of L, F, R, B
        elif face == 5:
            self._cycle_rows([1, 2], [2, 2], [3, 2], [4, 2])
    def slice(self, axis):
        """Apply a middle-slice quarter turn: axis 0=E, 1=M, 2=S.

        NOTE: the method name shadows the builtin ``slice`` inside this
        class body only; kept for interface compatibility.
        """
        # E: cycle the middle rows of the four side faces (L, F, R, B)
        if axis == 0:
            self._cycle_rows([1, 1], [2, 1], [3, 1], [4, 1])
        # M: cycle the middle columns of U, F, D, B
        elif axis == 1:
            self._cycle_stickers([0, 0, 1], [2, 0, 1], [5, 0, 1], [4, 2, 1])
            self._cycle_stickers([0, 1, 1], [2, 1, 1], [5, 1, 1], [4, 1, 1])
            self._cycle_stickers([0, 2, 1], [2, 2, 1], [5, 2, 1], [4, 0, 1])
        # S: cycle U middle row, R middle column, D middle row, L middle column
        elif axis == 2:
            self._cycle_stickers([0, 1, 0], [1, 2, 1], [5, 1, 2], [3, 0, 1])
            self._cycle_stickers([0, 1, 1], [1, 1, 1], [5, 1, 1], [3, 1, 1])
            self._cycle_stickers([0, 1, 2], [1, 0, 1], [5, 1, 0], [3, 2, 1])
def rotate(self, axis):
# x
if axis == 0:
self.apply_move(Move("R"))
self.apply_move(Move("L'"))
self.apply_move(Move("M'"))
# y
elif axis == 1:
self.apply_move(Move("U"))
self.apply_move(Move("E'"))
self.apply_move(Move("D'"))
# z
elif axis == 2:
self.apply_move(Move("B'"))
self.apply_move(Move("F"))
self.apply_move(Move("S'"))
def rotate_wedge(self, face):
# u / Uw
if face == 0:
self.apply_move(Move("U"))
self.apply_move(Move("E'"))
# d / Dw
elif face == 1:
self.apply_move(Move("D"))
self.apply_move(Move("E"))
# f / Fw
elif face == 2:
self.apply_move(Move("F"))
self.apply_move(Move("S'"))
# b / Bw
elif face == 3:
self.apply_move(Move("B"))
self.apply_move(Move("S"))
# l / Lw
elif face == 4:
self.apply_move(Move("L"))
self.apply_move(Move("M"))
# r / Rw
elif face == 5:
self.apply_move(Move("R"))
self.apply_move(Move("M'"))
def apply_alg(self, alg):
for move in alg.moves:
self.apply_move(move)
def apply_move(self, move):
if move.letter in FACE_MOVES:
for _ in range(move.num):
self._rotate_face(move_dict[move.letter])
elif move.letter in WEDGE_MOVES:
for _ in range(move.num):
self.rotate_wedge(move_dict[move.letter])
elif | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Brew-file: Manager for packages of Homebrew
https://github.com/rcmdnk/homebrew-file
requirement: Python 2.7 or later
"""
from __future__ import print_function
import os
import sys
__prog__ = os.path.basename(__file__)
__description__ = __doc__
__author__ = "rcmdnk"
__copyright__ = "Copyright (c) 2013 rcmdnk"
__credits__ = ["rcmdnk"]
__license__ = "MIT"
__version__ = "v5.1.0"
__date__ = "5/Jan/2018"
__maintainer__ = "rcmdnk"
__email__ = "<EMAIL>"
__status__ = "Prototype"
def my_decode(word):  # pragma: no cover
    """Decode bytes to str on Python 3; pass through unchanged on Python 2."""
    if sys.version_info.major <= 2:
        return word
    return word.decode()
def my_input(word):  # pragma: no cover
    """Prompt for a line of input, portable across Python 2 and 3."""
    if sys.version_info.major <= 2:
        return raw_input(word)
    return input(word)
def open_output_file(name, mode="w"):
    """Open `name` for writing, creating missing parent directories first."""
    parent = os.path.dirname(name)
    if parent != "" and not os.path.exists(parent):
        os.makedirs(parent)
    return open(name, mode)
def to_bool(val):
    """Coerce a bool/int/str flag value to a bool.

    Real bools pass through; ints and digit strings go via int();
    the string "true" (case-insensitive) is True; anything else is False.
    """
    if type(val) == bool:
        return val
    if type(val) == int or (type(val) == str and val.isdigit()):
        return bool(int(val))
    if type(val) == str:
        return val.lower() == "true"
    return False  # pragma: no cover
class Tee:
    """Write to two destinations at once.

    Each destination may be an open stream or a file-name string; string
    destinations are buffered in a StringIO and written out when close()
    is called.
    """

    def __init__(self, out1, out2=sys.stdout, use2=True):
        """Set up the two destinations; strings become deferred file buffers."""
        try:
            from cStringIO import StringIO
        except ImportError:  # pragma: no cover
            from io import StringIO
        if type(out1) == str:
            self.out1name = out1
            self.out1 = StringIO()
        else:
            self.out1name = ""
            self.out1 = out1
        self.use2 = use2
        if self.use2:
            if type(out2) == str:
                self.out2name = out2
                self.out2 = StringIO()
            else:
                self.out2name = ""
                self.out2 = out2

    def __del__(self):
        """Close any buffers owned by this object (named destinations only)."""
        if self.out1name != "":
            self.out1.close()
        if self.use2:
            if self.out2name != "":
                self.out2.close()

    def write(self, text):
        """Write w/o line break."""
        self.out1.write(text)
        if self.use2:
            self.out2.write(text)

    def writeln(self, text):
        """Write w/ line break."""
        self.out1.write(text + "\n")
        if self.use2:
            self.out2.write(text + "\n")

    def flush(self):
        """Flush both outputs."""
        self.out1.flush()
        if self.use2:
            self.out2.flush()

    def close(self):
        """Flush buffered content to the named files and release buffers."""
        if self.out1name != "":
            f = open_output_file(self.out1name, "w")
            f.write(self.out1.getvalue())
            f.close()
        if self.use2:
            if self.out2name != "":
                # Bug fix: use open_output_file (as for out1) so missing
                # parent directories of the out2 path are created too;
                # the original used plain open() here and failed.
                f = open_output_file(self.out2name, "w")
                f.write(self.out2.getvalue())
                f.close()
        self.__del__()
class BrewHelper:
    """Helper functions for BrewFile: subprocess wrapper and leveled output."""

    def __init__(self, opt):
        # opt: option dict; must contain "verbose" for the output helpers.
        self.opt = opt

    def readstdout(self, proc):
        """Yield decoded, stripped stdout lines until the process exits."""
        while True:
            line = my_decode(proc.stdout.readline()).rstrip()
            code = proc.poll()
            if line == '':
                if code is not None:
                    break
                else:  # pragma: no cover
                    continue
            yield line

    def proc(self, cmd, print_cmd=True, print_out=True,
             exit_on_err=True, separate_err=False, print_err=True, shell=False,
             verbose=1, env=None):
        """Run `cmd` and return (return_code, output_lines).

        Bug fix: `env` previously used a mutable default argument ({});
        it now defaults to None and is normalized inside the function.
        Callers passing a dict are unaffected.
        """
        import shlex
        import subprocess
        if env is None:
            env = {}
        if type(cmd) != list:
            cmd = shlex.split(cmd)
        cmd_orig = " ".join(["$"] + cmd)
        if cmd[0] == "brew":
            # bypass any shell function/alias named "brew"
            cmd = ["command"] + cmd
        if print_cmd:
            self.info(cmd_orig, verbose)
        if shell:
            cmd = ' '.join(cmd)
        all_env = os.environ.copy()
        for k, v in env.items():
            all_env[k] = v
        lines = []
        try:
            if separate_err:
                if print_err:
                    stderr = None
                else:
                    stderr = open(os.devnull, 'w')
            else:
                stderr = subprocess.STDOUT
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=stderr,
                                 env=all_env, shell=shell)
            if separate_err and not print_err:
                stderr.close()
            for line in self.readstdout(p):
                lines.append(line)
                if print_out:
                    self.info(line, verbose)
            ret = p.wait()
        except OSError as e:
            if print_out:
                lines = [" ".join(cmd) + ": " + str(e)]
                self.info(lines[0].strip(), verbose)
            ret = -1
        if exit_on_err and ret != 0:
            # surface the output before bailing out if it was suppressed
            if not (print_out and self.opt["verbose"] >= verbose):
                print("\n".join(lines))
            sys.exit(ret)
        return (ret, lines)

    def info(self, text, verbose=2):
        """Print `text` when the configured verbosity reaches `verbose`."""
        if self.opt["verbose"] < verbose:
            return
        print(text)

    def warn(self, text, verbose=1):
        """Print a yellow warning message."""
        self.info("\033[33;1m" + text + "\033[m", verbose)

    def err(self, text, verbose=0):
        """Print a red error message."""
        self.info("\033[31;1m" + text + "\033[m", verbose)

    def banner(self, text, verbose=1):
        """Print `text` framed by '#' rules as wide as its longest line."""
        # renamed from `max`, which shadowed the builtin
        width = 0
        for l in text.split("\n"):
            if width < len(l):
                width = len(l)
        self.info("\n"+"#"*width+"\n"+text+"\n" + "#"*width+"\n", verbose)

    def brew_val(self, name):
        """Return (and cache in opt) the output of `brew --<name>`."""
        if name not in self.opt:
            self.opt[name] = self.proc("brew --" + name, False, False)[1][0]
        return self.opt[name]
class BrewInfo:
"""Homebrew information storage."""
def __init__(self, helper, filename=""):
    """Container for Brewfile data.

    helper -- BrewHelper used to run brew commands during parsing.
    filename -- path of the Brewfile this object reads/writes.
    """
    # *_input: contents parsed from the Brewfile; *_opt dicts map a
    # package name to its option string.
    self.brew_input_opt = {}
    self.pip_input_opt = {}
    self.gem_input_opt = {}
    self.brew_input = []
    self.tap_input = []
    self.cask_input = []
    self.pip_input = []
    self.gem_input = []
    self.appstore_input = []
    self.file_input = []
    self.before_input = []
    self.after_input = []
    self.cmd_input = []
    # *_list: the working/derived state counterparts of the inputs.
    self.brew_list_opt = {}
    self.pip_list_opt = {}
    self.gem_list_opt = {}
    self.brew_list = []
    self.tap_list = []
    self.cask_list = []
    self.pip_list = []
    self.gem_list = []
    self.appstore_list = []
    self.file_list = []
    # Caches of packages/casks found inside a tap directory.
    self.tap_packs = []
    self.tap_casks = []
    self.cask_nocask_list = []
    # Name -> container registry; get/remove/set_val/add operate through it.
    # The dict holds the SAME objects as the attributes above.
    self.list_dic = {
        "brew_input_opt": self.brew_input_opt,
        "pip_input_opt": self.pip_input_opt,
        "gem_input_opt": self.gem_input_opt,
        "brew_input": self.brew_input,
        "tap_input": self.tap_input,
        "cask_input": self.cask_input,
        "pip_input": self.pip_input,
        "gem_input": self.gem_input,
        "appstore_input": self.appstore_input,
        "file_input": self.file_input,
        "before_input": self.before_input,
        "after_input": self.after_input,
        "cmd_input": self.cmd_input,
        "brew_list_opt": self.brew_list_opt,
        "pip_list_opt": self.pip_list_opt,
        "gem_list_opt": self.gem_list_opt,
        "brew_list": self.brew_list,
        "tap_list": self.tap_list,
        "cask_list": self.cask_list,
        "cask_nocask_list": self.cask_nocask_list,
        "pip_list": self.pip_list,
        "gem_list": self.gem_list,
        "appstore_list": self.appstore_list,
        "file_list": self.file_list,
        "tap_packs": self.tap_packs,
        "tap_casks": self.tap_casks,
    }
    self.filename = filename
    self.helper = helper
def set_file(self, filename):
self.filename = filename
def get_file(self):
return self.filename
def get_dir(self):
return os.path.dirname(self.filename)
def check_file(self):
if os.path.exists(self.filename):
return True
else:
return False
def check_dir(self):
if os.path.exists(self.get_dir()):
return True
else:
return False
def clear(self):
self.clear_input()
self.clear_list()
del self.tap_packs[:]
del self.tap_casks[:]
def clear_input(self):
self.brew_input_opt.clear()
self.pip_input_opt.clear()
self.gem_input_opt.clear()
del self.brew_input[:]
del self.tap_input[:]
del self.cask_input[:]
del self.pip_input[:]
del self.gem_input[:]
del self.appstore_input[:]
del self.file_input[:]
del self.before_input[:]
del self.after_input[:]
del self.cmd_input[:]
def clear_list(self):
self.brew_list_opt.clear()
self.pip_list_opt.clear()
self.gem_list_opt.clear()
del self.brew_list[:]
del self.tap_list[:]
del self.cask_list[:]
del self.cask_nocask_list[:]
del self.pip_list[:]
del self.gem_list[:]
del self.appstore_list[:]
del self.file_list[:]
def input_to_list(self):
self.clear_list()
self.brew_list.extend(self.brew_input)
self.brew_list_opt.update(self.brew_input_opt)
self.pip_list_opt.update(self.pip_input_opt)
self.gem_list_opt.update(self.gem_input_opt)
self.tap_list.extend(self.tap_input)
self.cask_list.extend(self.cask_input)
self.pip_list.extend(self.pip_input)
self.gem_list.extend(self.gem_input)
self.appstore_list.extend(self.appstore_input)
self.file_list.extend(self.file_input)
def sort(self):
core = 0
homebrew_taps = []
cask = 0
cask_taps = []
other_taps = []
for t in self.tap_list:
if t == "homebrew/core":
core = 1
elif t.startswith("homebrew/"):
homebrew_taps.append(t)
elif t == "caskroom/cask":
cask = 1
elif t.startswith("caskroom/"):
cask_taps.append(t)
else:
other_taps.append(t)
homebrew_taps.sort()
cask_taps.sort()
other_taps.sort()
self.tap_list = []
if core == 1:
self.tap_list.append("homebrew/core")
self.tap_list += homebrew_taps
if cask == 1:
self.tap_list.append("caskroom/cask")
self.tap_list += cask_taps
self.tap_list += other_taps
self.brew_list.sort()
self.tap_casks.sort()
self.gem_list.sort()
self.appstore_list.sort(
key=lambda x: x.split()[1].lower() if len(x.split()) > 1
else x.split()[0])
def get(self, name):
import copy
return copy.deepcopy(self.list_dic[name])
def remove(self, name, package):
if type(self.list_dic[name]) == list:
self.list_dic[name].remove(package)
elif type(self.list_dic[name]) == dict:
del self.list_dic[name][package]
def set_val(self, name, val):
if type(self.list_dic[name]) == list:
del self.list_dic[name][:]
self.list_dic[name].extend(val)
elif type(self.list_dic[name]) == dict:
self.list_dic[name].clear()
self.list_dic[name].update(val)
def add(self, name, val):
if type(self.list_dic[name]) == list:
self.list_dic[name].extend(val)
elif type(self.list_dic[name]) == dict:
self.list_dic[name].update(val)
def read(self, filename=""):
    """Parse a Brewfile into the ``*_input`` containers.

    filename -- file to read; falls back to ``self.filename`` when empty.
    Returns False when the file cannot be opened; otherwise returns None.
    NOTE(review): success is signalled by None, not True — callers testing
    the return value should be aware of this.
    """
    self.clear_input()
    try:
        if filename == "":
            f = open(self.filename, "r")
        else:
            f = open(filename, "r")
    except IOError:
        return False
    lines = f.readlines()
    f.close()
    import re
    is_ignore = False
    # "direct" is the pseudo tap for formulae installed from a URL.
    self.tap_input.append("direct")
    for l in lines:
        # Lines between BREWFILE_IGNORE and BREWFILE_ENDIGNORE markers
        # are skipped wholesale.
        if re.match("# *BREWFILE_ENDIGNORE", l):
            is_ignore = False
        if re.match("# *BREWFILE_IGNORE", l):
            is_ignore = True
        if is_ignore:
            continue
        # Skip blank lines and comments.
        if re.match(" *$", l) is not None or\
                re.match(" *#", l) is not None:
            continue
        # Strip bundle-style punctuation (quotes, commas, brackets).
        args = l.replace("'", "").replace('"', "").\
            replace(",", " ").replace("[", "").replace("]", "")
        # Let the shell perform word splitting on the normalized line.
        args = self.helper.proc('echo \\"' + args + '\\"', False, False,
                                False, True, True, shell=True
                                )[1][0].split()
        cmd = args[0]
        p = args[1] if len(args) > 1 else ""
        # "brew tap x" style: drop the leading "brew" and treat as "tap x".
        if len(args) > 2 and p in ["tap", "cask", "pip", "gem"]:
            args.pop(0)
            cmd = args[0]
            p = args[1]
            if self.helper.opt["form"] == "none":
                self.helper.opt["form"] = "cmd"
        # "brew install x" style: drop the "install" verb.
        if len(args) > 2 and cmd in ["brew", "cask", "gem"] and \
                p == "install":
            args.pop(1)
            p = args[1]
            if self.helper.opt["form"] == "none":
                self.helper.opt["form"] = "cmd"
        # Remaining tokens are options; "args:" marks bundle-form options
        # which need a leading "--" restored.
        if len(args) > 2:
            if args[2] == "args:":
                opt = " " + " ".join(["--" + x for x in args[3:]]).strip()
                if self.helper.opt["form"] == "none":
                    self.helper.opt["form"] = "bundle"
            else:
                opt = " " + " ".join(args[2:]).strip()
        else:
            opt = ""
        # Raw command text (everything after the keyword) for before/after.
        excmd = " ".join(l.split()[1:]).strip()
        # Quoted package names are a hint the file is in bundle form.
        if self.helper.opt["form"] == "none":
            if cmd in ["brew", "tap", "tapall", "pip", "gem"]:
                if '"' in l or "'" in l:
                    self.helper.opt["form"] = "bundle"
        # Dispatch on the Brewfile keyword.
        if cmd == "brew" or cmd == "install":
            self.brew_input.append(p)
            self.brew_input_opt[p] = (opt)
        elif cmd == "tap":
            self.tap_input.append(p)
        elif cmd == "tapall":
            # tapall pulls in every formula the tap provides.
            self.tap_input.append(p)
            self.get_tap(p)
            for tp in self.tap_packs:
                self.brew_input.append(tp)
                self.brew_input_opt[tp] = ""
        elif cmd == "cask":
            self.cask_input.append(p)
        elif cmd == "pip":
            self.pip_input.append(p)
            self.pip_input_opt[p] = (opt)
        elif cmd == "gem":
            self.gem_input.append(p)
            self.gem_input_opt[p] = (opt)
        elif cmd == "appstore":
            # Keep the full entry (id + name), minus keyword and quotes.
            self.appstore_input.append(re.sub("^ *appstore *", "", l).
                                       strip().strip("'").strip('"'))
        elif cmd == "file" or cmd.lower() == "brewfile":
            self.file_input.append(p)
        elif cmd == "before":
            self.before_input.append(excmd)
        elif cmd == "after":
            self.after_input.append(excmd)
        else:
            # Unknown keyword: keep the whole line as a shell command.
            self.cmd_input.append(l.strip())
def get_tap_path(self, tap):
"""Get tap path"""
if tap == "direct":
return self.helper.brew_val("cache") + "/Formula"
tap_user = os.path.dirname(tap)
tap_repo = os.path.basename(tap)
return self.helper.brew_val("repository") + "/Library/Taps" +\
"/" + tap_user + "/homebrew-" + tap_repo
def get_tap(self, tap):
    """Helper for tap configuration file"""
    # Resolve the tap's local checkout; nothing to scan if it is absent.
    # NOTE(review): the remainder of this method (populating tap_packs /
    # tap_casks) is not visible in this chunk.
    tap_path = self.get_tap_path(tap)
    if not os.path.isdir(tap_path):
        return
| |
<reponame>AngelRuizMoreno/Jupyter_Dock_devel
################################################################################
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
##
## (C) Copyrights Dr. <NAME> and TSRI 2016
##
################################################################################
#############################################################################
#
# Author: <NAME>
#
# Copyright: <NAME> TSRI 2016
#
#############################################################################
#
# $Header: /mnt/raid/services/cvs/PmvApp/colorCmds.py,v 1.8.4.3 2017/09/07 00:51:28 annao Exp $
#
# $Id: colorCmds.py,v 1.8.4.3 2017/09/07 00:51:28 annao Exp $
#
"""
This Module implements commands to color the current selection different ways.
for example:
by atoms.
by residues.
by chains.
etc ...
"""
import os, sys
from mglutil.util.colorUtil import ToHEX
from PmvApp.colorPalette import ColorPalette, ColorPaletteFunction
from PmvApp.Pmv import DeleteGeomsEvent, AddGeomsEvent, EditGeomsEvent
from mglutil.events import Event
import numpy
from DejaVu2.colorTool import Map, RGBARamp, RedWhiteARamp, WhiteBlueARamp,\
RedWhiteBlueARamp
from PmvApp.Pmv import MVCommand
from MolKit2.molecule import Molecule, Atom, Residue, Chain
from MolKit2.selection import Selection, SelectionSet
from DejaVu2.colorMap import ColorMap
from opengltk.OpenGL import GL
class ColorCommandBase(MVCommand):
    """Base class for Pmv color commands

    Subclasses implement getColors() to return one color per atom; doit()
    applies those colors to every requested geometry representation and
    records an undo command for each.
    NOTE: this module is Python 2 code (print statements below).
    """
    # Keyword arguments accepted by checkArguments().
    _argNames = ['geomsToColor', 'carbonsOnly']

    def checkArguments(self, *args, **kw):
        """Reject any keyword argument not listed in _argNames."""
        for name in kw.keys():
            if name not in self._argNames:
                raise RuntimeError("%s: unrecognized keyword argument '%s', valid names are %s"%(
                    self.name, name, str(self._argNames)))
        return args, kw

    # virtual function that has to return a list of colors for the specified atoms
    def getColors(self, atoms):
        pass

    def doit(self, atoms, geomsToColor=['all',], carbonsOnly=False):
        """None <--- color(selections, geomsToColor=['all'])

        Applies getColors() to *atoms* for every geometry named in
        geomsToColor.  'all' expands to the visible geometries, '*' to
        all geometries including hidden ones.
        """
        if carbonsOnly:
            atoms = atoms & atoms.select("element C")
        pmv = self.app()
        if 'all' in geomsToColor:
            geomsToColor = pmv.getGeoms(SelectionSet([atoms]))
        elif '*' in geomsToColor:
            geomsToColor = pmv.getGeoms(SelectionSet([atoms]), visibleOnly=False)
        mol = atoms.getAtomGroup().getMolecule()
        gc = mol.geomContainer
        indices = atoms.getIndices()
        # NOTE(review): 'bonds' is assigned but never used in this method.
        bonds = None
        # Each branch below follows the same pattern: expand the indexed
        # color table, push an undo command with the old colors, write the
        # new RGB values, re-index, store, and refresh the display.
        for gName in geomsToColor:
            if gName=='lines' or gName=='noBond':
                col = mol._colors['lines'][mol._ag.getData('colorsIndices_lines').tolist()]
                oldCol = col[indices]
                self.app().pushUndoCmd( self.app().color, (atoms, oldCol, ['lines']), {})
                col[indices,:3] = self.getColors(atoms)
                colList, colIndexList = self.app().indexColors(col)
                mol._colors['lines'] = numpy.array(colList)
                mol._ag.setData('colorsIndices_lines', colIndexList)
                self.app().displayLines.refreshDisplay(mol)
            elif gName=='cpk':
                col = mol._colors['cpk'][mol._ag.getData('colorsIndices_cpk').tolist()]
                oldCol = col[indices]
                self.app().pushUndoCmd( self.app().color, (atoms, oldCol, ['cpk']), {})
                col[indices,:3] = self.getColors(atoms)
                colList, colIndexList = self.app().indexColors(col)
                mol._colors['cpk'] = numpy.array(colList)
                mol._ag.setData('colorsIndices_cpk', colIndexList)
                self.app().displayCPK.refreshDisplay(mol)
            elif gName=='sb':
                # sticks-and-balls: balls and cylinders share the colors.
                col = mol._colors['sb_balls'][mol._ag.getData('colorsIndices_sb_balls').tolist()]
                oldCol = col[indices]
                self.app().pushUndoCmd( self.app().color, (atoms, oldCol, ['sb']), {})
                col[indices,:3] = self.getColors(atoms)
                colList, colIndexList = self.app().indexColors(col)
                mol._colors['sb_balls'] = numpy.array(colList)
                mol._ag.setData('colorsIndices_sb_balls', colIndexList)
                mol._colors['sb_cyl'] = numpy.array(colList)
                mol._ag.setData('colorsIndices_sb_cyl', colIndexList)
                self.app().displaySB.refreshDisplay(mol)
            elif gName=='sticks':
                # sticks only: recolor the cylinders, not the balls.
                col = mol._colors['sb_cyl'][mol._ag.getData('colorsIndices_sb_cyl').tolist()]
                oldCol = col[indices]
                self.app().pushUndoCmd( self.app().color, (atoms, oldCol, ['sticks']), {})
                col[indices,:3] = self.getColors(atoms)
                colList, colIndexList = self.app().indexColors(col)
                mol._colors['sb_cyl'] = numpy.array(colList)
                mol._ag.setData('colorsIndices_sb_cyl', colIndexList)
                self.app().displaySB.refreshDisplay(mol)
            elif gName=='atomLabels':
                col = mol._colors['atomLabels'][mol._ag.getData('colorsIndices_atomLabels').tolist()]
                oldCol = col[indices]
                self.app().pushUndoCmd( self.app().color, (atoms, oldCol, ['atomLabels']), {})
                col[indices,:3] = self.getColors(atoms)
                colList, colIndexList = self.app().indexColors(col)
                mol._colors['atomLabels'] = numpy.array(colList)
                mol._ag.setData('colorsIndices_atomLabels', colIndexList)
                self.app().labelAtoms.refreshDisplay(mol)
            elif gName=='residueLabels':
                col = mol._colors['residueLabels'][mol._ag.getData('colorsIndices_residueLabels').tolist()]
                oldCol = col[indices]
                self.app().pushUndoCmd( self.app().color, (atoms, oldCol, ['residueLabels']), {})
                col[indices,:3] = self.getColors(atoms)
                colList, colIndexList = self.app().indexColors(col)
                mol._colors['residueLabels'] = numpy.array(colList)
                mol._ag.setData('colorsIndices_residueLabels', colIndexList)
                self.app().labelResidues.refreshDisplay(mol)
            elif gName.startswith("msms_"):
                # Molecular surfaces are stored per-molecule in a nested dict.
                #molName = gName[5:].split("_surface")[0]
                molName = mol.name
                col = mol._colors['msms']['%s_surface'%molName][mol._ag.getData('colorsIndices_msms_%s_surface'%molName).tolist()]
                oldCol = col[indices]
                self.app().pushUndoCmd( self.app().color, (atoms, oldCol, [gName]), {})
                col[indices,:3] = self.getColors(atoms)
                colList, colIndexList = self.app().indexColors(col)
                mol._colors['msms']['%s_surface'%molName] = numpy.array(colList)
                mol._ag.setData('colorsIndices_msms_%s_surface'%molName, colIndexList)
                self.app().displayMSMS.refreshDisplay(mol)
            elif gName.startswith("cartoon"):
                col = mol._colors['cartoon'][mol._ag.getData('colorsIndices_cartoon').tolist()]
                oldCol = col[indices]
                self.app().pushUndoCmd( self.app().color, (atoms, oldCol, ['cartoon'] ), {})
                col[indices,:3] = self.getColors(atoms)
                colList, colIndexList = self.app().indexColors(col)
                mol._colors['cartoon'] = numpy.array(colList)
                mol._ag.setData('colorsIndices_cartoon', colIndexList)
                self.app().displayCartoon.refreshDisplay(mol)
            elif gName.startswith("coarseMolSurf"):
                # Coarse molecular surfaces are not implemented yet.
                print 'NOT YET'
                ## geom = gc.geoms[gName]
                ## oldCol = self.getAtomColors(geom).copy()
                ## self.app().pushUndoCmd( self.app().color, (atoms, oldCol, [gName]), {})
                ## key = (gName+"colors").replace("-", "")
                ## col = atoms.getAtomGroup().getData(key)
                ## col[indices] = allColors #elementColors[atoms.getData('atomicNumber')]
                ## surfAtomInds = gc.boundGeom[gName]['atoms'].getIndices()
                ## surfCol = col[surfAtomInds] # colors of surface atoms
                ## cl_atoms = gc.boundGeom[gName]['cl_atoms']
                ## material = numpy.take(surfCol, cl_atoms, axis=0).astype('f')
                ## gc.geoms[gName].Set(materials=material, redo=1, inheritMaterial=False,
                ##                     #tagModified=False,
                ##                     transparent='implicit')
                ## mol._ag.setData(key, col)
            else:
                print 'ERROR: Color %s not implemented'%gName
class ColorCommand(ColorCommandBase):
    """The ColorCommand provide a command for coloring a user specified set of atoms with user specified set of colors.
    Synopsis:
    None <--- color(atoms, colors, geomsToColor=['all'])
    Required Arguments:
    nodes --- any set of MolKit2.Selection describing molecular components
    colors --- list of rgb or [rgbs] tuple of the same length as atoms
    Optional Arguments:
    geomsToColor --- list of the name of geometries to color default is ['all']
    meaning all graphical representations of atoms (including
    hidden ones). Use '*' for all visible representations.
    Package : PmvApp
    Module : colorCmds
    Class : ColorCommand
    Command : color
    """

    def doit(self, selection, colors, geomsToColor=['all',]):
        """None <--- color(selection, geomsToColor, colors)
        colors --- is an array of colors for the selection (len(selection) == len(colors))
        """
        # One color per selected atom is required.
        assert len(selection)==len(colors)
        # Stash the explicit colors so the base-class machinery can fetch
        # them back through getColors().
        self._colors = colors
        ColorCommandBase.doit(self, selection, geomsToColor)

    def getColors(self, atoms):
        # Return only the RGB channels (drop alpha if present).
        # assumes self._colors supports 2-D slicing (e.g. a numpy array) —
        # TODO confirm callers never pass a plain list of tuples.
        return self._colors[:, :3]
from .pmvPalettes import elementColors, sequentialColors, rainbow256Colors
class ColorByAtomType(ColorCommandBase):
    """Color atoms by element using the standard atom-type palette.

    N: blue; C: gray; O: red; S: yellow; H: cyan; P: magenta; unknown: green.
    This coloring scheme reflects the atomic composition of the selection.

    Package : PmvApp
    Module : colorCmds
    Class : ColorByAtomType
    Command : colorbyAtomType
    Synopsis:
        None <- colorByAtomType(nodes, geomsToColor=['all'])
        nodes --- any set of MolKit2.Selection describing molecular components
        geomsToColor --- list of the name of geometries to color, default 'all'
    Keywords: color, atom type
    """

    def getColors(self, atoms):
        """Return one RGB color per atom, looked up by atomic number."""
        atomic_numbers = atoms.getData('atomicNumber')
        return elementColors[atomic_numbers]
class ColorByMolecule(ColorCommandBase):
    """Assign one palette color per molecule to the given selection.

    Package : PmvApp
    Module : colorCmds
    Class : ColorByMolecule
    Command : colorByMolecules
    Synopsis:
        None <- colorByMolecules(selection, geomsToColor=['all'], carbonsOnly=False)
        selection --- any set of MolKit2.Selection describing molecular components
        geomsToColor --- list of the name of geometries to color, default ['all']
        carbonsOnly --- when True, only carbon atoms are recolored
    Keywords: color, molecule
    """

    def getColors(self, sel):
        """Return the palette color of the molecule owning *sel*."""
        mol = sel.getAtomGroup().getMolecule()
        palette_index = self.app().Mols.index(mol) % len(sequentialColors)
        return sequentialColors[palette_index]
class ColorByChain(ColorByMolecule):
    """Assign one palette color per chain to the given selection.

    Package : PmvApp
    Module : colorCmds
    Class : ColorByChain
    Command : colorByChains
    Synopsis:
        None <- colorByChains(selection, geomsToColor=['all'], carbonsOnly=False)
        selection --- any set of MolKit2.Selection describing molecular components
        geomsToColor --- list of the name of geometries to color, default 'all'
    Keywords: color, chain
    """

    def getColors(self, sel):
        """Return per-atom colors where each chain gets its own palette entry."""
        mol = sel.getAtomGroup().getMolecule()
        # Default every atom to white, then paint the selected part of
        # each chain with the chain's palette color.
        colors = numpy.array([[1., 1., 1.]] * len(mol._ag), 'f')
        for chain_index, chain in enumerate(mol._ag.iterChains()):
            chain_sel = sel & chain
            colors[chain_sel.getIndices()] = \
                sequentialColors[chain_index % len(sequentialColors)]
        return colors[sel.getIndices()]
class ColorRainbow(ColorByMolecule):
    """Color a selection with a rainbow ramp spanning the whole molecule.

    Package : PmvApp
    Module : colorCmds
    Class : ColorRainbow
    Command : colorRainbow
    Synopsis:
        None <- colorRainbow(selection, geomsToColor=['all'])
        selection --- atom selection
        geomsToColor --- list of geometries (names) to be colored
    """

    def getColors(self, sel):
        """Map the selected atom indices onto the 256-entry rainbow ramp."""
        mol = sel.getAtomGroup().getMolecule()
        return Map(sel.getIndices(), rainbow256Colors, 0, len(mol._ag))
class ColorRainbowChain(ColorRainbowChainBase if False else ColorByMolecule):
    """Color each chain of a molecule with its own full rainbow ramp.

    Package : PmvApp
    Module : colorCmds
    Class : ColorRainbowChain
    Command : colorRainbowChain
    Synopsis:
        None <- colorRainbowChain(selection, geomsToColor=['all'])
        selection --- atom selection
        geomsToColor --- list of geometries (names) to be colored
    """

    def getColors(self, sel):
        """Return per-atom RGB colors, ramping the rainbow over each chain.

        Dead locals from the original (an unused chain counter and an
        unused minimum-index computation) and stale commented-out code
        have been removed; the color computation is unchanged.
        """
        mol = sel.getAtomGroup().getMolecule()
        # Build colors for every atom of the molecule, then subset to sel.
        molCol = numpy.ones((mol._ag.numAtoms(), 3), 'f')
        for chain in mol._ag.iterChains():
            # Map a full rainbow (0..nAtoms-1) onto this chain's atoms.
            indices = range(chain.numAtoms())
            colors = Map(indices, rainbow256Colors, 0, len(indices))
            molCol[chain.getIndices()] = colors
        return molCol[sel.getIndices()]
| |
#!/usr/bin/env python
"""Make big QUOCKA cubes"""
from IPython import embed
import schwimmbad
import sys
from glob import glob
from tqdm import tqdm
import matplotlib.pyplot as plt
from radio_beam import Beam, Beams
from radio_beam.utils import BeamError
from astropy import units as u
from astropy.io import fits
from astropy.wcs import WCS
import au2
import scipy.signal
import numpy as np
from functools import partial
import reproject as rpj
import warnings
from astropy.utils.exceptions import AstropyWarning
warnings.simplefilter('ignore', category=AstropyWarning)
# Require reproject >= 0.7.
# The original check parsed float(__version__[0:3]), which misreads
# versions such as '0.10' (-> 0.1).  Compare numeric (major, minor)
# components instead.  NOTE: pre-release suffixes (e.g. '0.8rc1') fail
# the int() parse and are treated as too old.
try:
    _rpj_parts = rpj.__version__.split('.')
    _rpj_ok = (int(_rpj_parts[0]), int(_rpj_parts[1])) >= (0, 7)
except (IndexError, ValueError):
    _rpj_ok = False
if not _rpj_ok:
    print('We require reproject version > 0.7')
    print(f'Current version is {rpj.__version__}')
    print('Please update reproject!')
    quit()
class Error(Exception):
    """Root of this module's exception hierarchy."""
class GridError(Error):
    """Raised when the pixel grid is too coarse for the convolving beam."""
def round_up(n, decimals=0):
    """Ceil *n* at the given number of decimal places."""
    scale = 10 ** decimals
    return np.ceil(n * scale) / scale
def my_ceil(a, precision=0):
return np.round(a + 0.5 * 10**(-precision), precision)
def getmaxbeam(file_dict, tolerance=0.0001, nsamps=200, epsilon=0.0005, verbose=False):
    """Find common beam
    Arguments:
    file_dict {dict} -- Filenames for each bandcube.
    Keyword Arguments:
    tolerance {float} -- See common_beam (default: {0.0001})
    nsamps {int} -- See common_beam (default: {200})
    epsilon {float} -- See common_beam (default: {0.0005})
    verbose {bool} -- Verbose output (default: {False})
    Returns:
    cmn_beam {Beam} -- Common beam
    """
    if verbose:
        print('Finding common beam...')
    stokes = ['i', 'q', 'u', 'v']
    # NOTE(review): beam_dict is never populated or used.
    beam_dict = {}
    beams = []
    # Collect the restoring beam of every input cube; the first Stokes I
    # header becomes the reference for the pixel grid.
    for stoke in stokes:
        for i, file in enumerate(file_dict[stoke]):
            header = fits.getheader(file, memmap=True)
            if stoke == 'i' and i == 0:
                target_header = header
            beam = Beam.from_fits_header(header)
            beams.append(beam)
    # Repack into a Beams object.  NOTE(review): .value is reattached as
    # degrees here — confirm the header beams are indeed in degrees.
    beams = Beams(
        [beam.major.value for beam in beams]*u.deg,
        [beam.minor.value for beam in beams]*u.deg,
        [beam.pa.value for beam in beams]*u.deg
    )
    try:
        cmn_beam = beams.common_beam(
            tolerance=tolerance, epsilon=epsilon, nsamps=nsamps)
    except BeamError:
        # Retry with a tighter tolerance when the optimizer fails.
        if verbose:
            print("Couldn't find common beam with defaults")
            print("Trying again with smaller tolerance")
        cmn_beam = beams.common_beam(
            tolerance=tolerance*0.1, epsilon=epsilon, nsamps=nsamps)
    # Round the beam up to tidy values (whole arcsec axes).
    cmn_beam = Beam(
        major=my_ceil(cmn_beam.major.to(u.arcsec).value, precision=0)*u.arcsec,
        minor=my_ceil(cmn_beam.minor.to(u.arcsec).value, precision=0)*u.arcsec,
        pa=round_up(cmn_beam.pa.to(u.deg), decimals=2)
    )
    # Pixel scale from the reference header; requires square pixels.
    dx = target_header['CDELT1']*-1*u.deg
    dy = target_header['CDELT2']*u.deg
    assert abs(dx) == abs(dy)
    grid = dy
    # Convolving beam needed to take each input beam to the common beam.
    conbeams = [cmn_beam.deconvolve(beam) for beam in beams]
    # Check that convolving beam will be nyquist sampled
    min_samps = []
    for b_idx, conbeam in enumerate(conbeams):
        # Get maj, min, pa
        samp = conbeam.minor / grid.to(u.arcsec)
        if samp < 2:
            min_samps.append([samp, b_idx])
    if len(min_samps) > 0:
        # Grow the common beam so the worst-sampled convolving beam
        # reaches 2 pixels across its minor axis.
        print('Adjusting common beam to be sampled by grid!')
        worst_idx = np.argmin([samp[0] for samp in min_samps], axis=0)
        samp_cor_fac, idx = 2 / \
            min_samps[worst_idx][0], int(
                min_samps[worst_idx][1])
        conbeam = conbeams[idx]
        major = conbeam.major
        minor = conbeam.minor*samp_cor_fac
        pa = conbeam.pa
        # Check for small major!
        if major < minor:
            major = minor
            pa = 0*u.deg
        cor_beam = Beam(major, minor, pa)
        if verbose:
            print('Smallest common beam is:', cmn_beam)
        # New common beam = worst input beam convolved with the corrected
        # convolving beam, rounded to 0.1 arcsec.
        cmn_beam = beams[idx].convolve(cor_beam)
        cmn_beam = Beam(
            major=my_ceil(cmn_beam.major.to(u.arcsec).value, precision=1)*u.arcsec,
            minor=my_ceil(cmn_beam.minor.to(u.arcsec).value, precision=1)*u.arcsec,
            pa=round_up(cmn_beam.pa.to(u.deg), decimals=2)
        )
    if verbose:
        print('Smallest common Nyquist sampled beam is:', cmn_beam)
    return cmn_beam
def writecube(data, beam, stoke, field, outdir, verbose=False):
    """Write cubes to disk
    Arguments:
    data {dict} -- Image and frequency data and metadata
    beam {Beam} -- New common resolution
    stoke {str} -- Stokes parameter
    field {str} -- Field name
    outdir {str} -- Output directory
    Keyword Arguments:
    verbose {bool} -- Verbose output (default: {False})
    """
    # Make filename
    outfile = f"{field}.{stoke}.cutout.bigcube.fits"
    # Make header: attach the common beam and describe the frequency axis.
    # Channel width = median spacing of the frequency list (assumes an
    # essentially regular axis — TODO confirm for combined bands).
    d_freq = np.nanmedian(np.diff(data['freqs']))
    header = data['target header']
    header = beam.attach_to_header(header)
    header['CRVAL3'] = data['freqs'][0].to_value()
    header['CDELT3'] = d_freq.to_value()
    # Save the data
    fits.writeto(f'{outdir}/{outfile}', data['cube'],
                 header=header, overwrite=True)
    if verbose:
        print("Saved cube to", f'{outdir}/{outfile}')
    if stoke == 'i':
        # The frequency list is written once, alongside the Stokes I cube.
        freqfile = f"{field}.bigcube.frequencies.txt"
        np.savetxt(f"{outdir}/{freqfile}", data['freqs'].to_value())
        if verbose:
            print("Saved frequencies to", f"{outdir}/{freqfile}")
def main(pool, args, verbose=False):
    """Main script

    Reads the per-band cutout cubes for one QUOCKA field, regrids them to
    the 2100 MHz grid, convolves all planes to a common beam, stacks the
    bands into one cube per Stokes parameter, and writes the results.

    pool -- schwimmbad-style pool providing imap() for per-channel work.
    args -- parsed CLI arguments (see cli()).
    verbose -- print progress information.
    """
    # Set up variables
    bands = [2100, 5500, 7500]
    stokes = ['i', 'q', 'u', 'v']
    datadir = args.datadir
    field = args.field
    # Strip trailing slashes so path joins below stay clean.
    if datadir is not None:
        if datadir[-1] == '/':
            datadir = datadir[:-1]
    outdir = args.outdir
    if outdir is not None:
        if outdir[-1] == '/':
            outdir = outdir[:-1]
    elif outdir is None:
        outdir = datadir
    # Glob out files
    file_dict = {}
    for stoke in stokes:
        file_dict.update(
            {
                stoke: sorted(
                    glob(f'{datadir}/{field}.*.{stoke}.cutout.bandcube.fits')
                )
            }
        )
    file_dict.update(
        {
            'freqs': sorted(
                glob(f'{datadir}/{field}.*.bandcube.frequencies.txt')
            )
        }
    )
    # Check files were found
    for stoke in stokes:
        if len(file_dict[stoke]) == 0:
            raise Exception(f'No Stokes {stoke} files found!')
    # Get common beam
    big_beam = getmaxbeam(file_dict,
                          tolerance=args.tolerance,
                          nsamps=args.nsamps,
                          epsilon=args.epsilon,
                          verbose=verbose)
    bmaj = args.bmaj
    bmin = args.bmin
    bpa = args.bpa
    # Set to largest
    # NOTE(review): when ANY of bmaj/bmin/bpa is supplied, bpa is forced
    # to 0 deg — a user-supplied --bpa is discarded here.  The condition
    # probably meant to test bpa alone; confirm intent.
    if bpa is None and bmin is None and bmaj is None:
        bpa = big_beam.pa.to(u.deg)
    else:
        bpa = 0*u.deg
    if bmaj is None:
        # NOTE(review): the rounded value is immediately overwritten —
        # the round_up() result is a dead store.
        bmaj = round_up(big_beam.major.to(u.arcsec))
        bmaj = big_beam.major.to(u.arcsec)
    elif bmaj*u.arcsec < round_up(big_beam.major.to(u.arcsec)):
        raise Exception('Selected BMAJ is too small!')
    else:
        bmaj *= u.arcsec
    if bmin is None:
        # NOTE(review): same dead store as for bmaj above.
        bmin = round_up(big_beam.minor.to(u.arcsec))
        bmin = big_beam.minor.to(u.arcsec)
    elif bmin*u.arcsec < round_up(big_beam.minor.to(u.arcsec)):
        raise Exception('Selected BMIN is too small!')
    else:
        bmin *= u.arcsec
    new_beam = Beam(
        bmaj,
        bmin,
        bpa
    )
    if verbose:
        print('Common beam is', new_beam)
    # Start computation - work on each Stokes
    stoke_dict = {}
    for stoke in stokes:
        print(f'Working on Stokes {stoke}...')
        datadict = {}
        # Get data from files
        for band in tqdm(bands, desc='Reading data', disable=(not verbose)):
            with fits.open(f'{datadir}/{field}.{band}.{stoke}.cutout.bandcube.fits',
                           memmap=True,
                           mode='denywrite') as hdulist:
                data = hdulist[0].data
                head = hdulist[0].header
            freq = np.loadtxt(
                f'{datadir}/{field}.{band}.bandcube.frequencies.txt')
            datadict.update(
                {
                    band: {
                        'data': data,
                        'head': head,
                        'wcs': WCS(head),
                        'freq': freq,
                        'beam': Beam.from_fits_header(head)
                    }
                }
            )
        # The 2100 MHz band defines the output grid for all bands.
        target_wcs = datadict[2100]['wcs']
        target_header = datadict[2100]['head']
        # Regrid every channel of every band onto the 2100 MHz grid.
        for band in tqdm(bands, desc='Regridding data', disable=(not verbose)):
            worker = partial(
                rpj.reproject_exact,
                output_projection=target_wcs.celestial,
                shape_out=datadict[2100]['data'][0].shape,
                parallel=False,
                return_footprint=False
            )
            input_wcs = datadict[band]['wcs'].celestial
            inputs = [(image, input_wcs) for image in datadict[band]['data']]
            # Pre-fill with NaN so missing channels stay blank.
            newcube = np.zeros_like(datadict[band]['data'])*np.nan
            out = list(
                tqdm(
                    pool.imap(
                        worker, inputs
                    ),
                    total=len(datadict[band]['data']),
                    desc='Regridding channels',
                    disable=(not verbose)
                )
            )
            newcube[:] = out[:]
            datadict[band].update(
                {
                    "newdata": newcube
                }
            )
        # Get scaling factors and convolution kernels
        for band in tqdm(bands, desc='Computing scaling factors', disable=(not verbose)):
            # Beam needed to take this band's beam to the common beam.
            con_beam = new_beam.deconvolve(datadict[band]['beam'])
            dx = target_header['CDELT1']*-1*u.deg
            dy = target_header['CDELT2']*u.deg
            # au2.gauss_factor returns the flux rescaling for the new beam.
            fac, amp, outbmaj, outbmin, outbpa = au2.gauss_factor(
                [
                    con_beam.major.to(u.arcsec).value,
                    con_beam.minor.to(u.arcsec).value,
                    con_beam.pa.to(u.deg).value
                ],
                beamOrig=[
                    datadict[band]['beam'].major.to(u.arcsec).value,
                    datadict[band]['beam'].minor.to(u.arcsec).value,
                    datadict[band]['beam'].pa.to(u.deg).value
                ],
                dx1=dx.to(u.arcsec).value,
                dy1=dy.to(u.arcsec).value
            )
            pix_scale = dy
            # Peak-normalized convolution kernel on the pixel grid.
            gauss_kern = con_beam.as_kernel(pix_scale)
            conbm = gauss_kern.array/gauss_kern.array.max()
            datadict[band].update(
                {
                    'conbeam': conbm,
                    'fac': fac,
                    'target header': target_header
                }
            )
        datadict.update(
            {
                'target header': target_header
            }
        )
        # Convolve data
        for band in tqdm(bands, desc='Smoothing data', disable=(not verbose)):
            smooth = partial(
                scipy.signal.convolve,
                in2=datadict[band]['conbeam'],
                mode='same'
            )
            sm_data = np.zeros_like(datadict[band]['newdata'])*np.nan
            # Zero out NaNs for the convolution, restore them afterwards.
            cube = np.copy(datadict[band]['newdata'])
            cube[~np.isfinite(cube)] = 0
            out = list(tqdm(
                pool.imap(
                    smooth, cube
                ),
                total=len(datadict[band]['newdata']),
                desc='Smoothing channels',
                disable=(not verbose)
            ))
            sm_data[:] = out[:]
            sm_data[~np.isfinite(cube)] = np.nan
            datadict[band].update(
                {
                    'smdata': sm_data,
                }
            )
        stoke_dict.update(
            {
                stoke: datadict
            }
        )
        # Show plots
        if args.debug:
            # Spectrum at the brightest pixel of the smoothed 2100 cube.
            plt.figure()
            i_mom = np.nansum(datadict[2100]['smdata'], axis=0)
            idx = np.unravel_index(np.argmax(i_mom), i_mom.shape)
            for band in bands:
                x = datadict[band]['freq']
                y = datadict[band]['fac'] * \
                    datadict[band]['smdata'][:, idx[0], idx[1]]
                plt.plot(x, y, '.', label=f'Stokes {stoke} -- band {band}')
            if stoke == 'i':
                plt.xscale('log')
                plt.yscale('log')
            plt.xlabel('Frequency [Hz]')
            plt.ylabel('Flux density [Jy/beam]')
            plt.legend()
            plt.show()
    # Make cubes: stack the flux-rescaled bands along the spectral axis.
    for stoke in tqdm(stokes, desc='Making cubes', disable=(not verbose)):
        cube = np.vstack([stoke_dict[stoke][band]['smdata']
                          * stoke_dict[stoke][band]['fac'] for band in bands])
        freq_cube = np.concatenate(
            [stoke_dict[stoke][band]['freq'] for band in bands]) * u.Hz
        stoke_dict[stoke].update(
            {
                'cube': cube,
                'freqs': freq_cube
            }
        )
    # Show plots
    if args.debug:
        # Full-band spectra at the brightest Stokes I pixel.
        i_mom = np.nansum(stoke_dict['i']['cube'], axis=0)
        idx = np.unravel_index(np.argmax(i_mom), i_mom.shape)
        plt.figure()
        for stoke in stokes:
            x = stoke_dict[stoke]['freqs']
            y = stoke_dict[stoke]['cube'][:, idx[0], idx[1]]
            plt.plot(x, y, '.', label=f'Stokes {stoke}')
        plt.xlabel('Frequency [Hz]')
        plt.ylabel('Flux density [Jy/beam]')
        plt.legend()
        plt.show()
        # Same spectra against lambda^2 (rotation-measure view).
        plt.figure()
        for stoke in stokes:
            x = (299792458 / stoke_dict[stoke]['freqs'])**2
            y = stoke_dict[stoke]['cube'][:, idx[0], idx[1]]
            plt.plot(x, y, '.', label=f'Stokes {stoke}')
        plt.xlabel('$\lambda^2$ [m$^2$]')
        plt.ylabel('Flux density [Jy/beam]')
        plt.legend()
        plt.show()
    if not args.dryrun:
        # Save the cubes
        for stoke in tqdm(stokes, desc='Writing cubes', disable=(not verbose)):
            writecube(stoke_dict[stoke],
                      new_beam,
                      stoke,
                      field,
                      outdir,
                      verbose=verbose)
    if verbose:
        print('Done!')
def cli():
"""Command-line interface
"""
import argparse
# Help string to be shown using the -h option
descStr = """
Produce common resolution cubes for QUOCKA data.
Combines seperate cubes per band into single cube.
Make sure to run makecube.py first!
"""
# Parse the command line options
parser = argparse.ArgumentParser(description=descStr,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'datadir',
metavar='datadir',
type=str,
help='Directory containing a single QUOCKA field images.')
parser.add_argument(
'field',
metavar='field',
type=str,
help='QUOCKA field name.')
parser.add_argument(
'-o',
'--outdir',
dest='outdir',
type=str,
default=None,
help='(Optional) Save cubes to different directory [datadir].')
parser.add_argument(
"--bmaj",
dest="bmaj",
type=float,
default=None,
help="BMAJ (arcsec) to convolve to [max BMAJ from given image(s)].")
parser.add_argument(
"--bmin",
dest="bmin",
type=float,
default=None,
help="BMIN (arcsec) to convolve to [max BMAJ | |
import unittest
from unittest.mock import patch
from scrapy.http import HtmlResponse
from fire_emblem_data_scraper.constants import MAX_NUM_OTHER_IMAGES
from fire_emblem_data_scraper.spiders.characters.characters import CharactersSpider
class TestCharactersSpider(unittest.TestCase):
"""
TestCharactersSpider is a class for unit testing CharactersSpider.
"""
def setUp(self):
"""
Method that executes before each test method.
:return: None
"""
self.spider = CharactersSpider()
@patch('fire_emblem_data_scraper.spiders.characters.characters.scrapy.Request')
def test_when_parsing_response_then_request_is_made_for_each_character_link(self, request_mock):
"""
Tests that a request is made for each link to a Fire Emblem character web page that is found in the given
response when parsing the given response.
:param request_mock: A mock of scrapy.Request
:type request_mock: MagicMock
:return: None
"""
character_links = ['/Byleth', '/Edelgard']
html = f'''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Fire Emblem Characters</title>
</head>
<body>
<div id="mw-pages">
<div class="mw-category-group">
<ul>
<li>
<a href="{character_links[0]}"></a>
</li>
<li>
<a href="{character_links[1]}"></a>
</li>
</ul>
</div>
</div>
</body>
</html>
'''
response = HtmlResponse(url='', body=html.encode('utf-8'))
requests = self.spider.parse(response)
for character_link, request in zip(character_links, requests):
character_url = self.spider.BASE_URL + character_link
request_mock.assert_called_with(character_url, callback=self.spider.parse_character)
@patch('fire_emblem_data_scraper.spiders.characters.characters.scrapy.Request')
def test_when_parsing_response_given_next_page_link_is_found_then_request_is_made_for_next_page(self, request_mock):
"""
Tests that a request is made for the next page when parsing the given response, given that a link for the next
page is found in the given response.
:param request_mock: A mock of scrapy.Request
:type request_mock: MagicMock
:return: None
"""
next_page_link = '/next-page'
html = f'''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Fire Emblem Characters</title>
</head>
<body>
<div id="mw-pages">
<a href="{next_page_link}">next page</a>
</div>
</body>
</html>
'''
response = HtmlResponse(url='', body=html.encode('utf-8'))
next_page_url = self.spider.BASE_URL + next_page_link
requests = self.spider.parse(response)
for _ in requests:
request_mock.assert_called_with(next_page_url, callback=self.spider.parse)
@patch('fire_emblem_data_scraper.spiders.characters.characters.scrapy.Request')
def test_when_parsing_response_given_next_page_link_is_not_found_then_request_is_not_made_for_next_page(
self, request_mock):
"""
Tests that a request is not made for the next page when parsing the given response, given that a link for the
next page is not found in the given response.
:param request_mock: A mock of scrapy.Request
:type request_mock: MagicMock
:return: None
"""
html = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Fire Emblem Characters</title>
</head>
<body>
<div id="mw-pages"></div>
</body>
</html>
'''
response = HtmlResponse(url='', body=html.encode('utf-8'))
requests = self.spider.parse(response)
for _ in requests:
request_mock.assert_not_called()
def test_when_parsing_character_given_name_is_found_then_name_is_scraped(self):
"""
Tests that the name of the Fire Emblem character is scraped when parsing the given response of the character's
web page, given that the name of the character is found in the given response.
:return: None
"""
name = 'Lucina'
html = f'''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Lucina</title>
</head>
<body>
<h1 id="firstHeading">{name}</h1>
</body>
</html>
'''
response = HtmlResponse(url='', body=html.encode('utf-8'))
character_item = self.spider.parse_character(response)
self.assertEqual(character_item['name'], name, 'Name was not scraped correctly')
def test_when_parsing_character_given_name_is_found_then_name_is_stripped(self):
"""
Tests that the scraped name of the Fire Emblem character is stripped of leading and trailing whitespace when
parsing the given response of the character's web page, given that the name of the character is found in the
given response.
:return: None
"""
name = 'Lucina'
html = f'''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Lucina</title>
</head>
<body>
<h1 id="firstHeading"> {name}\n</h1>
</body>
</html>
'''
response = HtmlResponse(url='', body=html.encode('utf-8'))
character_item = self.spider.parse_character(response)
self.assertEqual(character_item['name'], name, 'Name was not scraped correctly')
def test_when_parsing_character_given_name_is_not_found_then_character_is_not_scraped(self):
"""
Tests that a Fire Emblem character item is not scraped when parsing the given response of the character's web
page, given that the name of the Fire Emblem character is not found in the given response.
:return: None
"""
html = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title></title>
</head>
<body>
<h1 id="firstHeading"></h1>
</body>
</html>
'''
response = HtmlResponse(url='', body=html.encode('utf-8'))
result = self.spider.parse_character(response)
self.assertIsNone(result, 'An item was unexpectedly scraped')
def test_when_parsing_character_given_primary_image_is_found_then_images_are_scraped(self):
"""
Tests that images of the Fire Emblem character are scraped correctly when parsing the given response of the
character's web page, given that a primary image is found in the given response. In this scenario, images are
scraped correctly if the primary image found is scraped as the primary image and other images found are scraped
as other images.
:return: None
"""
primary_image_link = '/path-of-radiance-ike.png'
other_image_links = ['/radiant-dawn-ike.jpg', '/fire-emblem-heroes-ike.jpg']
html = f'''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Ike</title>
</head>
<body>
<h1 id="firstHeading">Ike</h1>
<div class="tab_content" style="display:block;">
<a class="image">
<img src="{primary_image_link}">
</a>
</div>
<div class="tab_content" style="display:none;">
<a class="image">
<img src="{other_image_links[0]}">
</a>
</div>
<div class="tab_content" style="display:none;">
<a class="image">
<img src="{other_image_links[1]}">
</a>
</div>
</body>
</html>
'''
response = HtmlResponse(url='', body=html.encode('utf-8'))
primary_image_url = self.spider.BASE_URL + primary_image_link
other_image_urls = [self.spider.BASE_URL + other_image_link for other_image_link in other_image_links]
character_item = self.spider.parse_character(response)
self.assertEqual(character_item['primaryImage'], primary_image_url, 'Primary image was not scraped correctly')
self.assertEqual(character_item['otherImages'], other_image_urls, 'Other images were not scraped correctly')
def test_when_parsing_character_given_primary_image_is_not_found_and_other_images_are_found_then_images_are_scraped_with_first_image_found_as_primary_image(
self):
"""
Tests that images of the Fire Emblem character are scraped correctly when parsing the given response of the
character's web page, given that a primary image cannot be found in the given response but other images are
found. In this scenario, images are scraped correctly if the first image found is scraped as the primary image
and other images found are scraped as other images.
:return: None
"""
image_links = ['/thracia776-reinhardt.jpg', '/fire-emblem-heroes-reinhardt.jpg']
html = f'''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Reinhardt</title>
</head>
<body>
<h1 id="firstHeading">Reinhardt</h1>
<div>
<a class="image">
<img src="{image_links[0]}">
</a>
</div>
<div>
<a class="image">
<img src="{image_links[1]}">
</a>
</div>
</body>
</html>
'''
response = HtmlResponse(url='', body=html.encode('utf-8'))
primary_image_url = self.spider.BASE_URL + image_links[0]
other_image_urls = [self.spider.BASE_URL + image_links[1]]
character_item = self.spider.parse_character(response)
self.assertEqual(character_item['primaryImage'], primary_image_url, 'Primary image was not scraped correctly')
self.assertEqual(character_item['otherImages'], other_image_urls, 'Other images were not scraped correctly')
def test_when_parsing_character_given_number_of_images_found_is_greater_than_threshold_then_number_of_images_scraped_is_limited_to_threshold(
self):
"""
Tests that the number of images of the Fire Emblem character scraped is limited to the maximum threshold when
parsing the given response of the character's web page, given that the number of images of the character found
in the given response exceeds the maximum threshold.
:return: None
"""
primary_image_link = '/ike.png'
other_image_links = ['/another-ike-1.png', '/another-ike-2.png', '/another-ike-3.png', '/another-ike-4.png',
'/another-ike-5.png', '/another-ike-6.png', '/another-ike-7.png', '/another-ike-8.png',
'/another-ike-9.png', '/another-ike-10.png', '/another-ike-11.png']
other_images_html = ''.join([f'''
<div class="tab_content" style="display:none;">
<a class="image">
<img src="{other_image_link}">
</a>
</div>
''' for other_image_link in other_image_links])
html = f'''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Ike</title>
</head>
<body>
<h1 id="firstHeading">Ike</h1>
<div class="tab_content" style="display:block;">
<a class="image">
<img src="{primary_image_link}">
</a>
</div>
{other_images_html}
</body>
</html>
'''
response = HtmlResponse(url='', body=html.encode('utf-8'))
primary_image_url = self.spider.BASE_URL + primary_image_link
other_image_urls = [self.spider.BASE_URL + other_image_link for other_image_link in
other_image_links[:MAX_NUM_OTHER_IMAGES]]
character_item = self.spider.parse_character(response)
self.assertEqual(character_item['primaryImage'], primary_image_url, 'Primary image was not scraped correctly')
self.assertEqual(character_item['otherImages'], other_image_urls, 'Other images were not scraped correctly')
def test_when_parsing_character_given_duplicate_images_are_found_then_duplicate_images_are_not_scraped(self):
"""
Tests that duplicate images of the Fire Emblem character are not scraped when parsing the given response of the
character's web page, given that duplicate images of the character are found in the given response.
:return: None
"""
primary_image_link = '/ike-1.png'
other_image_links = ['/ike-2.png', '/ike-1.png', '/ike-2.png']
other_images_html = ''.join([f'''
<div class="tab_content" style="display:none;">
<a class="image">
<img src="{other_image_link}">
</a>
</div>
''' for other_image_link in other_image_links])
html = f'''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Ike</title>
</head>
<body>
<h1 id="firstHeading">Ike</h1>
<div class="tab_content" style="display:block;">
<a class="image">
<img src="{primary_image_link}">
</a>
</div>
{other_images_html}
</body>
</html>
'''
response = HtmlResponse(url='', body=html.encode('utf-8'))
primary_image_url = self.spider.BASE_URL + primary_image_link
filtered_other_image_links = set(other_image_links)
filtered_other_image_links.remove(primary_image_link)
other_image_urls = [self.spider.BASE_URL + other_image_link for other_image_link in
filtered_other_image_links]
character_item = self.spider.parse_character(response)
self.assertEqual(character_item['primaryImage'], primary_image_url, 'Primary image was not scraped correctly')
self.assertEqual(character_item['otherImages'], other_image_urls, 'Other images were not scraped correctly')
def test_when_parsing_character_given_images_are_not_found_then_images_are_not_scraped(self):
"""
Tests that images of the Fire Emblem character are not scraped when parsing the given response of the
character's web page, given that images of the Fire Emblem character are not found in the given response.
:return: None
"""
html = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Altina</title>
</head>
<body>
<h1 id="firstHeading">Altina</h1>
<div>No images of Altina!</div>
</body>
</html>
'''
response = HtmlResponse(url='', body=html.encode('utf-8'))
character_item = self.spider.parse_character(response)
self.assertNotIn('primaryImage', character_item, 'Primary image was unexpectedly scraped')
self.assertNotIn('otherImages', character_item, 'Other images were unexpectedly scraped')
def test_when_parsing_character_given_appearances_are_found_then_appearances_are_scraped(self):
"""
Tests that appearances of the Fire Emblem character are scraped when parsing the given response of the
character's web page, given that the character's appearances are found in the given response.
:return: None
"""
appearances = ['Fire Emblem: Three Houses', 'Fire Emblem: Heroes', 'Super Smash Bros. Ultimate']
html = f'''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Byleth</title>
</head>
<body>
<h1 id="firstHeading">Byleth</h1>
<table>
<tr>
<th>Appearances</th>
| |
# coding: utf-8
# Import libraries
import gmql as gl
import pandas as pd
from pandas import ExcelWriter
import pickle
import collections
def extract_expression(tumor, platform, gencode_version):
"""
The EXTRACT_EXPRESSION operation extracts expression values from TCGA for all the genes of interest and their candidate regulatory genes. Intermediate results files are exported locally during the execution of the function, while the final dataframes are returned as Pandas dataframes and exported locally in the Excel files 'Gene Expression - InterestGenes.xlsx' and 'Gene Expression - RegulatoryGenes.xlsx'.
:param tumor: full name of the tumor of interest, encoded as a string (e.g. 'Ovarian Serous Cystadenocarcinoma', 'Breast Invasive Carcinoma', ...)
:param platform: number identifying the sequencing platform (either 27 for the 27k probes sequencing platform or 450 for the 450k probes sequencing platform)
:param gencode_version: number representing the GENCODE genomic annotations to use (currently, for assembly GRCh38, versions 22, 24 and 27 can be used)
:return: two Pandas dataframes
Example::
import genereg as gr
expr_interest_df, expr_regul_df = gr.GeneExpression.extract_expression(tumor='Ovarian Serous Cystadenocarcinoma', platform=27, gencode_version=22)
"""
# Check input parameters
tcga_tumors = ["Acute Myeloid Leukemia","Adrenocortical Carcinoma","Bladder Urothelial Carcinoma","Brain Lower Grade Glioma" ,"Breast Invasive Carcinoma","Cervical Squamous Cell Carcinoma and Endocervical Adenocarcinoma","Cholangiocarcinoma","Colon Adenocarcinoma","Esophageal Carcinoma","Glioblastoma Multiforme","Head and Neck Squamous Cell Carcinoma","Kidney Chromophobe","Kidney Renal Clear Cell Carcinoma","Kidney Renal Papillary Cell Carcinoma","Liver Hepatocellular Carcinoma","Lung Adenocarcinoma","Lung Squamous Cell Carcinoma","Lymphoid Neoplasm Diffuse Large B-cell Lymphoma","Mesothelioma","Ovarian Serous Cystadenocarcinoma","Pancreatic Adenocarcinoma","Pheochromocytoma and Paraganglioma","Prostate Adenocarcinoma","Rectum Adenocarcinoma","Sarcoma","Skin Cutaneous Melanoma","Stomach Adenocarcinoma","Testicular Germ Cell Tumors","Thymoma","Thyroid Carcinoma","Uterine Carcinosarcoma","Uterine Corpus Endometrial Carcinoma","Uveal Melanoma"]
if tumor not in tcga_tumors:
raise ValueError('PATHOLOGY NOT SUPPORTED! You can analyze one of these 33 types of TCGA tumors: '+(', '.join(tcga_tumors)))
if platform not in [27, 450]:
raise ValueError('PLATFORM NOT RECOGNIZED! Sequencing platforms available: 27 and 450')
if gencode_version not in [22, 24, 27]:
raise ValueError('GRCh38 GENCODE versions available are 22, 24 and 27')
# Load the list of genes of interest
EntrezConversion_df = pd.read_excel('./Genes_of_Interest.xlsx',sheetname='Sheet1',header=0,converters={'GENE_SYMBOL':str,'ENTREZ_GENE_ID':str,'GENE_SET':str})
# Create a list containing the Gene Symbols of the genes of interest
genesSYM_of_interest = []
for i, r in EntrezConversion_df.iterrows():
sym = r['GENE_SYMBOL']
if sym not in genesSYM_of_interest:
genesSYM_of_interest.append(sym)
# Import the dictionary of genes of interest with their candidate regulatory genes
dict_RegulGenes = pickle.load(open('./2_Regulatory_Genes/dict_RegulGenes.p', 'rb'))
# Import the gene-TFs mapping dataframe
Mapping_df = pd.read_excel('./0_Genes_Mapping/Genes_Mapping.xlsx',sheetname='Sheet1',header=0,converters={'ENTREZ_GENE_ID':str,'HGNC_ID':str})
# Create a list containing the Gene Symbols of the regulatory genes of genes of interest
regulatory_genesSYM = []
for key, value in dict_RegulGenes.items():
for gene in value:
if gene not in regulatory_genesSYM:
regulatory_genesSYM.append(gene)
# Extract the list of distinct Gene Symbols mapped in the mapping table
mapped_gene_SYMs = []
for index, row in Mapping_df.iterrows():
sym = row['GENE_SYMBOL']
if sym not in mapped_gene_SYMs:
mapped_gene_SYMs.append(sym)
# Execute the query for the extraction of gene expression values on the remote server, using the PyGMQL Python library
gl.set_remote_address('http://gmql.eu/gmql-rest/')
gl.login()
gl.set_mode('remote')
# Load the TCGA datasets to be used in the query
methylation_dataset = gl.load_from_remote(remote_name='GRCh38_TCGA_methylation', owner='public')
expression_dataset = gl.load_from_remote(remote_name='GRCh38_TCGA_gene_expression', owner='public')
# Identify the sequencing platform to be used
if platform == 27:
seq_platform = 'Illumina Human Methylation 27'
elif platform == 450:
seq_platform = 'Illumina Human Methylation 450'
# Extract all the samples for the current tumor and platform
all_methyl = methylation_dataset.meta_select((methylation_dataset['manually_curated__cases__disease_type'] == tumor) & (methylation_dataset['manually_curated__platform'] == seq_platform) & ((methylation_dataset['biospecimen__bio__sample_type'] == 'Primary Tumor') | (methylation_dataset['biospecimen__bio__sample_type'] == 'Recurrent Tumor')) & (methylation_dataset['clinical__shared__history_of_neoadjuvant_treatment'] == 'No'))
all_expr = expression_dataset.meta_select((expression_dataset['manually_curated__cases__disease_type'] == tumor) & ((expression_dataset['biospecimen__bio__sample_type'] == 'Primary Tumor') | (expression_dataset['biospecimen__bio__sample_type'] == 'Recurrent Tumor')) & (expression_dataset['clinical__shared__history_of_neoadjuvant_treatment'] == 'No'))
# Gene Expression:
expr_0 = all_expr.reg_project(field_list=['ensembl_gene_id','entrez_gene_id','gene_symbol','fpkm'])
expr = expr_0.meta_select(semiJoinDataset=all_methyl, semiJoinMeta=['biospecimen__bio__bcr_sample_barcode'])
# Materialize the results into a GDataframe
expr_Gdf = expr.materialize('./(MaterializeResults)')
# The result dataset is loaded as a GDataframe, an object containing two pandas dataframes, one for the region data and one for the metadata.
# Get the two pandas dataframes:
expr_df_regs = expr_Gdf.regs
expr_df_meta = expr_Gdf.meta
n_regs = len(expr_df_regs)
n_samples = len(expr_df_meta)
# Rename 'chr', 'start', and 'stop' columns header
expr_df_regs.rename(columns={'chr':'chrom','start':'left','stop':'right'}, inplace=True)
# Change index into progressive integer numbers and store the name of the sample in another column
expr_df_regs['sample_id'] = expr_df_regs.index
expr_df_regs.index = range(n_regs)
# Convert unknown values (NaN) to empty strings
expr_df_regs = expr_df_regs.fillna('')
# Convert all the metadata values into strings, since they're encode as lists in Python
col_names = []
for name, values in expr_df_meta.iteritems():
col_names.append(name)
for index, row in expr_df_meta.iterrows():
for c in col_names:
list_val = row[c] # it's encoded as a list
str_val = ''.join(list_val) # convert the value stored as a list in a string
expr_df_meta.set_value(index,c,str_val)
# Since we have to extract the expression values for each distinct sample barcode (aliquot), we create a list containing these distinct identifiers
expr_sample_barcodes_all = []
for index, row in expr_df_meta.iterrows():
barcode = row['biospecimen__bio__bcr_sample_barcode']
if barcode not in expr_sample_barcodes_all: # get distinct values
expr_sample_barcodes_all.append(barcode)
# Check which are repeated aliquots, if present
all_aliqouts = []
for index, row in expr_df_meta.iterrows():
barcode = row['biospecimen__bio__bcr_sample_barcode']
all_aliqouts.append(barcode)
multiple_aliquots = [item for item, count in collections.Counter(all_aliqouts).items() if count > 1]
samples_to_remove = []
expr_sample_barcodes = []
if len(multiple_aliquots) != 0:
# Among the repeated aliquots, keep only the most recent ones (of 2013)
for index, row in expr_df_meta.iterrows():
year = row['biospecimen__bio__year_of_shipment']
barcode = row['biospecimen__bio__bcr_sample_barcode']
if (barcode in multiple_aliquots) and year == '2011':
expr_df_meta.drop(index, inplace=True)
samples_to_remove.append(index)
# Import the list of aliquots in the methylation dataset
text_file = open('./3_TCGA_Data/Common_Aliquots.txt', 'r')
aliquots = text_file.read().split('\n')
aliquots.remove('')
text_file.close()
# Extract the new list of distinct TCGA Aliquots to extract
for index, row in expr_df_meta.iterrows():
barcode = row['biospecimen__bio__bcr_sample_barcode']
if barcode in aliquots:
if barcode not in expr_sample_barcodes:
expr_sample_barcodes.append(barcode)
else:
expr_df_meta.drop(index, inplace=True)
samples_to_remove.append(index)
# Remove regions that corresponded to eliminated repeated aliquots
expr_df_regs = expr_df_regs.loc[~(expr_df_regs['sample_id'].isin(samples_to_remove))].copy()
else:
expr_sample_barcodes = expr_sample_barcodes_all
# Export the metadata dataframe setting the TCGA aliquots as indexes.
Metadata_df = expr_df_meta.copy()
Metadata_df['id_sample'] = Metadata_df.index
Metadata_df.set_index('biospecimen__bio__bcr_sample_barcode', inplace=True)
writer = ExcelWriter('./3_TCGA_Data/Gene_Expression/EXPR_(Metadata).xlsx')
Metadata_df.to_excel(writer,'Sheet1')
writer.save()
# Extract from the expression dataset all the regions that belong to genes of interest
expr_df_regs_interest = expr_df_regs.loc[expr_df_regs['gene_symbol'].isin(genesSYM_of_interest)].copy()
# Extract from the expression dataset all the regions that belong to regulatory genes of genes of interest
expr_df_regs_regulatory = expr_df_regs.loc[expr_df_regs['gene_symbol'].isin(regulatory_genesSYM)].copy()
# Gene expression values for each gene of interest:
# Create a dictionary for storing all the gene expression values for each gene of interest and for each aliquot TCGA
from collections import defaultdict
dict_expr_interest = defaultdict(dict)
for key, value in dict_expr_interest.items():
value = defaultdict(list)
# The main dictionary has the Gene Symbols of the genes of interest as keys and each gene has another dictionary as value, which, in turn, has the different aliquots as keys and lists as values.
# The idea is having a list, containing all the fpkm values, for each gene in each TCGA aliquot.
# Set the Gene Symbol as keys of the main dictionary
for name in genesSYM_of_interest:
dict_expr_interest[name] = {}
# Set the names of the samples barcodes as keys for each dictionary set as value of a specific key (genes)
for sample in expr_sample_barcodes:
for k, v in dict_expr_interest.items():
v[sample] = []
# Set the values by appending the expression values for each gene of interest: these expression values (fpkm) can be found in the 'expr_df_regs_interest' dataframe
for index, row in expr_df_regs_interest.iterrows(): # iterating along the whole dataframe
sym = row['gene_symbol'] # get the Gene Symbol of the gene
fpkm = row['fpkm'] # get the gene expression value
sample = row['sample_id'] # get the name of the sample
# get the aliquot corresponding to current sample
aliq = expr_df_meta.get_value(sample, 'biospecimen__bio__bcr_sample_barcode')
# add the value according to the correct gene ID and TCGA aliquot, rounding it to a float with maximum 6 decimal numbers,
dict_expr_interest[sym][aliq].append(round(float(fpkm),6))
# Convert the nested dictionary also into a dataframe
# Create a dataframe whose row indexes are the different TCGA samples and the columns are the distinct genes of interest
expr_interest_df1 = pd.DataFrame(index = expr_sample_barcodes, columns = [genesSYM_of_interest])
# Add three additional columns for the name of the sample and the ID and barcode of the patient corresponding to each aliquot, in order to have them available if we will need it
expr_interest_df2 = pd.DataFrame(index = expr_sample_barcodes, columns = ['Sample_ID','Tumor','Patient_ID'])
# Create the final dataframe
expr_interest_df = expr_interest_df1.join(expr_interest_df2)
# Fill the previously created dataframe with the correct gene expression values, for each gene of interest and for each TCGA aliquot
for gene_sym, dict_value in dict_expr_interest.items():
for tcga_aliq, exp_list in dict_value.items():
if (len(exp_list) != 0):
fpkm = exp_list[0]
# add the expression value in the proper cell of the dataframe, rounding it to a float with maximum 6 decimal numbers
expr_interest_df.set_value(tcga_aliq,gene_sym,round(fpkm,6))
# Add to the dataframe the name of each sample, the tumor code and the patient's ID in correspondence of each TCGA aliquot
for index, row in expr_df_meta.iterrows():
aliquot = row['biospecimen__bio__bcr_sample_barcode']
tumor_tag = row['clinical__admin__disease_code']
patient_id = row['clinical__shared__patient_id']
expr_interest_df.set_value(aliquot,'Sample_ID',index)
expr_interest_df.set_value(aliquot,'Tumor',tumor_tag)
expr_interest_df.set_value(aliquot,'Patient_ID',patient_id)
# Add a row at the beginning of the dataframe to insert also the Entrez Gene ID of each gene of interest
additional_index = ['ENTREZ_GENE_ID']
expr_interest_df0_1 = pd.DataFrame(index = additional_index, columns = [genesSYM_of_interest])
expr_interest_df0_2 = pd.DataFrame(index = additional_index, columns = ['Sample_ID','Tumor','Patient_ID'])
expr_interest_df0 = expr_interest_df0_1.join(expr_interest_df0_2)
frames = [expr_interest_df0, expr_interest_df]
expr_interest_df = pd.concat(frames)
# Add for each | |
'tcp:127.0.0.1:6633'])
subprocess.check_call([self.ovs, self.ovsdb,
'set', 'bridge', 'dp0',
'other-config:datapath-id=7266767372667673'])
log.info("Virtual switch dp0 set up complete !!!")
    def AddPort(self, switchName, Port_No):
        """Create a veth pair for a datapath port and attach one end to bridge dp0.

        :param switchName: name of the datapath switch; its first three characters
            are used to build the dp0-side veth name.
        :param Port_No: OpenFlow port number of the port on the datapath switch.
        :return: tuple (vs_port_name, vs_ofp_no) — the host-side interface name and
            the OpenFlow port number requested for it on dp0 (50 + Port_No).

        Exits the process via sys.exit(1) if the veth pair cannot be created or the
        port cannot be added to dp0.
        """
        # dp0-side veth peer, e.g. 'veth-abc-3' for switch 'abcdef' port 3
        dp_veth = ''.join(['veth', '-', switchName[:3], '-', str(Port_No)])
        # host-visible interface name: switch name with the port number appended
        vs_port_name = ''.join([switchName, str(Port_No)])
        # dp0 OpenFlow numbers are offset by 50 — presumably to avoid clashing with
        # the datapath's own port numbering; TODO confirm against the flow tables
        vs_ofp_no = 50 + Port_No
        set_ofp_no = 'ofport_request'+'='+str(vs_ofp_no)
        AddLink = subprocess.call(['ip', 'link', 'add', dp_veth, 'type',
                                   'veth', 'peer', 'name', vs_port_name])
        if AddLink != 0:
            # 'ip link add' failed: tolerate the case where the pair already exists
            # (ifconfig on the peer succeeds); otherwise this is fatal.
            try:
                subprocess.check_call(['ifconfig', dp_veth])
            except subprocess.CalledProcessError:
                log.error("Fatal Error!!! Port name:%s, No:%s not created, Quitting",
                          vs_port_name, vs_ofp_no)
                traceback.print_exc(file=sys.stdout)
                sys.exit(1)
        try:
            # Attach the dp0-side peer and request the offset OpenFlow port number.
            subprocess.check_call([self.ovs, self.ovsdb, 'add-port', 'dp0',
                                   dp_veth, '--', 'set', 'interface',
                                   dp_veth, set_ofp_no])
            log.info("Port: %s added to bridge dp0", vs_port_name)
        except subprocess.CalledProcessError:
            log.error("Fatal Error!!! Port name:%s, No:%s not added to virtual switch dp0, Quitting",
                      vs_port_name, vs_ofp_no)
            traceback.print_exc(file=sys.stdout)
            sys.exit(1)
        # Track the host-side interface (module-level list) so it can be removed later.
        vsdevices.append(vs_port_name)
        # Bring both ends of the veth pair up.
        subprocess.check_call(['ip', 'link', 'set', vs_port_name, 'up'])
        subprocess.check_call(['ip', 'link', 'set', dp_veth, 'up'])
        return vs_port_name, vs_ofp_no
def SetIPAddress(self, PortName, addresses):
for address in addresses:
try:
subprocess.check_call(['ip', 'addr', 'add', address,
'dev', PortName])
log.info("Added address %s to interface %s", address, PortName)
except subprocess.CalledProcessError:
log.error("Error while adding %s to interface %s",
address, PortName)
traceback.print_exc(file=sys.stdout)
sys.exit(1)
def DelPort(self, switchName, Port_No):
dp_veth = ''.join(['veth', '-', switchName[:3], '-', str(Port_No)])
vs_port_name = ''.join([switchName, str(Port_No)])
DelLink = subprocess.call(['ip', 'link', 'del', vs_port_name])
if DelLink != 0:
try:
subprocess.check_call(['ifconfig', dp_veth])
log.error("Interface %s not deleted from virtual switch dp0")
sys.exit(1)
except subprocess.CalledProcessError:
pass
else:
try:
subprocess.check_call([self.ovs, self.ovsdb, 'del-port', 'dp0',
dp_veth])
vsdevices.remove(vs_port_name)
log.info("Port: %s deleted from bridge dp0", vs_port_name)
except subprocess.CalledProcessError:
log.error("Error!!! Port name:%s was not deleted from virtual switch dp0",
vs_port_name)
traceback.print_exc(file=sys.stdout)
sys.exit(1)
def AddFastPath(self, fp_int_name, fp_ofp_no):
log.info("Setting up the fastpath interface on dp0")
set_ofp_no = 'ofport_request'+'='+str(fp_ofp_no)
try:
subprocess.check_call([self.ovs, self.ovsdb, 'add-port', 'dp0',
fp_int_name, '--', 'set', 'interface',
fp_int_name, set_ofp_no])
log.info("Port %s with OpenFlow Port number %s added to dp0",
fp_int_name, fp_ofp_no)
except subprocess.CalledProcessError:
log.error("Unsuccessful setup for FastPath interface on dp0")
traceback.print_exc(file=sys.stdout)
sys.exit(1)
def CleanUp(self):
log.info("Removing bridge dp0 ...")
subprocess.call([self.ovs, self.ovsdb, 'del-br', 'dp0'])
log.info("Bridge dp0 removed")
def vsinterfacedelete(self, vs_int_name):
try:
subprocess.check_call(['ip', 'link', 'del', vs_int_name])
log.info("Interfaces %s deleted", vs_int_name)
except subprocess.CalledProcessError:
log.error("Error, interface delete failed")
traceback.print_exc(file=sys.stdout)
sys.exit(1)
class RheaController(app_manager.RyuApp):
''' RheaFlow's main application, the logic of RheaFlow application
is implemented here.
'''
_CONTEXTS = {'switches': switches.Switches, 'netlink': RheaNLSocket,
'RouteReceiver': RheaRouteReceiver}
OFP_VERSIONS = [ofproto.OFP_VERSION]
    def __init__(self, *args, **kwargs):
        """Initialise the controller and wire up its helper components.

        Ryu supplies the applications declared in _CONTEXTS ('switches', 'netlink',
        'RouteReceiver') through **kwargs.
        """
        super(RheaController, self).__init__(*args, **kwargs)
        self.VSManager = VSInterfaceManager()
        self.switches = kwargs['switches']
        self.receiver = kwargs['RouteReceiver']
        # Configuration parsed from the module-level config_file path.
        self.yamlObj = RheaYAML(config_file)
        self.table = Table()
        self.labeller = MetaVLAN()
        # Routes that could not be installed yet; retried by the spawned thread below.
        self.pendingroute = []
        # Maps a virtual-switch interface ifindex -> its OpenFlow port number on dp0
        # (populated in configure_datapath).
        self.vsif_to_ofp = {}
        self.netlink = kwargs['netlink']
        self.flowprocessor = RheaFlowProcessor(self.switches)
        self.all_fp_entries = self.yamlObj.fetch_fastpath_entries()
        log.info("RYU RheaController running.")
        # Background green-thread; presumably retries entries in self.pendingroute —
        # see retry_pendingroutes for the exact policy.
        self.threads.append(hub.spawn(self.retry_pendingroutes))
        self.dp_entries = []
        # Datapaths with inter-switch links whose configuration is deferred until the
        # fastpath switch is known (see configure_datapath).
        self.isl_switches = []
        self.fastpath_configured = False
    def configure_datapath(self, dp, dp_id, vs_port_prefix, dp_entry):
        """Mirror a datapath's configured ports onto the virtual switch dp0.

        For each OpenFlow port in the switch's YAML entry: create a mirror
        port on dp0, record the dp<->vs mapping in self.table, and install
        FastPath / inter-switch-link (ISL) flows where configured.

        :param dp: Ryu datapath object of the switch being configured
        :param dp_id: its datapath id
        :param vs_port_prefix: name prefix for the mirror ports on dp0
        :param dp_entry: this switch's entry from the YAML configuration
        """
        ofports_in_dp_entry = dp_entry['ports']
        dp_fastpath_port = self.yamlObj.fetch_fpport(dp_entry)
        vs_fastpath_port = self.yamlObj.fetch_vsfpport(dp_entry)
        interswitch_links = self.yamlObj.fetch_interswitch_links(dp_entry)
        # A switch with both FastPath ends configured becomes the FastPath
        # switch for the deployment.
        if ((dp_fastpath_port is not None) and
            (vs_fastpath_port is not None)):
            self.table.set_fastpath_switch(dpid_to_str(dp_id),
                                           dp_fastpath_port,
                                           vs_fastpath_port)
            self.fastpath_configured = True
        # ISL switches cannot be configured before a FastPath switch is
        # known; park them in self.isl_switches and return.
        if (interswitch_links is not None):
            if not self.table.fastpath_switch:
                self.isl_switches.append([dp_entry, dp_id, vs_port_prefix])
                return
        if len(ofports_in_dp_entry) != 0:
            for ofp_no, addresses in ofports_in_dp_entry.items():
                ofport = self.switches._get_port(dp_id, ofp_no)
                # NOTE(review): a configured port that is absent from the
                # datapath is silently skipped (there is no else branch).
                if ofport is not None:
                    port_name = ofport.name
                    port_hw_addr = ofport.hw_addr
                    # Create the mirror port on dp0; the short sleep gives
                    # the kernel time to create the interface before
                    # addresses are assigned to it.
                    vs_port_name, vs_port_no = self.VSManager.AddPort(vs_port_prefix, ofp_no)
                    time.sleep(.10)
                    self.VSManager.SetIPAddress(vs_port_name, addresses)
                    vs_interface = self.netlink.find_interface_by_name(vs_port_name)
                    if vs_interface:
                        vs_port_hw_addr = vs_interface['mac-address']
                        vs_ifindex = vs_interface['ifindex']
                        # Remember which OF port backs this ifindex.
                        self.vsif_to_ofp[vs_ifindex] = vs_port_no
                    else:
                        log.error("Virtual switch interface not found, Mapping not completed, %s not found in interface table",
                                  vs_port_name)
                        traceback.print_exc(file=sys.stdout)
                        # NOTE(review): if shutdown() returns instead of
                        # exiting, vs_port_hw_addr is unbound below.
                        self.shutdown(1)
                    log.info("Virtual switch Port %s with OpenFlow port number %s has a mac address of %s",
                             vs_port_name, vs_port_no, vs_port_hw_addr)
                    self.table.update_dp_port(dpid_to_str(dp_id),
                                              ofp_no, port_name,
                                              port_hw_addr,
                                              vs_port_name,
                                              vs_port_no,
                                              vs_port_hw_addr)
                    log.info("OpenFlow port %d on dp_id=%s added to dp0",
                             ofp_no, dpid_to_str(dp_id))
                    # FastPath: allocate a label for this port and install
                    # the vs-side and dp-side FastPath flows.
                    if ((dp_fastpath_port is not None) and
                        (vs_fastpath_port is not None)):
                        fp_label = self.labeller.allocate_label()
                        self.table.update_fastpath(dpid_to_str(dp_id),
                                                   ofp_no, fp_label,
                                                   vs_port_no,
                                                   dp_fastpath_port,
                                                   vs_fastpath_port)
                        self.flowprocessor.vs_fastpath_flows(fp_label,
                                                             vs_fastpath_port,
                                                             vs_port_no)
                        self.flowprocessor.fastpath_flows(dp, fp_label,
                                                          ofp_no,
                                                          dp_fastpath_port,
                                                          vs_port_hw_addr)
                        log.info("FastPath is enabled, allocating label:%s for port %s on dpid:%s to port %s on the virtual switch using link (dpid:%s,port:%s)->(VS,port:%s)",
                                 fp_label, ofp_no, dpid_to_str(dp_id),
                                 vs_port_no, dpid_to_str(dp_id),
                                 dp_fastpath_port, vs_fastpath_port)
                    else:
                        log.info("FastPath is not enabled for port %s on (dpid:%s)", ofp_no,
                                 dpid_to_str(dp_id))
                    # ISL: label this port and wire flows towards the
                    # FastPath switch through the inter-switch link.
                    if (interswitch_links is not None):
                        isl_label = self.labeller.allocate_label()
                        self.table.update_isl(dpid_to_str(dp_id),
                                              ofp_no, isl_label,
                                              interswitch_links)
                        self.flowprocessor.ingress_isl_flows(dp, isl_label,
                                                             ofp_no, interswitch_links)
                        if self.table.fastpath_switch:
                            # NOTE(review): dict.keys()[0] is Python-2 only;
                            # under Python 3 use next(iter(...)).
                            fastpath_dpid = self.table.fastpath_switch.keys()[0]
                            dp_fs_port, vs_fs_port = self.table.fastpath_switch[fastpath_dpid]
                            # NOTE(review): identity comparison between two
                            # strings; '!=' was almost certainly intended.
                            if fastpath_dpid is not dpid_to_str(dp_id):
                                for isl_port, remote_dp in interswitch_links.items():
                                    remote_dpid = remote_dp.keys()[0]
                                    remote_dpid_port = remote_dp[remote_dpid]
                                    if remote_dpid == fastpath_dpid:
                                        self.flowprocessor.vs_fastpath_flows(isl_label,
                                                                             vs_fs_port,
                                                                             vs_port_no)
                                        fastpath_switch = self.switches._get_switch(str_to_dpid(fastpath_dpid))
                                        fastpath_dp = fastpath_switch.dp
                                        self.flowprocessor.egress_isl_flows(fastpath_dp,
                                                                            isl_label,
                                                                            remote_dpid_port,
                                                                            dp_fs_port)
                                        self.flowprocessor.fastpath_flows(dp, isl_label,
                                                                          ofp_no,
                                                                          isl_port,
                                                                          vs_port_hw_addr)
                        log.info("Inter-switch link is enabled, allocationg label:%s for port %s on dpid:%s.",
                                 isl_label, ofp_no, dpid_to_str(dp_id))
                    else:
                        log.info("Inter-switch link is not enabled for port:%s on (dpid:%s)",
                                 ofp_no, dpid_to_str(dp_id))
                    # Plain port: neither FastPath nor ISL -- install only
                    # the default mapping flow.
                    if ((dp_fastpath_port is None) and
                        (vs_fastpath_port is None) and
                        (interswitch_links is None)):
                        self.flowprocessor.create_initial_flow(dp,
                                                               vs_port_hw_addr,
                                                               ofp_no)
        else:
            log.warn("There are no ports to be mapped for (dp_id=%s) in config",
                     dpid_to_str(dp_id))
    @set_ev_cls(event.EventSwitchEnter, MAIN_DISPATCHER)
    def handler_datapath_enter(self, ev):
        """Configure a datapath when it connects to the controller.

        Real switches are configured from their YAML entry (or deferred
        in self.dp_entries until dp0 connects).  When the virtual switch
        itself enters, all deferred switches are configured and the
        FastPath interface is attached to dp0.
        """
        dp = ev.switch.dp
        dp_id = dp.id
        log.info("INFO:RheaController:Datapath is up (dp_id=%s)",
                 dpid_to_str(dp_id))
        # Start from a clean flow table on every (re)connect.
        self.flowprocessor.clear_flows(dp)
        if not is_rfvs(dp_id):
            # A real (non-virtual) switch entered.
            dp_entry = self.yamlObj.get_dp_entry(self.yamlObj.configs,
                                                 dpid_to_str(dp_id))
            if dp_entry is not None:
                log.info("INFO:configuring flow tables and installing initial rules on datapath (dp_id=%s)",
                         dpid_to_str(dp_id))
                vs_port_prefix = self.yamlObj.get_vs_port_prefix(self.yamlObj.configs,
                                                                 dpid_to_str(dp_id))
                decrement_ttl = self.yamlObj.dec_ttl_set(self.yamlObj.configs,
                                                         dpid_to_str(dp_id))
                self.table.update_dp_dec_ttl(dpid_to_str(dp_id), decrement_ttl)
                if vs_port_prefix is None:
                    # Default prefix derived from the decimal dpid.
                    vs_port_prefix = 'dpid'+str(int(dpid_to_str(dp_id), 16))+'-p'
                try:
                    ofports_in_dp_entry = dp_entry['ports']
                except KeyError:
                    log.error("No 'ports' field was found in the config for (dp_id=%s)",
                              dpid_to_str(dp_id))
                    traceback.print_exc(file=sys.stdout)
                    self.shutdown(1)
                # vs_id is a module-level name identifying the virtual
                # switch dp0 -- confirm against module top.
                vswitch = self.switches._get_switch(str_to_dpid(vs_id))
                if vswitch is None:
                    # dp0 not connected yet: defer this switch.
                    self.dp_entries.append([dp_entry, dp_id, vs_port_prefix])
                    return
                self.configure_datapath(dp, dp_id, vs_port_prefix, dp_entry)
                # Once a FastPath switch exists, configure parked ISL
                # switches.
                if self.fastpath_configured is True:
                    if len(self.isl_switches) != 0:
                        for isl_switch in self.isl_switches:
                            dp_entry = isl_switch[0]
                            dp_id = isl_switch[1]
                            vs_port_prefix = isl_switch[2]
                            ofports_in_dp_entry = dp_entry['ports']
                            switch = self.switches._get_switch(dp_id)
                            datapath = switch.dp
                            self.configure_datapath(datapath, dp_id, vs_port_prefix, dp_entry)
                        self.isl_switches = []
        else:
            # The virtual switch (dp0) entered: configure deferred switches.
            if len(self.dp_entries) != 0:
                for entry in self.dp_entries:
                    dp_entry = entry[0]
                    dp_id = entry[1]
                    vs_port_prefix = entry[2]
                    ofports_in_dp_entry = dp_entry['ports']
                    switch = self.switches._get_switch(dp_id)
                    datapath = switch.dp
                    self.configure_datapath(datapath, dp_id, vs_port_prefix, dp_entry)
                self.dp_entries = []
            # NOTE(review): this ISL catch-up duplicates the block in the
            # branch above; consider extracting a helper.
            if self.fastpath_configured is True:
                if len(self.isl_switches) != 0:
                    for isl_switch in self.isl_switches:
                        dp_entry = isl_switch[0]
                        dp_id = isl_switch[1]
                        vs_port_prefix = isl_switch[2]
                        ofports_in_dp_entry = dp_entry['ports']
                        switch = self.switches._get_switch(dp_id)
                        datapath = switch.dp
                        self.configure_datapath(datapath, dp_id, vs_port_prefix, dp_entry)
                    self.isl_switches = []
            vs_fastpath_int, vs_fastpath_port = self.yamlObj.vs_fp_entry()
            if ((vs_fastpath_int is None) and (vs_fastpath_port is None) and
                (self.table.fastpaths is not None)):
                log.warn("No interface was designated for FastPath on the virtual switch")
                self.flowprocessor.create_initial_flow(dp)
            else:
                vs_iface = self.netlink.find_interface_by_name(vs_fastpath_int)
                if vs_iface is None:
                    log.error("Interface %s not found!!!",
                              vs_fastpath_int)
                    self.shutdown(1)
                self.VSManager.AddFastPath(vs_fastpath_int, vs_fastpath_port)
            log.info("Bringing up interfaces added to virtual switch")
            # Bring every known interface administratively up.
            for iface in self.netlink.ifaceTable:
                if iface['state'] != 'UP':
                    ifname = iface['ifname']
                    subprocess.call(['ip', 'link', 'set', ifname, 'up'])
@set_ev_cls(RheaFlowEvents.EventRouterConnect)
def handler_router_connect(self, ev):
log.info("Event is %s", ev)
routerid = ev.routerid
log.info("Router with address %s has connected with port %s",
routerid[0], routerid[1])
@set_ev_cls(RheaFlowEvents.EventNeighbourNotify)
def neighbour_handler(self, ev):
'''Handles neigbour added or removed'''
event = ev.action
neighbour = ev.neighbour
vsindex = neighbour['ifindex']
if vsindex in self.vsif_to_ofp:
vs_ofport = self.vsif_to_ofp[vsindex]
vs_interface = self.netlink.find_interface(vsindex)
if event == 'RTM_NEWNEIGH':
self.flowprocessor.new_dphost_add_flow(neighbour,
self.table,
vs_interface,
vs_ofport)
elif event == 'RTM_DELNEIGH':
vs_interface = self.netlink.find_interface(vsindex)
if vs_interface is None:
return
self.flowprocessor.delete_host_flow(neighbour, self.table,
vs_interface, vs_ofport)
else:
log.info("Neigbour event %s happened", event)
    @set_ev_cls(RheaFlowEvents.EventRouterDisconnect)
    def handler_router_disconnect(self, ev):
        # Log which router (address, port) is disconnecting.
        log.info("Event is %s", ev)
        routerid = ev.routerid
        # NOTE(review): the backslash continuation embeds the next line's
        # leading whitespace inside the log message; probably unintended.
        log.info("Router with address %s connected with port %s is\
                 disconnecting", routerid[0], routerid[1])
@set_ev_cls(RheaFlowEvents.EventRouteDeleted)
def handler_remove_route(self, ev):
''' Event handler for deleting rules for
the route that is been deleted.
'''
route = ev.route
next_hop = route[1]
nh_interface = self.netlink.find_interface_by_ip(next_hop)
if nh_interface is None:
nh_host = self.netlink.ip_host_lookup(next_hop)
if nh_host is not None:
self.flowprocessor.delete_flows(route, self.table,
self.netlink.ifaceTable,
self.netlink.neighbours,
self.vsif_to_ofp,
host=nh_host)
else:
pass
else:
self.flowprocessor.delete_flows(route, self.table,
self.netlink.ifaceTable,
self.netlink.neighbours,
self.vsif_to_ofp,
interface=nh_interface)
@set_ev_cls(RheaFlowEvents.EventRouteReceived)
def handler_route_received(self, ev):
''' Event handler for converting routes received
from router into OpenFlow rules.
'''
route = ev.route
next_hop = route[1]
nh_interface = self.netlink.find_interface_by_ip(next_hop)
if nh_interface is None:
nh_host = self.netlink.ip_host_lookup(next_hop)
if nh_host is None:
if next_hop in self.netlink.unresolvedneighbours:
log.error("%s is unreachable, flow cannot be installed!!!",
next_hop)
else:
self.netlink.NeighbourDiscovery(next_hop)
self.pendingroute.append(route)
log.info("Adding %s to pending route table", route)
else:
self.flowprocessor.convert_route_to_flow(route, self.table,
self.netlink.ifaceTable,
self.netlink.neighbours,
self.vsif_to_ofp,
host=nh_host)
else:
self.flowprocessor.convert_route_to_flow(route, self.table,
self.netlink.ifaceTable,
self.netlink.neighbours,
self.vsif_to_ofp,
interface=nh_interface)
    def retry_pendingroutes(self):
        # Spawned as a green thread from __init__ via hub.spawn().
        # NOTE(review): there is no enclosing `while True:` loop, so this
        # thread makes a single pass over the pending routes, sleeps once
        # and exits.  The trailing hub.sleep(600) strongly suggests a
        # periodic retry loop was intended -- confirm with the authors.
        for route in self.pendingroute:
            next_hop = route[1]
            nh_host = self.netlink.ip_host_lookup(next_hop)
            if nh_host is not None:
                self.flowprocessor.convert_route_to_flow(route, self.table,
                                                         self.netlink.ifaceTable,
                                                         self.netlink.neighbours,
                                                         self.vsif_to_ofp,
                                                         host=nh_host)
                # Drop the now-installed route.  Rebinding the attribute
                # does not disturb this iteration, which walks the old
                # list object.
                self.pendingroute = list(filter(lambda x: x != route,
                                                self.pendingroute))
            else:
                if next_hop in self.netlink.unresolvedneighbours:
                    log.error("%s is unreachable, flow cannot be installed!!!",
                              next_hop)
        hub.sleep(600)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def on_packet_in(self, ev):
''' Event handler for processing packet-ins received
by the controller from connected OpenFlow datapaths.
'''
msg = ev.msg
self.flowprocessor.handle_packet_in(msg, self.table,
self.netlink.ifaceTable,
| |
"""Helper draw method for drawing of RooDataSet
>>> dataset.draw ( 'm', 'chi2<10' )
## cuts & weight
>>> dataset.draw ( 'm', '(chi2<10)*weight' )
## use drawing options
>>> dataset.draw ( 'm', '(chi2<10)*weight' , 'e1' )
## start form event #1000
>>> dataset.draw ( 'm', '(chi2<10)*weight' , 'e1' , 1000 )
## for event in range 1000< i <10000
>>> dataset.draw ( 'm', '(chi2<10)*weight' , 'e1' , 1000 , 100000 )
"""
if isinstance ( cuts , ROOT.TCut ) : cuts = str ( cuts ).strip()
if isinstance ( what , str ) : what = what.strip()
if isinstance ( cuts , str ) : cuts = cuts.strip()
if isinstance ( opts , str ) : opts = opts.strip()
## delegate to TTree for non-weighted datasets with TTree-based storage type
if hasattr ( dataset , 'isWeighted') and not dataset.isWeighted() \
and isinstance ( what , str ) \
and isinstance ( cuts , str ) \
and isinstance ( opts , str ) :
if hasattr ( dataset , 'store' ) :
store = dataset.store()
if store :
tree = store.tree()
if tree : return tree.Draw( what , cuts , opts , *args )
if isinstance ( what , str ) :
vars = [ v.strip() for v in what.split(':') ]
return ds_draw ( dataset , vars , cuts , opts , *args )
if isinstance ( what , ROOT.RooFormulaVar ) :
return ds_draw ( dataset , what.GetTitle () , cuts , opts , *args )
if isinstance ( what , ROOT.RooAbsReal ) :
return ds_draw ( dataset , what.GetName () , cuts , opts , *args )
if not 1 <= len ( what ) <= 3 :
raise AttributeError ( 'DataSet::draw, invalid length %s' % what )
if 1 == len ( what ) :
w1 = what[0]
mn1 , mx1 = ds_var_minmax ( dataset , w1 , cuts )
histo = ROOT.TH1F ( hID() , w1 , 200 , mn1 , mx1 ) ; histo.Sumw2()
ds_project ( dataset , histo , what , cuts , *args )
histo.Draw( opts )
return histo
if 2 == len ( what ) :
w1 = what[0]
mn1 , mx1 = ds_var_minmax ( dataset , w1 , cuts )
w2 = what[1]
mn2 , mx2 = ds_var_minmax ( dataset , w2 , cuts )
histo = ROOT.TH2F ( hID() , "%s:%s" % ( w1 , w2 ) ,
50 , mn1 , mx1 ,
50 , mn2 , mx2 ) ; histo.Sumw2()
ds_project ( dataset , histo , what , cuts , *args )
histo.Draw( opts )
return histo
if 3 == len ( what ) :
w1 = what[0]
mn1 , mx1 = ds_var_minmax ( dataset , w1 , cuts )
w2 = what[1]
mn2 , mx2 = ds_var_minmax ( dataset , w2 , cuts )
w3 = what[2]
mn3 , mx3 = ds_var_minmax ( dataset , w3 , cuts )
histo = ROOT.TH3F ( hID() , "%s:%s:%s" % ( w1 , w2 , w3 ) ,
20 , mn1 , mx1 ,
20 , mn2 , mx2 ,
20 , mn2 , mx2 ) ; histo.Sumw2()
ds_project ( dataset , histo , what , cuts , *args )
histo.Draw( opts )
return histo
raise AttributeError ( 'DataSet::draw, invalid case' )
# =============================================================================
## get the attribute for RooDataSet
def _ds_getattr_ ( dataset , aname ) :
"""Get the attibute from RooDataSet
>>> dset = ...
>>> print dset.pt
"""
_vars = dataset.get()
return getattr ( _vars , aname )
# =============================================================================
## Get min/max for the certain variable in dataset
# @code
# data = ...
# mn,mx = data.vminmax('pt')
# mn,mx = data.vminmax('pt','y>3')
# @endcode
# @author <NAME> <EMAIL>
# @date 2015-09-19
def ds_var_minmax ( dataset , var , cuts = '' , delta = 0.0 ) :
    """Get min/max for the certain variable in dataset
    >>> data = ...
    >>> mn,mx = data.vminmax('pt')
    >>> mn,mx = data.vminmax('pt','y>3')
    :param dataset: the (Roo)dataset to inspect
    :param var: variable name (or RooAbsReal, whose name is taken)
    :param cuts: optional selection applied before the min/max scan
    :param delta: relative margin added on both sides of the range
    :return: (min, max) tuple, optionally widened by delta
    """
    if isinstance ( var , ROOT.RooAbsReal ) : var = var.GetName()
    if cuts : s = dataset.statVar ( var , cuts )
    else    : s = dataset.statVar ( var )
    mn , mx = s.minmax()
    ## FIX: was `mn < mn`, which is always False, so the delta margin was
    ## never applied; the non-degenerate-range check `mn < mx` is intended.
    if mn < mx and 0.0 < delta :
        dx  = delta * 1.0 * ( mx - mn )
        mx += dx
        mn -= dx
    return mn , mx
# Attach ds_var_minmax as RooDataSet.vminmax and record the decoration.
ROOT.RooDataSet .vminmax = ds_var_minmax
_new_methods_ += [
    ROOT.RooDataSet .vminmax ,
    ]
# =============================================================================
## clear dataset storage
# Keep a reference to the original RooDataSet.reset exactly once, so a
# repeated import of this module does not wrap it twice.
if not hasattr ( ROOT.RooDataSet , '_old_reset_' ) :
    ROOT.RooDataSet._old_reset_ = ROOT.RooDataSet.reset
def _ds_new_reset_ ( self ) :
"""Clear dataset storage
>>> print ds
>>> ds.clear()
>>> ds.erase() ## ditto
>>> ds.reset() ## ditto
>>> ds.Reset() ## ditto
>>> print ds
"""
s = self.store()
if s : s.reset()
self._old_reset_()
return len(self)
# Install the new reset under several aliases and record the decorations.
ROOT.RooDataSet.reset = _ds_new_reset_
ROOT.RooDataSet.clear = ROOT.RooDataSet.reset
ROOT.RooDataSet.erase = ROOT.RooDataSet.reset
ROOT.RooDataSet.Reset = ROOT.RooDataSet.reset
_new_methods_ += [
    ROOT.RooDataSet .clear ,
    ROOT.RooDataSet .erase ,
    ROOT.RooDataSet .Reset ,
    ]
# =============================================================================
# First installation of the draw/project/__getattr__ decorations (they are
# installed again, together with sFactor, further below).
ROOT.RooDataSet.draw = ds_draw
ROOT.RooDataSet.project = ds_project
ROOT.RooDataSet .__getattr__ = _ds_getattr_
ROOT.RooDataHist.__getattr__ = _ds_getattr_
ROOT.RooDataHist.__len__ = lambda s : s.numEntries()
_new_methods_ += [
    ROOT.RooDataSet.draw ,
    ROOT.RooDataSet.project ,
    ]
# =============================================================================
## get the s-factor for (weighted) dataset, where
# s-factor is defined as
# \f$ s_{w} \equiv \frac{\sum w_i}{\sum w_i^2} \f$
# @see Ostap::SFactor::sFactor
# @code
# dataset = ...
# sf = dataset.sFactor()
# @endcode
# when the weigths comes from sPlot, the factor effectively accounts
# statistical fluctuations in background subtraction
# @see W. T. Eadie et al., Statistical methods in experimental physics,
# North Holland, Amsterdam, 1971.
# @author <NAME> <EMAIL>
# @date 2019-05-30
# =============================================================================
def _rad_sFactor_ ( data ) :
    """Get the s-factor for (weighted) dataset, where
    s-factor is defined as
    s_{w} equiv frac{ sum w_i}{ sum w_i^2}
    - see Ostap::SFactor::sFactor
    - see W. T. Eadie et al., Statistical methods in experimental physics,
    ... North Holland, Amsterdam, 1971.
    >>> dataset = ...
    >>> sf = dataset.sFactor()
    """
    ## empty dataset: nothing to compute
    if 0 == data.numEntries() :
        logger.warning ("RooAbsData.sFactor: dataset is empty, return 1.0")
        return 1.0
    sf = Ostap.SFactor.sFactor ( data )
    ## a non-positive sum of squared weights makes the ratio meaningless
    if   sf.cov2() <  0 :
        logger.error   ('Ostap::SFactor::sFactor %s, return 1.0' % sf )
        return 1.0
    elif sf.cov2() == 0 :
        logger.warning ('Ostap::SFactor::sFactor %s, return 1.0' % sf )
        return 1.0
    return sf.value() / sf.cov2()
# =============================================================================
## print method for RooDataSet
# @code
#
# >>> print dataset
#
# @endcode
# @author <NAME> <EMAIL>
# @date 2013-07-06
def _ds_print_ ( dataset ) :
    """Multi-line representation of a dataset, used for __repr__/__str__
    >>> print dataset
    """
    if not valid_pointer ( dataset ) :
        return 'Invalid dataset'
    return dataset.print_multiline ( verbose = True )
# NOTE: draw/project/__getattr__ were already attached above; these
# assignments repeat them and additionally install sFactor and the
# printing helpers.
ROOT.RooDataSet.draw = ds_draw
ROOT.RooDataSet.project = ds_project
ROOT.RooDataSet.__getattr__ = _ds_getattr_
ROOT.RooAbsData.sFactor = _rad_sFactor_
for d in ( ROOT.RooAbsData  ,
           ROOT.RooDataSet  ,
           ROOT.RooDataHist ) :
    d.__repr__ = _ds_print_
    d.__str__  = _ds_print_
    d.__len__  = lambda s : s.numEntries()
_new_methods_ += [
    ROOT.RooDataSet .draw ,
    ROOT.RooDataSet .project ,
    ROOT.RooDataSet .__getattr__ ,
    ROOT.RooDataHist.__getattr__ ,
    ROOT.RooDataHist.__len__ ,
    ROOT.RooAbsData .sFactor
    ]
# =============================================================================
## add variable to dataset
def _rds_addVar_ ( dataset , vname , formula ) :
    """Add/calculate a new formula column for RooDataSet
    >>> dataset.addVar ( 'ratio' , 'pt/pz' )
    """
    ## collect the existing variables into a RooArgList
    variables = ROOT.RooArgList()
    for variable in dataset.get() :
        variables.add ( variable )
    ## build the formula column and append it to the dataset
    column = ROOT.RooFormulaVar ( vname , formula , formula , variables )
    dataset.addColumn ( column )
    return dataset
# =============================================================================
## Add/calculate/sample variable to RooDataSet
# - Use formula expression
# @code
# dataset.add_new_var ( 'ratio' , 'pt/pz' ) ## use RooFormulaVar
# @endcode
# - Use function:
# @code
# func = ... ## Ostap.IFuncData object
# dataset.add_new_var ( 'value' , func )
# @endcode
# - Sample from 1D-histogram
# @code
# h1 = ...## 1D histogram
# dataset.add_new_var ( 'nTracks' , h1 ) ## sample from 1D histogram
# @endcode
# - Sample from 2D histogram
# @code
# h2 = ...## 2D histogram
# dataset.add_new_var ( 'Pt' , 'eta' , h2 ) ## sample from 2D histogram
# @endcode
# - Sample from 3D-histogram
# @code
# h3 = ...## | |
# mc_manager/curses_helpers.py
import curses
from curses.textpad import Textbox, rectangle
class item_base():
    """Common behaviour shared by all menu items.
    """
    def init_curses(self):
        """Apply the curses settings every item relies on.
        """
        for setting in (curses.noecho, curses.cbreak):
            setting()
        curses.curs_set(0)
    def display(self, y_pos, key_x, value_x, stdscr, selected, formatting=0):
        """Render the item; children are expected to override this.
        """
        pass
class item_title(item_base):
    """A menu item rendered centred on the screen.
    """
    def __init__(self, title, on_change=None):
        # on_change is accepted for interface compatibility but not used.
        self.title = title
        self.max_len = 0
        self.name = title
    def display(self, y_pos, stdscr, selected, formatting=0):
        """Draw the title centred at row y_pos; return it when selected.
        """
        self.init_curses()
        cols = stdscr.getmaxyx()[1]
        text_win = curses.newwin(1, len(self.title)+1, y_pos,
                                 int((cols/2)-len(self.title)/2))
        pad_win = curses.newwin(1, cols, y_pos, 0)
        # Blank the whole row first, then draw the title over it.
        pad_win.erase()
        pad_win.addstr(" "*(cols-1))
        pad_win.refresh()
        text_win.erase()
        text_win.addstr(0, 0, self.title, formatting)
        text_win.refresh()
        del text_win
        del pad_win
        return self.title if selected else None
class item_editor(item_base):
    """class for a menu item with a key and editable value
    """
    def __init__(self, key, value, max_val_len=20):
        """This is a display item which has a key and an editable value
        Args:
            key (str): The key to be displayed
            value (str,int,float,bool): The value to be edited
            max_val_len (int, optional): The maximum length of the value field.
                Defaults to 20.
        """
        self.key=key
        self.value=value
        self.name = key
        # Pick the Textbox validator matching the value's type.  bool gets
        # no validator on purpose: `type(value) is int` is False for bool,
        # and bool editing bypasses Textbox via bool_validator() below.
        if type(value) is str:
            self.validation = self.str_validator
        elif type(value) is int:
            self.validation = self.int_validator
        elif type(value) is float:
            self.validation = self.float_validator
        self.max_val_len = max_val_len
    def display(self, y_pos, key_x, value_x, stdscr, selected, formatting=0):
        """Displays the item
        Args:
            y_pos (int): The y position on stdscr for the item to be displayed
            key_x (int): the x position on stdscr for the key to be displayed
            value_x (int): the x position on stdscr for the value to be displayed
            stdscr (_CursesWindow): a curses windows or pad to use
            selected (bool): Whether or not this item is selected
            formatting (int, optional): a curses format to use. Defaults to 0.
        Returns:
            None, value: returns self.value if an edit was made, otherwise None
        """
        self.init_curses()
        key_window=curses.newwin(1,value_x-key_x,y_pos,key_x)
        value_window=curses.newwin(1,self.max_val_len,y_pos,value_x)
        changed=False
        if selected:
            if type(self.value) is bool:
                # bools are toggled in place, without a Textbox.
                self.bool_validator(stdscr,value_window)
            else:
                curses.curs_set(1)
                # Textbox.edit() feeds every keystroke through the
                # validator; the validator stores the result in self.value
                # on ENTER and returns BEL to terminate editing.
                self.box = Textbox(value_window)
                self.box.edit(self.validation)
                self.box=None
            changed=True
        key_window.erase()
        key_window.addstr(0,0,self.key, formatting)
        value_window.erase()
        value_window.addstr(str(self.value), formatting)
        key_window.refresh()
        value_window.refresh()
        del key_window
        del value_window
        return (self.key,self.value) if changed else None
    def str_validator(self, key):
        """This function maps a given keystroke to the desired response when
        the user is editing a value of type str
        Args:
            key (int): The key pressed
        Returns:
            int: the key to returns
        """
        if self.box == None:
            return
        # 27 = ESC: abort editing (BEL terminates the Textbox).
        if key == 27:
            return curses.ascii.BEL
        # 8 = Ctrl-H, which Textbox treats as backspace.
        elif key == curses.KEY_BACKSPACE or key == 127:
            return 8
        elif key == curses.KEY_ENTER or key == 10 or key == 13:
            # Commit the edited text, then terminate the Textbox.
            self.value=self.box.gather().strip()
            return curses.ascii.BEL
        else:
            return key
    def float_validator(self, key):
        """This function maps a given keystroke to the desired response when
        the user is editing a value of type float
        Args:
            key (int): The key pressed
        Returns:
            int: the key to returns
        """
        if self.box == None:
            return
        if key == 27:
            return curses.ascii.BEL
        elif key == curses.KEY_BACKSPACE or key == 127:
            return 8
        elif key == curses.KEY_ENTER or key == 10 or key == 13:
            self.value=float(self.box.gather().strip())
            return curses.ascii.BEL
        # 46 = '.'; allow at most one, and not as the first character.
        elif key == 46:
            gather = self.box.gather()
            # If dot hasn't been used and the string isn't empty
            if (not '.' in gather) and (gather.strip()):
                return key
        # 48..57 = '0'..'9'; any other key is swallowed (returns None).
        if key in range(48,58): # allowed values
            return key
    def int_validator(self, key):
        """This function maps a given keystroke to the desired response when
        the user is editing a value of type int
        Args:
            key (int): The key pressed
        Returns:
            int: the key to returns
        """
        if self.box == None:
            return
        if key == 27:
            return curses.ascii.BEL
        elif key == curses.KEY_BACKSPACE or key == 127:
            return 8
        elif key == curses.KEY_ENTER or key == 10 or key == 13:
            # Only commit when something was typed; empty input keeps the
            # previous value.
            in_val = self.box.gather().strip()
            if in_val != "":
                self.value=int(in_val)
            return curses.ascii.BEL
        # 48..57 = '0'..'9'; everything else is swallowed.
        if key in range(48,58): # allowed values
            return key
    def bool_validator(self, stdscr, window): # This one's special and runs without textbox
        """This function gets a keystroke and toggles self.value, exiting without
        changing on ESC and exiting with changes on ENTER
        Args:
            stdscr (_CursesWindow): The parent screen object
            window (_CursesWindow): The window object text is being written to
        Returns:
            int: the key to returns
        """
        value = self.value
        while True:
            key = stdscr.getch()
            # ESC: leave self.value untouched.
            if key == 27:
                return value
            # Arrow keys or space toggle the displayed candidate value.
            elif key in [curses.KEY_UP, curses.KEY_DOWN,
                         curses.KEY_LEFT, curses.KEY_RIGHT, 32]: # 32 is space
                value = not value
                window.erase()
                window.addstr(str(value), curses.A_STANDOUT)
                window.refresh()
            # ENTER: commit the toggled value.
            elif key == curses.KEY_ENTER or key == 10 or key == 13:
                self.value = value
                return value
class list_base():
    """base class for lists of items

    self.selected is the index of the highlighted item; self.start is the
    index of the first visible item (the scroll offset).
    """
    def __init__(self, items):
        self.items = items
        self.selected = 0
        self.returnVal = None
    def display(self, stdscr):
        """Displays a list of items
        Args:
            stdscr (_CursesWindow): The window object to display to
        Returns:
            any: returns whatever the child class sets self.returnVal to
        """
        self.rows, self.cols = stdscr.getmaxyx()
        self.middle_col = int(self.cols/2)
        self.start = 0
        stdscr.erase()
        stdscr.refresh()
        self.pre_loop(stdscr)
        while True:
            # Re-read the size each pass so terminal resizes are honoured.
            self.rows, self.cols = stdscr.getmaxyx()
            if not self.loop(stdscr):
                break
            if not self.get_key(stdscr):
                break
        self.post_loop(stdscr)
        return self.returnVal
    def pre_loop(self, stdscr):
        """This is run before the main loop, and is available to be overloaded
        Args:
            stdscr (_CursesWindow): The window object to display to
        """
        pass
    def loop(self, stdscr):
        """This is the main loop, and is meant to be overloaded
        Args:
            stdscr (_CursesWindow): The window object to display to
        Returns:
            bool: True to continue loop, false otherwise
        """
        return True
    def post_loop(self, stdscr):
        """This is run after the loop completes and is available to be overloaded
        Args:
            stdscr (_CursesWindow): The window object to display to
        """
        pass
    def get_key(self, stdscr):
        """This function handles commonly used keys,
        and calls overloadable functions to deal with them
        Args:
            stdscr (_CursesWindow): The window object to display to
        Returns:
            bool: True to continue the main loop, False to stop
        """
        key = stdscr.getch()
        if key == curses.KEY_DOWN:
            return self.key_down()
        elif key == curses.KEY_UP:
            return self.key_up()
        if key in [curses.KEY_ENTER, 10, 13]:
            return self.key_enter()
        # 27 = ESC: leave the menu.
        elif key == 27:
            return False
        else:
            return True
    def key_enter(self):
        """This is a function called when enter is pressed
        it is available to be overloaded
        Returns:
            bool: True to continue the main loop, False to stop
        """
        return True
    def key_up(self):
        """This is a function called when the up key is pressed
        it is available to be overloaded, but calls sel_up() by default
        Returns:
            bool: True to continue the main loop, False to stop
        """
        return self.sel_up()
    def key_down(self):
        """This is a function called when the down key is pressed
        it is available to be overloaded, but calls sel_down() by default
        Returns:
            bool: True to continue the main loop, False to stop
        """
        return self.sel_down()
    def sel_up(self):
        """This function is called to move the cursor up
        """
        # At the top of the visible window: scroll up, or wrap to the end
        # of the list (scrolling so the tail is visible).
        if self.selected == self.start:
            if self.start > 0:
                self.start -= 1
                self.selected -= 1
            else:
                self.selected = len(self.items)-1
                self.start = max(0,len(self.items)-self.rows)
        else:
            self.selected -= 1
        return True
    def sel_down(self):
        """This function is called to move the cursor down
        """
        # Past the bottom of the visible window (or the last item):
        # scroll down if more items remain, otherwise wrap to the top.
        if self.selected + 1 >= self.rows + self.start or self.selected >= len(self.items) - 1:
            if ((self.start + self.rows < len(self.items)) and (self.selected < len(self.items))):
                self.start += 1
                self.selected += 1
            else:
                self.selected = 0
                self.start = 0
        else:
            self.selected += 1
        return True
class list_editor(list_base):
"""class for a list of item_editor items
"""
def __init__(self, items):
"""Calls parent init and also finds the
largest sized string in the list of items given
Args:
items (list): a list of items which share the item_base parent
to be displayed in the list
"""
super().__init__(items)
self.keylength = (
max(
map(
len,
(
(x.key if type(x) is item_editor else "" for x in self.items)
)
)
)
)
self.edit = False
return
def pre_loop(self, stdscr):
"""Sets up the variable returnVal to be used as a list
Args:
stdscr (_CursesWindow): The window object to display to
"""
self.returnVal = []
return
def loop(self, stdscr):
"""This is the function called in the loop
inside the parent's display() function
Args:
stdscr (_CursesWindow): The window object to display to
Returns:
bool: true to continue | |
return "{}, {}-normed, {} (by {})".format(
comp_map[comp][1], normalization, xvlong, algo
)
else:
return "Undefined"
def seconds_elapsed(elapsed):
    """ Convert a string from the json file, like "5 days, 2:45:32.987", into integer seconds.
    :param elapsed: string scraped from json file
    :return: seconds represented by the elapsed string, as an integer;
        0 when the string is not of the form [D day(s), ]H:M:S[.frac]
    """
    fields = elapsed.split(":")
    if len(fields) != 3:
        return 0
    secs = int(float(fields[2]))
    mins = int(fields[1])
    head = fields[0]
    # The hours field may carry a "N day(s), " prefix.
    if "days" in head:
        days, hours = (int(p) for p in head.split(" days, "))
    elif "day" in head:
        days, hours = (int(p) for p in head.split(" day, "))
    else:
        days, hours = 0, int(head)
    return ((days * 24 + hours) * 60 + mins) * 60 + secs
def result_description(file_path):
    """ From any file path, return a dictionary with an up-to-date description of its characteristics.
    :param str file_path: The path to the result file
    :return: (d, errors) where d maps BIDS keys to values and errors lists
        "no <key>" strings for each required key that is still missing
    """
    required_bids_keys = [
        'sub', 'hem', 'samp', 'prob', 'parby', 'splby', 'batch', 'tgt', 'algo', 'shuf',
        'comp', 'mask', 'norm', 'adj', 'top_subdir',
    ]
    d = dict_from_bids(file_path)
    if 'sub' in d:
        if d['sub'] in ['all', ]:
            pass
        # Legacy single-donor results: fill in the fields that newer runs
        # encode explicitly in the path.
        elif d['sub'] in ['H03511009', 'H03511012', 'H03511015', 'H03511016', 'H03512001', 'H03512002', ]:
            d['samp'] = d.get('ctx', d.get('set', 'UNKNOWN'))
            d['prob'] = 'richiardi'
            d['parby'] = 'wellid'
            d['splby'] = 'none'
            d['batch'] = 'all'
        else:
            # Split-style subject ids, e.g. "glassertrain123" or
            # "wellidtestbyglasser00042".  Try the short pattern first,
            # then fall back to the explicit "by<splby>" pattern.
            match = None
            if match is None:
                re_str = r"^(?P<pby>glasser|wellid)(?P<phase>test|train)(?P<seed>\d+)$"
                match = re.compile(re_str).match(d['sub'])
                if match:
                    d['splby'] = 'wellid'
            if match is None:
                re_str = r"^(?P<pby>glasser|wellid)(?P<phase>test|train)by(?P<sby>glasser|wellid)(?P<seed>\d+)$"
                match = re.compile(re_str).match(d['sub'])
                if match:
                    d['splby'] = match.group('sby')
            if match:
                d['sub'] = 'all'
                d['hem'] = 'A'
                d['samp'] = 'glasser'
                d['parby'] = match.group('pby')
                d['batch'] = "{}{:05}".format(match.group('phase'), int(match.group('seed')))
    # Map legacy abbreviated keys onto their canonical names.
    if 'alg' in d:
        d['algo'] = d['alg']
    if 'prb' in d:
        d['prob'] = d['prb']
    if 'msk' in d:
        d['mask'] = d['msk']
    if 'cmp' in d:
        d['comp'] = d['cmp']
    if 'norm' not in d:
        d['norm'] = 'none'
    # Report every required key that could not be recovered.
    errors = []
    for k in required_bids_keys:
        if k not in d:
            errors.append("no {}".format(k))
    return d, errors
def path_to(cmd, args, path_type='result', include_file=True, dir_for_intermediates=False, log_file=False):
    """ Build the path requested and return it.
    Paths should be consistent, so this is the only place we want to be generating paths.
    But there are different paths even within the same command, so we need flexibility in
    determining which file/dir is required.
    :param str cmd: The command being run
    :param args: The command-line arguments passed to cmd
    :param str path_type: Generate a path to a split, result, log, etc
    :param bool include_file: Just the 'dir' or the whole 'file' path
    :param bool dir_for_intermediates: Shall we include an additional subdirectory for intermediate files?
    :param bool log_file: Set to true to get the path to a log file rather than data file
    :return: the constructed path; as a side effect the directory portion
        is created on disk (makedirs with exist_ok)
    """
    ext = "" # normally, we won't need to append an extension.
    # Collect every BIDS term the path builders may need; each falls back
    # to "" (or 0/'whole') when the argument was not supplied.
    bids_dict = {
        'data': args.data if 'data' in args else "",
        'cmd': cmd,
        'sub': donor_name(args.donor if 'donor' in args else ""),
        'hem': args.hemisphere if 'hemisphere' in args else "",
        'splby': args.splitby if 'splitby' in args else "",
        'parby': args.parcelby if 'parcelby' in args else "",
        'samp': args.samples if 'samples' in args else "",
        'prob': args.probes if 'probes' in args else "",
        'tgt': args.direction if 'direction' in args else "",
        'algo': args.algorithm if 'algorithm' in args else "",
        'shuf': args.shuffle if 'shuffle' in args else "",
        'norm': args.expr_norm if 'expr_norm' in args else "",
        'adj': args.adjust if 'adjust' in args else "",
        'start': args.beginning.strftime("%Y%m%d%H%M%S") if "beginning" in args else "",
        'seed': args.seed if 'seed' in args else 0,
        'batch': args.batch if 'batch' in args else 'whole',
        'top_subdir': 'derivatives',
    }
    # The comparator may arrive under three different argument names.
    if 'comparator' in args:
        bids_dict['comp'] = bids_clean_filename(args.comparator)
    elif 'comp' in args:
        bids_dict['comp'] = args.comp
    elif 'cmp' in args:
        bids_dict['comp'] = args.cmp
    if "masks" in args and len(args.masks) > 0:
        bids_dict['mask'] = '+'.join(bids_clean_filename(args.masks))
    else:
        bids_dict['mask'] = 'none'
    if "comparatorsimilarity" in args and args.comparatorsimilarity:
        bids_dict['comp'] = bids_dict['comp'] + "sim"
    # Make the BIDS name out of the dict values
    if log_file:
        bids_dict['make_log_file'] = True
        ext = ".log"
    # Splits live under 'splits' rather than 'derivatives'.
    if cmd == 'split' or ('command' in args and args.command == 'split') or path_type == 'split':
        bids_dict['top_subdir'] = 'splits'
        new_name = split_path_from_dict(bids_dict)
    else:
        new_name = result_path_from_dict(bids_dict)
    # Ensure the directory exists before handing the path back.
    if dir_for_intermediates:
        intermediate_dir = 'intdata_' + '-'.join([bids_clean_filename(args.comparator), bids_dict['mask'], args.adjust])
        new_name = os.path.join(new_name[:new_name.rfind("/")], intermediate_dir)
        os.makedirs(os.path.abspath(new_name), exist_ok=True)
    else:
        os.makedirs(os.path.dirname(os.path.abspath(new_name)), exist_ok=True)
    if include_file:
        return new_name + ext
    else:
        return new_name[:new_name.rfind("/")]
def sub_dir_source(d):
    """Build the source (donor/hemisphere/samples/probes) portion of the
    directory structure.

    :param dict d: A dictionary holding BIDS terms for path-building
    :return str: e.g. "sub-X_hem-Y_samp-Z_prob-W"
    """
    pieces = [('sub', d['sub']), ('hem', d['hem']),
              ('samp', d['samp']), ('prob', d['prob'])]
    return "_".join("-".join(piece) for piece in pieces)
def sub_dir_by_split(d):
    """Build the split (parcel-by/split-by/batch) portion of the results
    directory structure.

    :param dict d: A dictionary holding BIDS terms for path-building
    :return str: e.g. "parby-X_splby-Y_batch-Z"
    """
    pieces = [('parby', d['parby']), ('splby', d['splby']),
              ('batch', d['batch'])]
    return "_".join("-".join(piece) for piece in pieces)
def sub_dir_algo(d):
    """Build the algorithm (target/algorithm/shuffle) portion of the
    directory structure.

    :param dict d: A dictionary holding BIDS terms for path-building
    :return str: e.g. "tgt-X_algo-Y_shuf-Z"
    """
    pieces = [('tgt', d['tgt']), ('algo', d['algo']), ('shuf', d['shuf'])]
    return "_".join("-".join(piece) for piece in pieces)
def result_file_name(d):
    """Build the base file name of a result from BIDS-style terms.

    (Not the split portion: this names the final result file with its
    comparator, mask, normalization, and adjustment.)

    :param dict d: A dictionary holding BIDS terms for path-building
    :return str: e.g. "sub-A_comp-B_mask-C_norm-D_adj-E"
    """
    fields = [('sub', d['sub']), ('comp', d['comp']), ('mask', d['mask']),
              ('norm', d['norm']), ('adj', d['adj'])]
    return "_".join("-".join(field) for field in fields)
def split_path_from_dict(d):
    """Build the correct path for a split file from the d dict.

    :param dict d: A dictionary holding BIDS terms for path-building
    """
    # This function is reached in exactly three situations:
    # 1. Pushing, re-using a previously split file for expression data:
    #    we must know which split and batch to use, and we need a real file.
    if d['cmd'] == 'push':
        # Mirror the short BIDS keys back onto the long argument names so
        # split_file_name can find whichever spelling it expects.
        if 'parby' in d and 'parcelby' not in d:
            d['parcelby'] = d['parby']
        if 'splby' in d and 'splitby' not in d:
            d['splitby'] = d['splby']
        return os.path.join(
            d['data'], d['top_subdir'], sub_dir_source(d),
            '-'.join(['batch', d['batch'], ]), split_file_name(d, 'df'))
    # 2. Splitting expression data, and the logger wants to start a log file
    #    (no parcelby or batch exist yet).
    if d.get('make_log_file', False):
        return os.path.join(d['data'], 'splits', sub_dir_source(d), split_log_name(d))
    # 3. Splitting expression data; only a base folder is needed
    #    (no parcelby or batch exist yet). The dummy file name is stripped
    #    off later by the caller.
    return os.path.join(d['data'], 'splits', sub_dir_source(d), "IGNORED.FILE")
def result_path_from_dict(d):
    """Build the correct path for output from the d dict.

    :param dict d: A dictionary holding BIDS terms for path-building
    """
    file_name = result_file_name(d)
    # Test runs get a timestamp prefix so repeated runs don't collide.
    if d['sub'] == 'test':
        file_name = '_'.join(['dt-' + d['start'], file_name])
    # The most common, default path construction:
    path_parts = [
        d['data'],
        d.get('top_subdir', ''),
        sub_dir_source(d),
        sub_dir_by_split(d),
        sub_dir_algo(d),
        file_name,
    ]
    new_name = os.path.join(*path_parts)
    # Building reports and generating data require different levels of name:
    # 'order' runs and shuffled runs get extra suffixes appended.
    if d['cmd'] == 'order':
        new_name = '_'.join([new_name, 'order'])
    if d['shuf'] != 'none':
        new_name = '_'.join([new_name, 'seed-{0:05d}'.format(int(d['seed']))])
    return new_name
def get_entrez_id_from_gene_name(gene_name, data_dir="/data"):
""" Lookup the gene symbol gene_name and return its entrez_id
:param gene_name:
:param data_dir: The PYGEST_DATA base path
:return:
"""
global human_genome_info
global symbol_to_id_map
entrez_id = ""
entrez_source = ""
# Inevitably, we'll be called repeatedly on blank gene names.
if gene_name == "":
return 0, ""
# The LOC00000 genes are named after their entrez_id. No point looking them up.
if gene_name.startswith("LOC"):
try:
return int(gene_name[3:]), "loc"
except ValueError:
# The rest of the symbol is not a number; try it in the mapper later
pass
# Use the dictionary we built first. It's fastest. But if it doesn't work, we may need to spend some time.
try:
return symbol_to_id_map[gene_name], "map"
except KeyError:
# print("searching for {}, not mappable".format(gene_name))
# Do this two ways to see if they get the same results.
with open(os.path.join(data_dir, "sourcedata", "Homo_sapiens.gene_info"), 'r') as f:
for line in f:
match = re.search(r"^(\d+)\s+(\d+)\s+.*{}.*$".format(gene_name), line)
if match:
try:
entrez_id = int(match.group(2))
entrez_source = "extra fields"
except ValueError:
print("Found {} in file, but {} is not a number.".format(
gene_name, match.group(2)
))
mask = human_genome_info[['dbXrefs', 'description']].applymap(lambda x: gene_name in str(x)).any(axis=1)
if mask.sum() == 1:
| |
<reponame>kwlzn/model-analysis
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Public API for performing evaluations using the EvalMetricsGraph."""
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
# Standard Imports
import apache_beam as beam
import numpy as np
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import types
from tensorflow_model_analysis.eval_metrics_graph import eval_metrics_graph
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from typing import Any, Dict, Generator, Iterable, List, Optional, Text, Tuple, Union
@beam.ptransform_fn
@beam.typehints.with_input_types(Tuple[slicer.SliceKeyType, types.Extracts])
@beam.typehints.with_output_types(Tuple[slicer.SliceKeyType, Dict[Text, Any]])
def ComputePerSliceMetrics(  # pylint: disable=invalid-name
    slice_result: beam.pvalue.PCollection,
    eval_shared_model: types.EvalSharedModel,
    desired_batch_size: Optional[int] = None,
    compute_with_sampling: Optional[bool] = False,
    random_seed_for_testing: Optional[int] = None) -> beam.pvalue.PCollection:
  """PTransform for computing, aggregating and combining metrics.

  Args:
    slice_result: Incoming PCollection consisting of slice key and extracts.
    eval_shared_model: Shared model parameters for EvalSavedModel.
    desired_batch_size: Optional batch size for batching in Aggregate.
    compute_with_sampling: True to compute with sampling (Poisson bootstrap
      resampling of the inputs; see _AggregateCombineFn._poissonify).
    random_seed_for_testing: Seed to use for unit testing.

  Returns:
    PCollection of (slice key, dict of metrics).
  """
  # TODO(b/123516222): Remove this workaround per discussions in CL/227944001
  slice_result.element_type = beam.typehints.Any
  return (
      slice_result
      # _ModelLoadingIdentityFn loads the EvalSavedModel into memory
      # under a shared handle that can be used by subsequent steps.
      # Combiner lifting and producer-consumer fusion should ensure
      # that these steps run in the same process and memory space.
      # TODO(b/69566045): Remove _ModelLoadingIdentityFn and move model
      # loading to CombineFn.setup after it is available in Beam.
      | 'LoadModel' >> beam.ParDo(
          _ModelLoadingIdentityFn(eval_shared_model=eval_shared_model))
      # Per-slice partial aggregation: batch extracts, run the "intro
      # metrics" step, and combine the resulting metric variables.
      | 'CombinePerSlice' >> beam.CombinePerKey(
          _AggregateCombineFn(
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size,
              compute_with_sampling=compute_with_sampling,
              seed_for_testing=random_seed_for_testing))
      # Convert raw metric variables into the final per-slice metrics dict.
      | 'InterpretOutput' >> beam.ParDo(
          _ExtractOutputDoFn(eval_shared_model=eval_shared_model)))
def _add_metric_variables(  # pylint: disable=invalid-name
    left: types.MetricVariablesType,
    right: types.MetricVariablesType) -> types.MetricVariablesType:
  """Returns left and right metric variables combined.

  A None side is treated as the identity; when both sides are present they
  are combined elementwise and their lengths must agree.
  """
  if left is None:
    return right
  if right is None:
    return left
  if len(left) != len(right):
    raise ValueError('metric variables lengths should match, but got '
                     '%d and %d' % (len(left), len(right)))
  return [x + y for x, y in zip(left, right)]
class _AggState(object):
  """Combine state for AggregateCombineFn.

  Holds two pieces: the combined metric variables (the real combiner state)
  and a buffer of raw inputs (FeaturesPredictionsLabels or similar) that have
  not yet been folded into the metrics. See _AggregateCombineFn for why the
  buffer is needed.
  """

  __slots__ = ['metric_variables', 'inputs']

  def __init__(self):
    # None until the first batch of inputs is flushed through the metrics.
    self.metric_variables = None  # type: Optional[types.MetricVariablesType]
    # Inputs buffered until the next batch flush.
    self.inputs = []  # type: List[Union[bytes, types.FeaturesPredictionsLabels]]

  def copy_from(  # pylint: disable=invalid-name
      self, other: '_AggState') -> None:
    # Only adopt the other state once it has non-empty metric variables.
    if other.metric_variables:
      self.metric_variables = other.metric_variables
      self.inputs = other.inputs

  def __iadd__(self, other: '_AggState') -> '_AggState':
    combined = _add_metric_variables(self.metric_variables,
                                     other.metric_variables)
    self.metric_variables = combined
    self.inputs.extend(other.inputs)
    return self

  def add_input(self, new_input) -> None:
    self.inputs.append(new_input)

  def add_metrics_variables(  # pylint: disable=invalid-name
      self, metric_variables: types.MetricVariablesType) -> None:
    self.metric_variables = _add_metric_variables(self.metric_variables,
                                                  metric_variables)
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(Optional[List[Any]])
class _AggregateCombineFn(model_util.CombineFnWithModels):
  """Aggregate combine function.

  This function really does three things:
  1. Batching of FeaturesPredictionsLabels.
  2. "Partial reduction" of these batches by sending this through the
     "intro metrics" step.
  3. The "normal" combining of MetricVariables.

  What we really want to do is conceptually the following:
  Predictions | GroupByKey() | KeyAwareBatchElements()
  | ParDo(IntroMetrics()) | CombineValues(CombineMetricVariables()).

  but there's no way to KeyAwareBatchElements in Beam, and no way to do partial
  reductions either. Hence, this CombineFn has to do the work of batching,
  partial reduction (intro metrics), and actual combining, all in one.

  We do this by accumulating FeaturesPredictionsLabels in the combine state
  until we accumulate a large enough batch, at which point we send them
  through the "intro metrics" step. When merging, we merge the metric variables
  and accumulate FeaturesPredictionsLabels accordingly. We do one final
  "intro metrics" and merge step before producing the final output value.

  See also:
  BEAM-3737: Key-aware batching function
  (https://issues.apache.org/jira/browse/BEAM-3737).
  """

  # This needs to be large enough to allow for efficient TF invocations during
  # batch flushing, but shouldn't be too large as it also acts as cap on the
  # maximum memory usage of the computation.
  _DEFAULT_DESIRED_BATCH_SIZE = 1000

  def __init__(self,
               eval_shared_model: types.EvalSharedModel,
               desired_batch_size: Optional[int] = None,
               compute_with_sampling: Optional[bool] = False,
               seed_for_testing: Optional[int] = None) -> None:
    """Initializes the combiner.

    Args:
      eval_shared_model: Shared model parameters for EvalSavedModel.
      desired_batch_size: Optional number of inputs to buffer before flushing
        them through the "intro metrics" step; non-positive or None falls back
        to _DEFAULT_DESIRED_BATCH_SIZE.
      compute_with_sampling: True to Poisson-bootstrap-resample the buffered
        inputs before computing metrics (see _poissonify).
      seed_for_testing: Seed for the resampling RandomState (tests only).
    """
    super(_AggregateCombineFn,
          self).__init__({'': eval_shared_model.model_loader})
    self._seed_for_testing = seed_for_testing
    # Lazily bound in _maybe_do_batch once the model has been loaded.
    self._eval_metrics_graph = None  # type: eval_metrics_graph.EvalMetricsGraph
    # We really want the batch size to be adaptive like it is in
    # beam.BatchElements(), but there isn't an easy way to make it so.
    # TODO(b/73789023): Figure out how to make this batch size dynamic.
    if desired_batch_size and desired_batch_size > 0:
      self._desired_batch_size = desired_batch_size
    else:
      self._desired_batch_size = self._DEFAULT_DESIRED_BATCH_SIZE
    self._compute_with_sampling = compute_with_sampling
    self._random_state = np.random.RandomState(seed_for_testing)
    # Metrics.
    self._combine_batch_size = beam.metrics.Metrics.distribution(
        constants.METRICS_NAMESPACE, 'combine_batch_size')
    self._num_compacts = beam.metrics.Metrics.counter(
        constants.METRICS_NAMESPACE, 'num_compacts')

  def _poissonify(self, accumulator: _AggState) -> List[bytes]:
    # pylint: disable=line-too-long
    """Creates a bootstrap resample of the data in an accumulator.

    Given a set of data, it will be represented in the resample set a number of
    times, that number of times is drawn from Poisson(1).
    See
    http://www.unofficialgoogledatascience.com/2015/08/an-introduction-to-poisson-bootstrap26.html
    for a detailed explanation of the technique. This will work technically with
    small or empty batches but as the technique is an approximation, the
    approximation gets better as the number of examples gets larger. If the
    results themselves are empty TFMA will reject the sample. For any samples of
    a reasonable size, the chances of this are exponentially tiny. See "The
    mathematical fine print" section of the blog post linked above.

    Args:
      accumulator: Accumulator containing FPLs from a sample

    Returns:
      A list of FPLs representing a bootstrap resample of the accumulator items.
    """
    result = []
    if accumulator.inputs:
      # One Poisson(1) replication count per buffered input item.
      poisson_counts = self._random_state.poisson(1, len(accumulator.inputs))
      for i, input_item in enumerate(accumulator.inputs):
        result.extend([input_item] * poisson_counts[i])
    return result

  def _maybe_do_batch(self,
                      accumulator: _AggState,
                      force: bool = False) -> None:
    """Maybe intro metrics and update accumulator in place.

    Checks if accumulator has enough FPLs for a batch, and if so, does the
    intro metrics for the batch and updates accumulator in place.

    Args:
      accumulator: Accumulator. Will be updated in place.
      force: Force intro metrics even if accumulator has less FPLs than the
        batch size.
    """
    # Lazily resolve the eval graph on first use; setup may not have run yet
    # on this worker.
    if self._eval_metrics_graph is None:
      self._setup_if_needed()
      if self._loaded_models[''].eval_saved_model is None:
        raise ValueError('ModelLoader does not support eval_saved_model.')
      self._eval_metrics_graph = self._loaded_models[''].eval_saved_model
    batch_size = len(accumulator.inputs)
    if force or batch_size >= self._desired_batch_size:
      if accumulator.inputs:
        self._combine_batch_size.update(batch_size)
        inputs_for_metrics = accumulator.inputs
        if self._compute_with_sampling:
          # If we are computing with multiple bootstrap replicates, use fpls
          # generated by the Poisson bootstrapping technique.
          inputs_for_metrics = self._poissonify(accumulator)
        if inputs_for_metrics:
          accumulator.add_metrics_variables(
              self._eval_metrics_graph.metrics_reset_update_get_list(
                  inputs_for_metrics))
        else:
          # Call to metrics_reset_update_get_list does a reset prior to the
          # metrics update, but does not handle empty updates. Explicitly
          # calling just reset here, to make the flow clear.
          self._eval_metrics_graph.reset_metric_variables()
      # The buffered inputs have been folded into metric_variables (or were
      # empty to begin with); clear the buffer in place.
      del accumulator.inputs[:]

  def create_accumulator(self) -> _AggState:
    return _AggState()

  def add_input(self, accumulator: _AggState,
                elem: types.Extracts) -> _AggState:
    accumulator.add_input(elem[constants.INPUT_KEY])
    # Flush through the eval graph once enough inputs have been buffered.
    self._maybe_do_batch(accumulator)
    return accumulator

  def merge_accumulators(self, accumulators: Iterable[_AggState]) -> _AggState:
    result = self.create_accumulator()
    for acc in accumulators:
      result += acc
      # Compact within the loop to avoid accumulating too much data.
      #
      # During the "map" side of combining merging happens with memory limits
      # but on the "reduce" side it's across all bundles (for a given key).
      #
      # So we could potentially accumulate get num_bundles * batch_size
      # elements if we don't process the batches within the loop, which
      # could cause OOM errors (b/77293756).
      self._maybe_do_batch(result)
    return result

  def compact(self, accumulator: _AggState) -> _AggState:
    self._maybe_do_batch(accumulator, force=True)  # Guaranteed compaction.
    self._num_compacts.inc(1)
    return accumulator

  def extract_output(
      self, accumulator: _AggState) -> Optional[types.MetricVariablesType]:
    # It's possible that the accumulator has not been fully flushed, if it was
    # not produced by a call to compact (which is not guaranteed across all Beam
    # Runners), so we defensively flush it here again, before we extract data
    # from it, to ensure correctness.
    self._maybe_do_batch(accumulator, force=True)
    return accumulator.metric_variables
@beam.typehints.with_input_types(Tuple[slicer.SliceKeyType,
Optional[List[Any]]])
# TODO(b/123516222): Add output typehints. Similarly elsewhere that it applies.
class _ExtractOutputDoFn(model_util.DoFnWithModels):
"""A DoFn that extracts the metrics output."""
def | |
import environments.ControlledRangeVariance
from opebet import wealth_lb_1d, wealth_lb_2d, wealth_2d, wealth_lb_2d_individual_qps
import pickle
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
def getenv(wsq, tv=None):
    """Construct a seeded ControlledRangeVariance environment.

    :param wsq: target expected squared importance weight for the environment
    :param tv: optional value passed through as the environment's tv argument
    :return: (env, env.getpw(), env.range(), env.expectedwsq()) tuple
    """
    env = environments.ControlledRangeVariance.ControlledRangeVariance(
        seed=90210, wsupport=[0, 0.5, 2, 100], expwsq=wsq, tv=tv)
    return env, env.getpw(), env.range(), env.expectedwsq()
def compress(data):
    """Collapse duplicate rows into (count, *row-values-as-floats) tuples.

    Only used for debugging output, so clarity beats speed here.
    """
    from itertools import groupby
    rows = sorted(tuple(row) for row in data)
    return [(sum(1 for _ in grp),) + tuple(float(v) for v in key)
            for key, grp in groupby(rows)]
def produce_results(env, method, alpha, ndata=100, reps=10):
    """Measure anytime coverage and width of a confidence-sequence method.

    Draws `reps` independent samples of `ndata` points from `env`, runs
    `method` on each, and records whether the true value is covered at every
    step of the sequence, along with per-step interval widths.

    :param env: environment exposing .range() -> (wmin, wmax) and
        .sample(n) -> (truevalue, data)
    :param method: callable(data=, wmin=, wmax=, alpha=) returning a pair of
        per-step (lower, upper) bound arrays of length ndata
    :param alpha: nominal miscoverage level
    :param int ndata: sample size per repetition
    :param int reps: number of repetitions
    :return: (ndata, dict) where the dict holds per-step mean coverage/width
        over repetitions and their standard errors
    """
    wmin, wmax = env.range()
    # Reusable scratch buffers for the elementwise comparisons below.
    ubd = np.zeros(ndata)
    lbd = np.zeros(ndata)
    cov = np.zeros((reps, ndata))
    width = np.zeros((reps, ndata))
    bounds = []
    for i in range(reps):
        (truevalue, data) = env.sample(ndata)
        try:
            cs = method(data=data, wmin=wmin, wmax=wmax, alpha=alpha)
            # Sanity checks: finite bounds, upper >= lower (within tolerance),
            # and final bounds inside [0, 1] (presumably the reward range —
            # TODO confirm against the environment).
            assert np.isfinite(cs[0]).all() and np.isfinite(cs[1]).all()
            assert np.all(cs[1] >= cs[0] - 1e-4)
            assert cs[1][-1] <= 1 + 1e-4
            assert cs[0][-1] >= -1e-4
        except:  # NOTE(review): bare except is deliberate — dump a repro, then re-raise.
            import json
            # Persist a minimal reproduction case for offline debugging.
            # NOTE(review): assumes env.perm_state[1] holds int-convertible
            # values — confirm against ControlledRangeVariance.
            with open('bad_case.json', 'w') as out:
                perm_state = list(env.perm_state)
                perm_state[1] = list(map(int, perm_state[1]))
                out.write(json.dumps((float(truevalue), compress(data), perm_state, float(wmin), float(wmax), alpha)))
            print('truevalue was {}'.format(truevalue))
            print('data was {}'.format(compress(data)))
            print('wmin, wmax was {} {}'.format(wmin, wmax))
            # NOTE(review): if method() itself raised, `cs` is unbound here and
            # this print raises NameError, masking the original error.
            print('ci was {} {}'.format(cs[0][-1], cs[1][-1]))
            raise
        # Covered at step t iff both bounds bracket the true value at t.
        np.greater_equal(cs[1], truevalue, out=ubd)
        np.less_equal(cs[0], truevalue, out=lbd)
        cov[i, :] = ubd * lbd
        width[i, :] += np.subtract(cs[1], cs[0])
        bounds.append((truevalue, cs[0], cs[1]))
    # Final-step (end-of-sequence) summaries, used only for the debug printout.
    upper_ends = [d[2][-1] for d in bounds]
    lower_ends = [d[1][-1] for d in bounds]
    upperbounded = [1 if d[0] <= d[2][-1] else 0 for d in bounds]
    lowerbounded = [1 if d[1][-1] <= d[0] else 0 for d in bounds]
    covered = [1 if u * l > 0 else 0 for (u, l) in zip(upperbounded, lowerbounded)]
    final_width = [d[2][-1] - d[1][-1] for d in bounds]
    def std_mean(x):
        # Standard error of the mean (Bessel-corrected std over sqrt(n-1)).
        return np.std(x, ddof=1) / np.sqrt(len(x) - 1)
    dbg = {
        'cov': np.mean(covered),
        'covstd': std_mean(covered),
        'ubcov': np.mean(upperbounded),
        'lbcov': np.mean(lowerbounded),
        'final_width': np.mean(final_width),
        'widthstd': std_mean(final_width),
        'widthlo': np.quantile(final_width, q=[0.05])[0],
        'widthhi': np.quantile(final_width, q=[0.95])[0],
        'ub': np.mean(upper_ends),
        'lb': np.mean(lower_ends),
    }
    verbose = True
    if verbose:
        print('{}'.format((ndata, {k: np.round(v, 4) for k, v in dbg.items()})), flush=True)
    return (ndata,
            {
                'cov': np.mean(cov, axis=0),
                'covstd': np.std(cov, axis=0, ddof=1) / np.sqrt(cov.shape[0] - 1),
                'width': np.mean(width, axis=0),
                # NOTE(review): 'widtstd' looks like a typo for 'widthstd';
                # kept as-is because downstream readers may use this key.
                'widtstd': np.std(width, axis=0, ddof=1) / np.sqrt(width.shape[0] - 1),
            },
            )
def produce_results_ci(env, method, alpha, ndata=100, reps=10):
    """Measure coverage and width of a fixed-sample confidence-interval method.

    Same experiment as produce_results, except `method` returns a single
    scalar (lower, upper) interval for the whole sample rather than a
    per-step confidence sequence.

    :param env: environment exposing .range() -> (wmin, wmax) and
        .sample(n) -> (truevalue, data)
    :param method: callable(data=, wmin=, wmax=, alpha=) returning a scalar
        (lower, upper) pair
    :param alpha: nominal miscoverage level
    :param int ndata: sample size per repetition
    :param int reps: number of repetitions
    :return: (ndata, dict) with mean coverage/width over repetitions and
        their standard errors
    """
    wmin, wmax = env.range()
    # Size-1 scratch buffers for the scalar comparisons below.
    ubd = np.zeros(1)
    lbd = np.zeros(1)
    cov = np.zeros(reps)
    width = np.zeros(reps)
    bounds = []
    for i in range(reps):
        (truevalue, data) = env.sample(ndata)
        try:
            cs = method(data=data, wmin=wmin, wmax=wmax, alpha=alpha)
            # Sanity checks: finite, ordered (within tolerance), within [0, 1]
            # (presumably the reward range — TODO confirm).
            assert np.isfinite(cs[0]) and np.isfinite(cs[1])
            assert cs[1] >= cs[0] - 1e-4
            assert cs[1] <= 1 + 1e-4
            assert cs[0] >= -1e-4
        except:  # NOTE(review): bare except is deliberate — dump a repro, then re-raise.
            import json
            # Persist a minimal reproduction case for offline debugging.
            with open('bad_case.json', 'w') as out:
                perm_state = list(env.perm_state)
                perm_state[1] = list(map(int, perm_state[1]))
                out.write(json.dumps((float(truevalue), compress(data), perm_state, float(wmin), float(wmax), alpha)))
            print('truevalue was {}'.format(truevalue))
            print('data was {}'.format(compress(data)))
            print('wmin, wmax was {} {}'.format(wmin, wmax))
            # NOTE(review): if method() itself raised, `cs` is unbound here and
            # this print raises NameError, masking the original error.
            print('ci was {} {}'.format(cs[0], cs[1]))
            raise
        np.greater_equal(cs[1], truevalue, out=ubd)
        np.less_equal(cs[0], truevalue, out=lbd)
        cov[i] = ubd * lbd
        width[i] += np.subtract(cs[1], cs[0])
        bounds.append((truevalue, cs[0], cs[1]))
    # Per-repetition summaries used for the debug printout.
    upper_ends = [d[2] for d in bounds]
    lower_ends = [d[1] for d in bounds]
    upperbounded = [1 if d[0] <= d[2] else 0 for d in bounds]
    lowerbounded = [1 if d[1] <= d[0] else 0 for d in bounds]
    covered = [1 if u * l > 0 else 0 for (u, l) in zip(upperbounded, lowerbounded)]
    final_width = [d[2] - d[1] for d in bounds]
    def std_mean(x):
        # Standard error of the mean (Bessel-corrected std over sqrt(n-1)).
        return np.std(x, ddof=1) / np.sqrt(len(x) - 1)
    dbg = {
        'cov': np.mean(covered),
        'covstd': std_mean(covered),
        'ubcov': np.mean(upperbounded),
        'lbcov': np.mean(lowerbounded),
        'final_width': np.mean(final_width),
        'widthstd': std_mean(final_width),
        'widthlo': np.quantile(final_width, q=[0.05])[0],
        'widthhi': np.quantile(final_width, q=[0.95])[0],
        'ub': np.mean(upper_ends),
        'lb': np.mean(lower_ends),
    }
    verbose = True
    if verbose:
        print('{}'.format((ndata, {k: np.round(v, 4) for k, v in dbg.items()})), flush=True)
    return (ndata,
            {
                'cov': np.mean(cov, axis=0),
                'covstd': np.std(cov, axis=0, ddof=1) / np.sqrt(cov.shape[0] - 1),
                'width': np.mean(width, axis=0),
                # NOTE(review): 'widtstd' looks like a typo for 'widthstd';
                # kept as-is because downstream readers may use this key.
                'widtstd': np.std(width, axis=0, ddof=1) / np.sqrt(width.shape[0] - 1),
            },
            )
def bet_1d(data, wmin, wmax, alpha):
    """Adapter giving `wealth_lb_1d` the common (lower, upper) ndarray interface."""
    bounds = wealth_lb_1d(data, wmin, wmax, alpha)
    return np.array(bounds[0]), np.array(bounds[1])
def bet_2d(data, wmin, wmax, alpha):
    """Adapter giving `wealth_lb_2d` the common (lower, upper) ndarray interface."""
    bounds = wealth_lb_2d(data, wmin, wmax, alpha)
    return np.array(bounds[0]), np.array(bounds[1])
def bet_log(data, wmin, wmax, alpha):
    """Adapter giving `wealth_2d` the common (lower, upper) ndarray interface."""
    bounds = wealth_2d(data, wmin, wmax, alpha)
    return np.array(bounds[0]), np.array(bounds[1])
def bet_iqp(data, wmin, wmax, alpha):
    """Adapter giving `wealth_lb_2d_individual_qps` the common (lower, upper) ndarray interface."""
    bounds = wealth_lb_2d_individual_qps(data, wmin, wmax, alpha)
    return np.array(bounds[0]), np.array(bounds[1])
# Copied from
# https://github.com/pmineiro/elfcb
# Why not import it? I modified some code in asymptoticconfidenceinterval below
# TODO: send a PR.
def estimate(datagen, wmin, wmax, rmin=0, rmax=1, raiseonerr=False, censored=False):
    """Maximum-likelihood off-policy value estimate (copied from elfcb).

    :param datagen: zero-argument callable that can be invoked repeatedly,
        each call yielding (count, weight, reward) triples
    :param wmin: lower bound on importance weights; requires 0 <= wmin < 1
    :param wmax: upper bound on importance weights; requires wmax > 1
    :param rmin: minimum possible reward
    :param rmax: maximum possible reward
    :param raiseonerr: accepted for interface parity with the upstream elfcb
        code; not referenced in this function body
    :param censored: True if rewards may be missing (r is None)
    :return: (vhat, dict) — the estimate plus solver by-products
        ('betastar', 'vmin', 'vmax', 'num', 'qfunc')
    """
    import numpy as np
    from scipy.optimize import brentq
    assert wmin >= 0
    assert wmin < 1
    assert wmax > 1
    assert rmax >= rmin
    num = sum(c for c, w, r in datagen())
    assert num >= 1
    # solve dual
    def sumofw(beta):
        # Total (reweighted) importance-weight mass at dual variable beta.
        return sum((c * w)/((w - 1) * beta + num)
                   for c, w, _ in datagen()
                   if c > 0)
    # fun fact about the MLE:
    #
    # if \frac{1}{n} \sum_n w_n < 1 then \beta^* wants to be negative
    # but as wmax \to \infty, lower bound on \beta^* is 0
    # therefore the estimate becomes
    #
    # \hat{V}(\pi) = \left( \frac{1}{n} \sum_n w_n r_n \right) +
    #     \left( 1 - \frac{1}{n} \sum_n w_n \right) \rho
    #
    # where \rho is anything between rmin and rmax
    def graddualobjective(beta):
        # Gradient of the dual objective with respect to beta.
        return sum(c * (w - 1)/((w - 1) * beta + num)
                   for c, w, _ in datagen()
                   if c > 0)
    # Bracket beta so every denominator (w - 1) * beta + num stays positive.
    betamax = min( ((num - c) / (1 - w)
                    for c, w, _ in datagen()
                    if w < 1 and c > 0 ),
                   default=num / (1 - wmin))
    betamax = min(betamax, num / (1 - wmin))
    betamin = max( ((num - c) / (1 - w)
                    for c, w, _ in datagen()
                    if w > 1 and c > 0 ),
                   default=num / (1 - wmax))
    betamin = max(betamin, num / (1 - wmax))
    gradmin = graddualobjective(betamin)
    gradmax = graddualobjective(betamax)
    # Root of the gradient if it changes sign inside the bracket; otherwise
    # the optimum sits at the active bracket boundary.
    if gradmin * gradmax < 0:
        betastar = brentq(f=graddualobjective, a=betamin, b=betamax)
    elif gradmin < 0:
        betastar = betamin
    else:
        betastar = betamax
    # Probability mass not explained by the observed weights; it is assigned
    # to the reward-range endpoints to form vmin/vmax below.
    remw = max(0.0, 1.0 - sumofw(betastar))
    if censored:
        vnumhat = 0
        vdenomhat = 0
        for c, w, r in datagen():
            if c > 0:
                if r is not None:
                    vnumhat += w*r* c/((w - 1) * betastar + num)
                    vdenomhat += w*1* c/((w - 1) * betastar + num)
        if np.allclose(vdenomhat, 0):
            # No observed rewards at all: the estimate is undefined.
            vhat = vmin = vmax = None
        else:
            vnummin = vnumhat + remw * rmin
            vdenommin = vdenomhat + remw
            vmin = min([ vnummin / vdenommin, vnumhat / vdenomhat ])
            vnummax = vnumhat + remw * rmax
            vdenommax = vdenomhat + remw
            vmax = max([ vnummax / vdenommax, vnumhat / vdenomhat ])
            vhat = 0.5*(vmin + vmax)
    else:
        vhat = 0
        for c, w, r in datagen():
            if c > 0:
                vhat += w*r* c/((w - 1) * betastar + num)
        # Midpoint of the [vmin, vmax] range implied by the leftover mass.
        vmin = vhat + remw * rmin
        vmax = vhat + remw * rmax
        vhat += remw * (rmin + rmax) / 2.0
    return vhat, {
        'betastar': betastar,
        'vmin': vmin,
        'vmax': vmax,
        'num': num,
        'qfunc': lambda c, w, r: c / (num + betastar * (w - 1)),
    }
# Copied from
# https://github.com/pmineiro/elfcb/blob/d0daf9e634b2382001f9b336a715e35fa2fd8619/MLE/MLE/asymptoticconfidenceinterval.py
# NB: that was the git HEAD when I copied it
# NB: a small modification was done to avoid numerical issues with scipy.stats.f.isf when dfd > 23000
def asymptoticconfidenceinterval(datagen, wmin, wmax, alpha=0.05,
rmin=0, rmax=1, raiseonerr=False):
from scipy.special import xlogy
from scipy.stats import f, chi2
from math import exp, log
import numpy as np
assert wmin >= 0
assert wmin < 1
assert wmax > 1
assert rmax >= rmin
vhat, qmle = estimate(datagen=datagen, wmin=wmin, wmax=wmax,
rmin=rmin, rmax=rmax, raiseonerr=raiseonerr)
num = qmle['num']
if num < 2:
return ((rmin, rmax), (None, None))
betamle = qmle['betastar']
if num > 23000:
Delta = 0.5 * chi2(df=1).isf(q=alpha)
else:
#There are numerical issues with isf for num > 23000
Delta = 0.5 * f.isf(q=alpha, dfn=1, dfd=num-1)
sumwsq = sum(c * w * w for c, w, _ in datagen())
wscale = max(1.0, np.sqrt(sumwsq / num))
rscale = max(1.0, np.abs(rmin), np.abs(rmax))
# solve dual
tiny = 1e-5
logtiny = log(tiny)
def safedenom(x):
return x if x > tiny else exp(logstar(x))
| |
subdir: str,
subcontents: Dict[str, Union[bool, Dict]],
):
"""Updates a directory's content tree with the content tree of a subdirectory."""
def update_dict(base: dict, update: dict):
for key in update:
# "/" can be in *base* if the directory's parent was re-checked
if key in base and key != "/":
update_dict(base[key], update[key])
else:
base[key] = update[key]
path = subdir[len(dir) + 1 :].split(os.sep) if dir else [subdir]
target = path.pop()
path_iter = iter(path)
for branch in path_iter:
try:
contents = contents[branch]
except KeyError:
contents[branch] = {}
contents = contents[branch]
break
for branch in path_iter:
contents[branch] = {}
contents = contents[branch]
if target in contents:
update_dict(contents[target], subcontents)
else:
contents[target] = subcontents
def get_urls(
    url_queue: Queue,
    images: List[Tuple[str, Image]],
    ImageClass: type,
) -> None:
    """Processes URL sources from a/some separate thread(s)"""
    while True:
        source = url_queue.get()
        # A falsy sentinel or an interrupt ends the worker.
        if interrupted.is_set() or not source:
            break
        log(f"Getting image from {source!r}", logger, verbose=True)
        try:
            images.append((basename(source), Image(ImageClass.from_url(source))))
        # `ConnectionError` also covers `ConnectionTimeout`
        except requests.exceptions.ConnectionError:
            log(f"Unable to get {source!r}", logger, _logging.ERROR)
        except (URLNotFoundError, PIL.UnidentifiedImageError) as e:
            log(str(e), logger, _logging.ERROR)
        except Exception:
            log_exception(f"Getting {source!r} failed", logger, direct=True)
        else:
            log(f"Done getting {source!r}", logger, verbose=True)
def open_files(
    file_queue: Queue,
    images: List[Tuple[str, Image]],
    ImageClass: type,
) -> None:
    """Processes file sources from a/some separate thread(s)"""
    while True:
        source = file_queue.get()
        # A falsy sentinel or an interrupt ends the worker.
        if interrupted.is_set() or not source:
            break
        log(f"Opening {source!r}", logger, verbose=True)
        try:
            images.append((source, Image(ImageClass.from_file(source))))
        except PIL.UnidentifiedImageError as e:
            log(str(e), logger, _logging.ERROR)
        except OSError as e:
            log(f"Could not read {source!r}: {e}", logger, _logging.ERROR)
        except Exception:
            log_exception(f"Opening {source!r} failed", logger, direct=True)
def main() -> None:
"""CLI execution sub-entry-point"""
global args, url_images, MAX_DEPTH, RECURSIVE, SHOW_HIDDEN
def check_arg(
name: str,
check: Callable[[Any], Any],
msg: str,
exceptions: Tuple[Exception] = None,
*,
fatal: bool = True,
) -> bool:
"""Performs generic argument value checks and outputs the given message if the
argument value is invalid.
Returns:
``True`` if valid, otherwise ``False``.
If *exceptions* is :
- not given or ``None``, the argument is invalid only if ``check(arg)``
returns a falsy value.
- given, the argument is invalid if ``check(arg)`` raises one of the given
exceptions. It's also invalid if it raises any other exception but the
error message is different.
"""
value = getattr(args, name)
if exceptions:
valid = False
try:
check(value)
valid = True
except exceptions:
pass
except Exception:
log_exception(
f"--{name.replace('_', '-')}: Invalid! See the logs",
direct=True,
fatal=True,
)
else:
valid = check(value)
if not valid:
notify.notify(
f"--{name.replace('_', '-')}: {msg} (got: {value!r})",
level=notify.CRITICAL if fatal else notify.ERROR,
)
return bool(valid)
parser = argparse.ArgumentParser(
prog="term-image",
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Display/Browse images in a terminal",
epilog=""" \
'--' should be used to separate positional arguments that begin with an '-' \
from options/flags, to avoid ambiguity.
For example, `$ term-image [options] -- -image.jpg --image.png`
Render Styles:
auto: The best style is automatically determined based on the detected terminal
support.
kitty: Uses the kitty graphics protocol. Currently supported terminal emulators
include (but might not be limited to):
- Kitty >= 0.20.0
- Konsole >= 22.04.0
term: Uses unicode half blocks with 24-bit color escape codes to represent images
with a density of two pixels per character cell.
Using a terminal-graphics-based style not supported by the active terminal is not
allowed by default.
To force a style that is normally unsupported, add the '--force-style' flag.
FOOTNOTES:
1. Width and height are in units of columns and lines repectively.
AUTO size is calculated such that the image always fits into the available
terminal size (i.e terminal size minus allowances) except when `--scroll` is
specified, which allows the image height to go beyond the terminal height.
2. The size is multiplied by the scale on each axis respectively before the image
is rendered. A scale value must be such that 0.0 < value <= 1.0.
3. In CLI mode, only image sources are used, directory sources are skipped.
Animated images are displayed only when animation is disabled (with `--no-anim`)
or when there's only one image source.
4. Any image having more pixels than the specified maximum will be:
- skipped, in CLI mode, if '--max-pixels-cli' is specified.
- replaced, in TUI mode, with a placeholder when displayed but can still be forced
to display or viewed externally.
Note that increasing this should not have any effect on general performance
(i.e navigation, etc) but the larger an image is, the more the time and memory
it'll take to render it. Thus, a large image might delay the rendering of other
images to be rendered immediately after it.
5. Frames will not be cached for any animation with more frames than this value.
Memory usage depends on the frame count per image, not this maximum count.
6. Any event with a level lower than the specified one is not reported.
7. Supports all image formats supported by `PIL.Image.open()`.
See https://pillow.readthedocs.io/en/latest/handbook/image-file-formats.html for
details.
""",
add_help=False, # '-h' is used for HEIGHT
)
# General
general = parser.add_argument_group("General Options")
general.add_argument(
"--help",
action="help",
help="Show this help message and exit",
)
general.add_argument(
"--version",
action="version",
version=__version__,
help="Show the program version and exit",
)
general.add_argument(
"--reset-config",
action="store_true",
help="Restore default config and exit (Overwrites the config file)",
)
general.add_argument(
"-S",
"--style",
choices=("auto", "kitty", "term"),
default="auto",
help='Image render style (default: auto). See "Render Styles" below',
)
general.add_argument(
"--force-style",
action="store_true",
help=(
"Use the specified render style even if it's reported as unsupported by "
"the active terminal"
),
)
font_ratio_options = general.add_mutually_exclusive_group()
font_ratio_options.add_argument(
"-F",
"--font-ratio",
type=float,
metavar="N",
default=config.font_ratio,
help=(
"The width-to-height ratio of a character cell in the terminal, for "
f"correct image proportion (default: {config.font_ratio or 'auto'})"
),
)
font_ratio_options.add_argument(
"--auto-font-ratio",
action="store_true",
help="Determine the font ratio from the terminal, if possible",
)
mode_options = general.add_mutually_exclusive_group()
mode_options.add_argument(
"--cli",
action="store_true",
help=(
"Do not the launch the TUI, instead draw all image sources "
"to the terminal directly [3]"
),
)
mode_options.add_argument(
"--tui",
action="store_true",
help="Always launch the TUI, even for a single image",
)
# # Animation
anim_options = parser.add_argument_group("Animation Options (General)")
anim_options.add_argument(
"-f",
"--frame-duration",
type=float,
metavar="N",
help=(
"The time (in seconds) between frames for all animated images "
"(default: Determined per image from it's metadata OR 0.1)"
),
)
anim_options.add_argument(
"-R",
"--repeat",
type=int,
default=-1,
metavar="N",
help=(
"Number of times to repeat all frames of an animated image; A negative "
"count implies an infinite loop (default: -1)"
),
)
anim_cache_options = anim_options.add_mutually_exclusive_group()
anim_cache_options.add_argument(
"--anim-cache",
type=int,
default=config.anim_cache,
metavar="N",
help=(
"Maximum frame count for animation frames to be cached (Better performance "
f"at the cost of memory) (default: {config.anim_cache}) [5]"
),
)
anim_cache_options.add_argument(
"--cache-all-anim",
action="store_true",
help=(
"Cache frames for all animations (Beware, uses up a lot of memory for "
"animated images with very high frame count)"
),
)
anim_cache_options.add_argument(
"--cache-no-anim",
action="store_true",
help="Disable frame caching (Less memory usage but reduces performance)",
)
anim_options.add_argument(
"--no-anim",
action="store_true",
help=(
"Disable image animation. Animated images are displayed as just their "
"first frame."
),
)
# # Transparency
_alpha_options = parser.add_argument_group(
"Transparency Options (General)",
"NOTE: These are mutually exclusive",
)
alpha_options = _alpha_options.add_mutually_exclusive_group()
alpha_options.add_argument(
"--no-alpha",
action="store_true",
help="Disable image transparency (i.e black background)",
)
alpha_options.add_argument(
"-A",
"--alpha",
type=float,
metavar="N",
default=_ALPHA_THRESHOLD,
help=(
"Alpha ratio above which pixels are taken as opaque (0 <= x < 1) "
f"(default: {_ALPHA_THRESHOLD:f})"
),
)
alpha_options.add_argument(
"-b",
"--alpha-bg",
metavar="COLOR",
help=(
"Hex color (without '#') with which transparent backgrounds should be "
"replaced"
),
)
# CLI-only
cli_options = parser.add_argument_group(
"CLI-only Options",
"These options apply only when there is just one valid image source",
)
size_options = cli_options.add_mutually_exclusive_group()
size_options.add_argument(
"-w",
"--width",
type=int,
metavar="N",
help="Width of the image to be rendered (default: auto) [1]",
)
size_options.add_argument(
"-h",
"--height",
type=int,
metavar="N",
help="Height of the image to be rendered (default: auto) [1]",
)
cli_options.add_argument(
"--h-allow",
type=int,
default=0,
metavar="N",
help=(
"Horizontal allowance i.e minimum number of columns to leave unused "
"(default: 0)"
),
)
cli_options.add_argument(
"--v-allow",
type=int,
default=2,
metavar="N",
help=(
"Vertical allowance i.e minimum number of lines to leave unused "
"(default: 2)"
),
)
cli_options.add_argument(
"--scroll",
action="store_true",
help=(
"Allow the image height to go beyond the terminal height. "
"Not needed when | |
get_member returned nothing.
# This can be fixed with a slight breaking change to the return type,
# i.e. adding discord.Object to the list of it
# However, for now this is an acceptable compromise.
if target is not None:
ret[target] = overwrite
return ret
@property
def category(self):
"""Optional[:class:`~discord.CategoryChannel`]: The category this channel belongs to.
If there is no category then this is ``None``.
"""
return self.guild.get_channel(self.category_id)
@property
def permissions_synced(self):
""":class:`bool`: Whether or not the permissions for this channel are synced with the
category it belongs to.
If there is no category then this is ``False``.
.. versionadded:: 1.3
"""
category = self.guild.get_channel(self.category_id)
return bool(category and category._overwrites == self._overwrites)
    def permissions_for(self, member):
        """Handles permission resolution for the current :class:`~discord.Member`.

        This function takes into consideration the following cases:

        - Guild owner
        - Guild roles
        - Channel overrides
        - Member overrides

        Parameters
        ----------
        member: :class:`~discord.Member`
            The member to resolve permissions for.

        Returns
        -------
        :class:`~discord.Permissions`
            The resolved permissions for the member.
        """
        # The current cases can be explained as:
        # Guild owner get all permissions -- no questions asked. Otherwise...
        # The @everyone role gets the first application.
        # After that, the applied roles that the user has in the channel
        # (or otherwise) are then OR'd together.
        # After the role permissions are resolved, the member permissions
        # have to take into effect.
        # After all that is done.. you have to do the following:
        # If manage permissions is True, then all permissions are set to True.
        # The operation first takes into consideration the denied
        # and then the allowed.
        if self.guild.owner_id == member.id:
            return Permissions.all()
        default = self.guild.default_role
        base = Permissions(default.permissions.value)
        roles = member.roles
        # Apply guild roles that the member has.
        for role in roles:
            base.value |= role.permissions.value
        # Guild-wide Administrator -> True for everything
        # Bypass all channel-specific overrides
        if base.administrator:
            return Permissions.all()
        # Apply @everyone allow/deny first since it's special
        # (only index 0 is checked for the @everyone overwrite).
        try:
            maybe_everyone = self._overwrites[0]
            if maybe_everyone.id == self.guild.id:
                base.handle_overwrite(allow=maybe_everyone.allow, deny=maybe_everyone.deny)
                remaining_overwrites = self._overwrites[1:]
            else:
                remaining_overwrites = self._overwrites
        except IndexError:
            # No overwrites on this channel at all.
            remaining_overwrites = self._overwrites
        # not sure if doing member._roles.get(...) is better than the
        # set approach. While this is O(N) to re-create into a set for O(1)
        # the direct approach would just be O(log n) for searching with no
        # extra memory overhead. For now, I'll keep the set cast
        # Note that the member.roles accessor up top also creates a
        # temporary list
        member_role_ids = {r.id for r in roles}
        denies = 0
        allows = 0
        # Apply channel specific role permission overwrites
        # (all matching role overwrites are OR'd together, then applied once).
        for overwrite in remaining_overwrites:
            if overwrite.type == 'role' and overwrite.id in member_role_ids:
                denies |= overwrite.deny
                allows |= overwrite.allow
        base.handle_overwrite(allow=allows, deny=denies)
        # Apply member specific permission overwrites
        for overwrite in remaining_overwrites:
            if overwrite.type == 'member' and overwrite.id == member.id:
                base.handle_overwrite(allow=overwrite.allow, deny=overwrite.deny)
                break
        # if you can't send a message in a channel then you can't have certain
        # permissions as well
        if not base.send_messages:
            base.send_tts_messages = False
            base.mention_everyone = False
            base.embed_links = False
            base.attach_files = False
        # if you can't read a channel then you have no permissions there
        if not base.read_messages:
            denied = Permissions.all_channel()
            base.value &= ~denied.value
        return base
async def delete(self, *, reason=None):
"""|coro|
Deletes the channel.
You must have :attr:`~Permissions.manage_channels` permission to use this.
Parameters
-----------
reason: Optional[:class:`str`]
The reason for deleting this channel.
Shows up on the audit log.
Raises
-------
~discord.Forbidden
You do not have proper permissions to delete the channel.
~discord.NotFound
The channel was not found or was already deleted.
~discord.HTTPException
Deleting the channel failed.
"""
await self._state.http.delete_channel(self.id, reason=reason)
async def set_permissions(self, target, *, overwrite=_undefined, reason=None, **permissions):
r"""|coro|
Sets the channel specific permission overwrites for a target in the
channel.
The ``target`` parameter should either be a :class:`~discord.Member` or a
:class:`~discord.Role` that belongs to guild.
The ``overwrite`` parameter, if given, must either be ``None`` or
:class:`~discord.PermissionOverwrite`. For convenience, you can pass in
keyword arguments denoting :class:`~discord.Permissions` attributes. If this is
done, then you cannot mix the keyword arguments with the ``overwrite``
parameter.
If the ``overwrite`` parameter is ``None``, then the permission
overwrites are deleted.
You must have the :attr:`~Permissions.manage_roles` permission to use this.
Examples
----------
Setting allow and deny: ::
await message.channel.set_permissions(message.author, read_messages=True,
send_messages=False)
Deleting overwrites ::
await channel.set_permissions(member, overwrite=None)
Using :class:`~discord.PermissionOverwrite` ::
overwrite = discord.PermissionOverwrite()
overwrite.send_messages = False
overwrite.read_messages = True
await channel.set_permissions(member, overwrite=overwrite)
Parameters
-----------
target: Union[:class:`~discord.Member`, :class:`~discord.Role`]
The member or role to overwrite permissions for.
overwrite: Optional[:class:`~discord.PermissionOverwrite`]
The permissions to allow and deny to the target, or `None` to
delete the overwrite.
\*\*permissions
A keyword argument list of permissions to set for ease of use.
Cannot be mixed with ``overwrite``.
reason: Optional[:class:`str`]
The reason for doing this action. Shows up on the audit log.
Raises
-------
~discord.Forbidden
You do not have permissions to edit channel specific permissions.
~discord.HTTPException
Editing channel specific permissions failed.
~discord.NotFound
The role or member being edited is not part of the guild.
~discord.InvalidArgument
The overwrite parameter invalid or the target type was not
:class:`~discord.Role` or :class:`~discord.Member`.
"""
http = self._state.http
if isinstance(target, User):
perm_type = 'member'
elif isinstance(target, Role):
perm_type = 'role'
else:
raise InvalidArgument('target parameter must be either Member or Role')
if isinstance(overwrite, _Undefined):
if len(permissions) == 0:
raise InvalidArgument('No overwrite provided.')
try:
overwrite = PermissionOverwrite(**permissions)
except (ValueError, TypeError):
raise InvalidArgument('Invalid permissions given to keyword arguments.')
else:
if len(permissions) > 0:
raise InvalidArgument('Cannot mix overwrite and keyword arguments.')
# TODO: wait for event
if overwrite is None:
await http.delete_channel_permissions(self.id, target.id, reason=reason)
elif isinstance(overwrite, PermissionOverwrite):
(allow, deny) = overwrite.pair()
await http.edit_channel_permissions(self.id, target.id, allow.value, deny.value, perm_type, reason=reason)
else:
raise InvalidArgument('Invalid overwrite type provided.')
async def _clone_impl(self, base_attrs, *, name=None, reason=None):
base_attrs['permission_overwrites'] = [
x._asdict() for x in self._overwrites
]
base_attrs['parent_id'] = self.category_id
base_attrs['name'] = name or self.name
guild_id = self.guild.id
cls = self.__class__
data = await self._state.http.create_channel(guild_id, self.type.value, reason=reason, **base_attrs)
obj = cls(state=self._state, guild=self.guild, data=data)
# temporarily add it to the cache
self.guild._channels[obj.id] = obj
return obj
    async def clone(self, *, name=None, reason=None):
        """|coro|

        Clones this channel. This creates a channel with the same properties
        as this channel.

        .. versionadded:: 1.1

        Parameters
        ------------
        name: Optional[:class:`str`]
            The name of the new channel. If not provided, defaults to this
            channel name.
        reason: Optional[:class:`str`]
            The reason for cloning this channel. Shows up on the audit log.

        Raises
        -------
        ~discord.Forbidden
            You do not have the proper permissions to create this channel.
        ~discord.HTTPException
            Creating the channel failed.
        """
        # Abstract: concrete channel types implement this (via _clone_impl).
        raise NotImplementedError
async def create_invite(self, *, reason=None, **fields):
"""|coro|
Creates an instant invite.
You must have the :attr:`~Permissions.create_instant_invite` permission to
do this.
Parameters
------------
max_age: :class:`int`
How long the invite should last in seconds. If it's 0 then the invite
doesn't expire. Defaults to 0.
max_uses: :class:`int`
How many uses the invite could be used for. If it's 0 then there
are unlimited uses. Defaults to 0.
temporary: :class:`bool`
Denotes that the invite grants temporary membership
(i.e. they get kicked after they disconnect). Defaults to ``False``.
unique: :class:`bool`
Indicates if a unique invite URL should be created. Defaults to True.
If this is set to ``False`` then it will return a previously created
invite.
reason: Optional[:class:`str`]
The reason for creating this invite. Shows up on the audit log.
Raises
-------
~discord.HTTPException
Invite creation failed.
Returns
--------
:class:`~discord.Invite`
The invite that was created.
"""
data = await self._state.http.create_invite(self.id, reason=reason, **fields)
return Invite.from_incomplete(data=data, state=self._state)
async def invites(self):
"""|coro|
Returns a list of all active instant invites from this channel.
You must have :attr:`~Permissions.manage_guild` to get this information.
Raises
-------
~discord.Forbidden
You do not have proper permissions to get the information.
~discord.HTTPException
An error occurred while fetching the information.
Returns
-------
List[:class:`~discord.Invite`]
The list of invites that are currently active.
"""
state = self._state
data = await state.http.invites_from_channel(self.id)
result = []
for invite in data:
invite['channel'] = self
invite['guild'] = self.guild
result.append(Invite(state=state, data=invite))
return result
class Messageable(metaclass=abc.ABCMeta):
"""An ABC that details | |
# SideBar.py
import tkinter as tk
import os,sys
import time
from PIL import Image,ImageTk
import threading
from tkinter.ttk import Progressbar, Style, Scale
from tkcolorpicker import askcolor
from tkinter import filedialog
import psutil as ps
from tkinter.scrolledtext import ScrolledText
import tkinter as tk
import tkinter.ttk as ttk
from mutagen import File
import mutagen
from mutagen import mp3
import random
import pygame
import io
# --- Shared mutable state; mutated as globals by the callbacks below ---
right=True          # True: the bar docks on the right screen edge (see position())
app=None            # root tk window, created later
alpha=.9            # window transparency level
bgr='#2C3952'       # background colour (persisted in sidebardata.log)
t=None
run=True            # cleared by on_closing() to stop the bar() thread
dw=307              # design width the pixel maths below are scaled from
dh=824              # design height the pixel maths below are scaled from
shuffle=False
song='Select Song'+' '*37   # padded label text for the current song
prsntname=song[:42]
s=None              # ttk Style of the battery progressbar (see bar())
mute=False          # persisted in mute.txt
dire=None           # music directory chosen via filechooser()
loc=None
musiclist=None      # Treeview listing the scanned songs
songnum=None        # row index of the current song in musiclist
playing=False       # True while music is playing (not paused)
d=dict()            # song file name -> full path (filled by walk())
c=0                 # number of songs found by walk()
v=1.0               # current volume, 0.0..1.0 in 0.1 steps
tv=None             # StringVar: elapsed-time display
thread=None
hr=0
minit=0
sec=0
totsec=0
totmin=0
tothr=0
pt=0000             # elapsed playback time (int 0 here; later a formatted str)
totv=None           # StringVar: total track length display
songinfo=None
totlength=None      # current track length in seconds (from mutagen)
timeslider=None
pre=0               # seek offset added to get_pos() after a set_pos() (see playtime())
songlabel=None
playv=None
i=0
volinfo=None
prevol=1.0          # volume remembered while muted
shuffle=False       # NOTE(review): duplicate assignment (already set above)
prelist=list()      # history of played row indices (for shuffle "previous")
live=True           # cleared by on_closing() to stop the playtime() thread
dircontent=None
prevol=1.0          # NOTE(review): duplicate assignment (already set above)
'''----------------------------------------netspeed below--------------------------------'''
speedUp=None        # label: upload speed
speedDown=None      # label: download speed
# First network interface that is currently up.
# NOTE(review): the [0] raises IndexError when no interface is up.
interface=[itf for itf in list(dict.keys(ps.net_if_stats())) if(ps.net_if_stats()[itf].isup)][0]
interface_list_at_startup=[itf for itf in list(dict.keys(ps.net_if_stats())) if(ps.net_if_stats()[itf].isup)]
xpos,ypos=0,0
cntnt=''            # notes-pad text restored from notes.txt below
oldcnt=''           # last saved notes text (bar() skips no-op writes)
if(len(interface)==0):
    # NOTE(review): likely unreachable -- the [0] above already raises if
    # nothing is up; kept as-is.
    os._exit(0)
# Restore the previously selected interface, if it is still up.
if(os.path.exists('C:\\ProgramData\\SideBar\\netinterfacedata.log')):
    with open('C:\\ProgramData\\SideBar\\netinterfacedata.log','r') as f:
        line=str(f.readline()).strip()
        interfacelist=[itf for itf in list(dict.keys(ps.net_if_stats())) if(ps.net_if_stats()[itf].isup)]
        if(line in interfacelist):
            if(ps.net_if_stats()[interface].isup):
                interface=line
            else:
                interface=interfacelist[0]
        else:
            interface=interfacelist[0]
'''----------------------------------------netspeed above------------------------------------'''
# Restore the persisted background colour (or write the default on first run).
try:
    if(os.path.exists('C:\\ProgramData\\SideBar\\sidebardata.log')):
        with open('C:\\ProgramData\\SideBar\\sidebardata.log','r') as f:
            bgr=str(f.readline()).strip()
    else:
        if(os.path.exists('C:\\ProgramData\\SideBar')):
            with open('C:\\ProgramData\\SideBar\\sidebardata.log','w+') as f:
                f.write(bgr)
        else:
            os.mkdir('C:\\ProgramData\\SideBar')
            with open('C:\\ProgramData\\SideBar\\sidebardata.log','w+') as f:
                f.write(bgr)
except:
    pass
# Restore which screen edge the bar docks to.
try:
    if(os.path.exists('C:\\ProgramData\\SideBar\\position.log')):
        with open('C:\\ProgramData\\SideBar\\position.log','r') as f:
            if(str(f.readline()).strip()=='right'):
                right=True
            else:
                right=False
except:
    pass
# Restore saved notes.
try:
    if(os.path.exists('C:\\ProgramData\\SideBar\\notes.txt')):
        with open('C:\\ProgramData\\SideBar\\notes.txt','r') as f:
            cntnt=f.read()
except:
    pass
# Restore mute state.
try:
    if(os.path.exists('C:\\ProgramData\\SideBar\\mute.txt')):
        with open('C:\\ProgramData\\SideBar\\mute.txt','r') as f:
            if(str(f.readline()).strip()=='true'):
                mute=True
            else:
                mute=False
except:
    pass
def on_closing():
    """Window-close handler: stop playback, signal worker threads, exit hard."""
    global app, run, live
    run = False
    live = False
    pygame.mixer.music.stop()
    app.destroy()
    # Hard exit so lingering daemon/worker threads cannot keep the process up.
    os._exit(0)
def on_hover():
    """Animate the bar sliding into view (from xleave to xhover)."""
    global app
    step = 16 if right else -16
    for pos in reversed(range(int(xhover), int(xleave), step)):
        time.sleep(0.001)
        app.geometry('%dx%d+%d+%d' % (ws, hs, pos, y))
    # Snap exactly to the fully-visible position at the end.
    app.geometry('%dx%d+%d+%d' % (ws, hs, xhover, y))
def on_leave():
    """Animate the bar sliding out of view (from xhover to xleave)."""
    global app
    step = 16 if right else -16
    for pos in range(int(xhover), int(xleave), step):
        time.sleep(0.001)
        app.geometry('%dx%d+%d+%d' % (ws, hs, pos, y))
    # Snap exactly to the hidden position at the end.
    app.geometry('%dx%d+%d+%d' % (ws, hs, xleave, y))
def start_hovereffect(event):
    """Mouse-enter handler: run the slide-in animation on a daemon thread."""
    threading.Thread(target=on_hover, daemon=True).start()
def start_leaveeffect(event):
    """Mouse-leave handler: run the slide-out animation on a daemon thread."""
    threading.Thread(target=on_leave, daemon=True).start()
def choosecolor():
    """Let the user pick a new background colour, apply it to every widget
    and persist it to sidebardata.log."""
    # NOTE(review): most names in this global list are only read, not assigned.
    global bgr,iconUp,iconDown,speedUp,speedDown,iconTotal,totalUsage,playlabel,prevbut,playbut,nextbut,shufbut,musiclist,backbut,locbut,time_elapsed,total_time,songlabel
    # askcolor returns (rgb_tuple, hex_string); None when the dialog is cancelled.
    value=askcolor(bgr, app)[1]
    if(not (value is None)):
        bgr=value
        for w in widgetlist:
            w.configure(background=bgr)
        stylescale.configure('TScale',background=bgr)
        # Persist the choice (creating the data directory on first run).
        if(os.path.exists('C:\\ProgramData\\SideBar')):
            with open('C:\\ProgramData\\SideBar\\sidebardata.log','w+') as f:
                f.write(bgr)
        else:
            os.mkdir('C:\\ProgramData\\SideBar')
            with open('C:\\ProgramData\\SideBar\\sidebardata.log','w+') as f:
                f.write(bgr)
def bar():
    """Once-a-second worker thread: auto-saves the notes pad (every 31s),
    refreshes the battery gauge and updates the network speed/usage labels."""
    global app,combo,speedUp,speedDown,run,oldcnt,interface,interface_list_at_startup,s,note,autosavelabel
    up=0        # bytes_sent reading at the previous tick
    down=0      # bytes_recv reading at the previous tick
    notetime=0  # seconds since the notes pad was last auto-saved
    try:
        while(run):
            time.sleep(1)
            notetime+=1
            autosavelabel.configure(text='Auto Save in: '+str(31-notetime)+'Sec')
            if(notetime>30):
                notetime=0
                app.wm_attributes('-topmost', 1)
                # Persist the notes pad, but only when its text changed.
                try:
                    cntnt=note.get("1.0", 'end-1c')
                    if(not (cntnt=='')):
                        if(not oldcnt==cntnt):
                            with open('C:\\ProgramData\\SideBar\\notes.txt','w+') as f:
                                f.write(cntnt)
                            autosavelabel.configure(text='Saved!')
                            oldcnt=cntnt
                        else:
                            autosavelabel.configure(text='No Changes(not saved)!')
                except:
                    pass
            # Battery gauge: blue while plugged in, else coloured by charge level.
            try:
                bp=ps.sensors_battery().percent
                progress['value'] = bp
                s.configure("LabeledProgressbar",text=" {0}%".format(bp))
                if(ps.sensors_battery().power_plugged):
                    s.configure("LabeledProgressbar",background='#0000c8')
                else:
                    if(bp<20):
                        s.configure("LabeledProgressbar",background='#ff0000')
                    elif(bp<30):
                        s.configure("LabeledProgressbar",background='#ff962a')
                    elif(bp<50):
                        s.configure("LabeledProgressbar",background='#ff8600')
                    elif(bp<60):
                        s.configure("LabeledProgressbar",background='#a3d900')
                    elif(bp<80):
                        s.configure("LabeledProgressbar",background='#00d900')
                    elif(bp<101):
                        s.configure("LabeledProgressbar",background='#009800')
                app.update()
            except:
                pass
            # Network block: if the monitored interface vanished or went down,
            # switch to a newly-appeared "up" interface and skip this tick.
            try:
                if(interface in list(dict.keys(ps.net_if_stats()))):
                    if(not ps.net_if_stats()[interface].isup):
                        interface_list_new=[itf for itf in list(dict.keys(ps.net_if_stats())) if(ps.net_if_stats()[itf].isup)]
                        previnter=interface
                        interface=list(set(interface_list_new).difference(interface_list_at_startup))[0] if(len(list(set(interface_list_new).difference(interface_list_at_startup)))>0) else interface
                        if(previnter!=interface):
                            combo.set(interface)
                        interface_list_at_startup=[itf for itf in list(dict.keys(ps.net_if_stats())) if(ps.net_if_stats()[itf].isup)]
                        if(os.path.exists('C:\\ProgramData\\SideBar')):
                            with open('C:\\ProgramData\\SideBar\\netinterfacedata.log','w+') as f:
                                f.write(interface)
                        else:
                            os.mkdir('C:\\ProgramData\\SideBar')
                            with open('C:\\ProgramData\\SideBar\\netinterfacedata.log','w+') as f:
                                f.write(interface)
                        continue
                else:
                    interface_list_new=[itf for itf in list(dict.keys(ps.net_if_stats())) if(ps.net_if_stats()[itf].isup)]
                    previnter=interface
                    interface=list(set(interface_list_new).difference(interface_list_at_startup))[0] if(len(list(set(interface_list_new).difference(interface_list_at_startup)))>0) else interface
                    if(previnter!=interface):
                        combo.set(interface)
                    interface_list_at_startup=[itf for itf in list(dict.keys(ps.net_if_stats())) if(ps.net_if_stats()[itf].isup)]
                    if(os.path.exists('C:\\ProgramData\\SideBar')):
                        with open('C:\\ProgramData\\SideBar\\netinterfacedata.log','w+') as f:
                            f.write(interface)
                    else:
                        os.mkdir('C:\\ProgramData\\SideBar')
                        with open('C:\\ProgramData\\SideBar\\netinterfacedata.log','w+') as f:
                            f.write(interface)
                    continue
                # Per-second deltas, scaled to KB (unit* == 1) or MB/GB.
                sent=ps.net_io_counters(pernic=True)[interface].bytes_sent
                recv=ps.net_io_counters(pernic=True)[interface].bytes_recv
                total=(sent+recv)/1000
                unitUp=1
                unitDown=1
                unitTotal=1
                upspeed=(sent-up)/1000
                downspeed=(recv-down)/1000
                if(len(str(int(upspeed)))>=4):
                    upspeed=upspeed/1000
                    unitUp=2
                if(len(str(int(downspeed)))>=4):
                    downspeed=downspeed/1000
                    unitDown=2
                if(len(str(int(total)))>=7):
                    total=total/1000000
                    unitTotal=3
                elif(len(str(int(total)))>=4):
                    total=total/1000
                    unitTotal=2
                speedUp.config(text='{0:.2f} {1}/s'.format(upspeed,'KB' if unitUp==1 else 'MB'))
                speedDown.config(text='{0:.2f} {1}/s'.format(downspeed,'KB' if unitDown==1 else 'MB'))
                totalUsage.config(text='{0:.2f} {1}'.format(total,'KB' if unitTotal==1 else 'MB' if unitTotal==2 else 'GB'))
                up=sent
                down=recv
            except:
                pass
    except:
        # Whole loop died (e.g. no battery sensor): pin the gauge at 100%.
        bp=100
        progress['value'] = bp
        s.configure("LabeledProgressbar",text=" {0}%".format(bp))
        pass
def position():
    """Toggle the bar between the left and right screen edge and persist
    the choice to position.log."""
    global right,app,xhover,xleave
    if(right):
        # Currently right -> move to the left edge.
        buttonPosition.configure(text='Right Window')
        right=False
        xhover =-1
        xleave=1-ws
        app.geometry('%dx%d+%d+%d' % (ws, hs, xleave, y))
    else:
        # Currently left -> move to the right edge.
        buttonPosition.configure(text='Left Window')
        right=True
        xleave = app.winfo_screenwidth()-2
        xhover=app.winfo_screenwidth()-ws+2
        app.geometry('%dx%d+%d+%d' % (ws, hs, xleave, y))
    # Persist the new side (creating the data directory on first run).
    if(os.path.exists('C:\\ProgramData\\SideBar')):
        with open('C:\\ProgramData\\SideBar\\position.log','w+') as f:
            if(right):
                f.write('right')
            else:
                f.write('left')
    else:
        os.mkdir('C:\\ProgramData\\SideBar')
        with open('C:\\ProgramData\\SideBar\\position.log','w+') as f:
            if(right):
                f.write('right')
            else:
                f.write('left')
def resource_path(relative_path):
    """Resolve *relative_path* against the PyInstaller extraction directory
    (``sys._MEIPASS``) when running frozen, else against this file's folder."""
    if hasattr(sys, '_MEIPASS'):
        base_path = sys._MEIPASS
    else:
        base_path = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(base_path, relative_path)
def assigninterface(event):
    """Combobox-selection handler: switch the monitored network interface
    and persist the choice to netinterfacedata.log."""
    global interface_list_at_startup,interface
    interface=event.widget.get()
    if(os.path.exists('C:\\ProgramData\\SideBar')):
        with open('C:\\ProgramData\\SideBar\\netinterfacedata.log','w+') as f:
            f.write(interface)
    else:
        os.mkdir('C:\\ProgramData\\SideBar')
        with open('C:\\ProgramData\\SideBar\\netinterfacedata.log','w+') as f:
            f.write(interface)
    # Re-baseline the "up" interface list so bar() doesn't immediately switch away.
    interface_list_at_startup=[itf for itf in list(dict.keys(ps.net_if_stats())) if(ps.net_if_stats()[itf].isup)]
'''---------------------------------netspeed code above--------------------------------'''
'''----------------------------------music player below-----------------------------'''
def filechooser():
    """Ask the user for a music directory, remember it in dir.txt and rescan."""
    global loc,dire,dircontent
    dire=filedialog.askdirectory()
    with open('C:\\ProgramData\\SideBar\\dir.txt','w') as f:
        # NOTE(review): f.write() returns a character count, not the text;
        # the next line overwrites dircontent with the actual path.
        dircontent=f.write(dire)
    dircontent=dire
    wlkfunc(dire)
def autowlk(dire):
    """Rescan *dire* for .mp3 files in the background without clearing the
    on-screen list first (startup variant of wlkfunc).

    Fix: the previous ``global c,r,d`` declared ``r``, a name that is never
    defined or used anywhere; it has been removed.
    """
    global c, d
    d.clear()
    c = 0
    # Scan on a worker thread so the UI stays responsive.
    thread3 = threading.Thread(target=walk, args=(dire, '.mp3'))
    thread3.start()
def wlkfunc(dire):
    """Clear the song table, then rescan *dire* for .mp3 files on a worker thread."""
    global c, musiclist, d
    d.clear()
    c = 0
    # Empty the Treeview before repopulating it from the new directory.
    for row in musiclist.get_children():
        musiclist.delete(row)
    threading.Thread(target=walk, args=(dire, '.mp3')).start()
def walk(dirn,s):
    """Recursively scan *dirn* for files whose name contains *s* (e.g. '.mp3'),
    inserting each hit into the musiclist Treeview and recording its full
    path in the global dict ``d``.

    Fix: the error branch called ``sre(e)`` (a NameError, it crashed instead
    of reporting) — now ``str(e)``, passed as a 1-tuple like the other inserts.
    """
    global d,c,musiclist
    try:
        for i in os.listdir(dirn):
            p=os.path.join(dirn,i)
            # Skip Windows system folders.
            if('$RECYCLE.BIN' in p or '\\System Volume Information' in p or '/System Volume Information' in p):
                pass
            elif(os.path.isfile(p)):
                if(s in str(p)):
                    try:
                        c+=1
                        # Alternate row tags give the striped list effect.
                        musiclist.insert("",'end',text=str(c),value=(str(i),),tags = ('odd' if c%2==0 else 'even',))
                        d[i]=p
                    except:
                        pass
            else:
                # Recurse into subdirectories; ignore unreadable ones.
                try:
                    walk(p,s)
                except:
                    pass
        musiclist.bind("<Double-Button-1>",OnDouble)
        musiclist.bind("<Return>",OnDouble)
        '''musiclist.bind("<Down>",OnDown)
        musiclist.bind("<Up>",OnUp)'''
    except Exception as e:
        # Show the failure in the list instead of crashing the scan thread.
        musiclist.insert("",'end',text="n/a",value=(str(e),),tags = ('odd',))
def OnDouble(event):
    """Treeview double-click / Return handler: play the focused row's song."""
    global song, songnum, songinfo, totlength, pre, d
    pre = 0
    tree = event.widget
    selected = tree.item(tree.focus())['values'][0]
    song = selected
    collectsonginfoandplay(selected)
def collectsonginfoandplay(sng):
    """Locate *sng* in the song table, read its length via mutagen, set up the
    time slider/labels and start playback."""
    # NOTE(review): songinfo is assigned below but is NOT in this global list,
    # so it stays local here (the module-level songinfo is untouched).
    global song,songnum,prelist,i,totlength,musiclist
    song=sng
    # Find the row index of the song by its displayed name.
    for index,child in enumerate(musiclist.get_children()):
        if(song==musiclist.item(child)["values"][0]):
            songnum=index
            break
    prelist.append(songnum)
    # Highlight and scroll to the row.
    songid=musiclist.get_children()[songnum]
    musiclist.selection_set(songid)
    musiclist.see(songid)
    songinfo=mutagen.File(d[song])
    totlength=songinfo.info.length
    timeslider.configure(from_=0, to=int(totlength))
    # Format the total length as HH:MM:SS for the label.
    totsec=int(totlength)%60
    totmin=int(totlength)//60
    tothr=totmin//60
    totv.set('%02d:%02d:%02d'%(tothr,totmin,totsec))
    i=0
    play(d[song])
def playorpause():
    """Toggle playback: resume when paused, pause when playing.

    Fix: replaced the non-idiomatic identity test ``playing is False`` with a
    truthiness test (PEP 8: never compare to True/False with ``is``).
    """
    if not playing:
        continu()
    else:
        pause()
def play(song):
    """(Re)initialise the mixer at the track's sample rate, start playback of
    *song* (a file path) and update the album art / play button.

    Note: the *song* parameter shadows the module-level ``song`` here.
    """
    global playing,playbut,pauimg,thread,playlabel,t,artimg,v
    #pygame.init()
    try:
        # Re-init the mixer per track so the sample rate matches the file
        # (otherwise playback speed/pitch would be wrong).
        pygame.mixer.quit()
        mp = mp3.MP3(song)
        pygame.mixer.init(frequency=mp.info.sample_rate)
        clock = pygame.time.Clock()
        pygame.mixer.music.set_volume(v)
        pygame.mixer.music.load(song)
        pygame.mixer.music.play()
        albumart(song)
        playlabel.config(image=artimg)
        playing=True
        playbut.config(image=pauimg)
    except Exception as e:
        # NOTE(review): failures (bad file, mixer init, missing widget) are
        # silently swallowed -- the UI simply does not change.
        pass
def albumart(song):
    """Extract the embedded album art of *song* into the global ``artimg``
    PhotoImage, falling back to the bundled art.png when none can be read."""
    global artimg
    file = File(song)
    try:
        # Common case: artwork stored under the bare 'APIC:' ID3 key.
        artwork = file.tags['APIC:'].data
        with open('C:\\ProgramData\\SideBar\\image.jpg', 'wb') as im:
            im.write(artwork)
        img=Image.open('C:\\ProgramData\\SideBar\\image.jpg')
        # Scale to the bar's width (150px at the design width dw).
        img.resize((int(ws*(150/dw)),int(ws*(150/dw)))).save('C:\\ProgramData\\SideBar\\art.png')
        artimg = tk.PhotoImage(file="C:\\ProgramData\\SideBar\\art.png")
    except:
        try:
            # Fallback: the APIC frame has a description suffix. Recover it
            # from the string form of the tags, then look up 'APIC:<desc>'.
            f=File(song)
            s=str(mutagen.File(song).tags)
            n=s[:s.find('data=b')][s.find('APIC:'):]
            n=n[n.find('desc=')+6:]
            n=n[:n.find('\'')]
            artwork=f.tags['APIC:'+n].data
            with open('C:\\ProgramData\\SideBar\\image.jpg', 'wb') as im:
                im.write(artwork)
            img=Image.open('C:\\ProgramData\\SideBar\\image.jpg')
            img.resize((int(ws*(150/dw)),int(ws*(150/dw)))).save('C:\\ProgramData\\SideBar\\art.png')
            artimg = tk.PhotoImage(file="C:\\ProgramData\\SideBar\\art.png")
        except:
            # No readable artwork: use the default image shipped with the app.
            img=Image.open(resource_path("art.png"))
            img.resize((int(ws*(150/dw)),int(ws*(150/dw)))).save('C:\\ProgramData\\SideBar\\art.png')
            artimg = tk.PhotoImage(file="C:\\ProgramData\\SideBar\\art.png")
def middleplay(event):
    """Time-slider click handler: seek within the current song.

    Maps the click's x pixel (slider is 300px wide at design width dw) to a
    position in seconds, then restarts playback at that position.
    """
    global totlength,pre,song
    timepos=int((event.x*totlength)/int(ws*(300/dw)))
    timeslider.set(timepos)
    # set_pos only works on a freshly started stream, so stop/load/play first.
    pygame.mixer.music.stop()
    pygame.mixer.music.load(d[song])
    pygame.mixer.music.play()
    # Remember the offset: get_pos() restarts at 0 after a seek (see playtime()).
    pre=timepos
    pygame.mixer.music.set_pos(timepos)
def middlevol(event):
    """Volume-slider click handler: map the click height to a volume in
    [0.0, 1.0], apply it and persist it to vol.txt.

    Fix: ``v`` was previously assigned as a function local only, so the
    module-level volume used by volscroll/volincrease/voldecrease went stale
    after any slider click. It is now declared global.
    """
    global v
    # Slider is 140px tall at design height dh; invert y so up = louder.
    posy=int((hs*140)/dh)-event.y
    rng=int(int(posy*100)/int((hs*140)/dh))
    v=float('%.1f'%(rng/100))
    vol.set(rng)
    pygame.mixer.music.set_volume(v)
    with open('C:\\ProgramData\\SideBar\\vol.txt','w') as f:
        f.write('vol:'+str(v))
def playtime():
    """Worker loop: mirror the playback position into the time label and
    slider, and auto-advance to the next song when the current one ends.

    Fix: replaced ``pt is -1`` and ``pt is ''`` with ``==``. Identity
    comparisons against int/str literals only worked by CPython interning
    accident and emit SyntaxWarning on Python 3.8+.
    """
    global tv,hr,minit,sec,pt,timeslider,pre,i,live
    while(live):
        try:
            pt=pygame.mixer.music.get_pos()
            # get_pos() returns -1 once playback has stopped.
            if(pt == -1):
                timeslider.set(0)
                sec=0
                minit=0
                hr=0
                if(songnum is not None):
                    i=0
                    # Double-check after a short wait before auto-advancing,
                    # to avoid racing a track that is just starting.
                    time.sleep(.5)
                    if(pygame.mixer.music.get_pos() == -1):
                        nextsong()
            else:
                # Milliseconds -> seconds by dropping the last three digits.
                pt=str(pt)
                pt=pt[:-3]
                if(pt == ''):
                    pt='0000'
                pt=int(pt)
                # Add the seek offset (get_pos() restarts at 0 after set_pos()).
                pt+=pre
                timeslider.set(pt)
                sec=pt%60
                minit=pt//60
                hr=minit//60
                # NOTE(review): minit is total minutes (not mod 60), matching
                # how the total-length label is computed elsewhere.
                pt='%02d:%02d:%02d'%(hr,minit,sec)
                tv.set(pt)
                time.sleep(1)
        except Exception:
            time.sleep(.5)
def continu():
    """Resume paused playback and show the pause icon on the play button."""
    global playing, playbut, pauimg
    pygame.mixer.music.unpause()
    playbut.config(image=pauimg)
    playing = True
def pause():
    """Pause playback and show the play icon on the play button."""
    global playing, playbut, playimg
    pygame.mixer.music.pause()
    playbut.config(image=playimg)
    playing = False
def nextsong():
    """Advance to the next song (wrapping at the end), honouring shuffle,
    then start playing it.

    Fix: shuffle used ``random.randint(0, c+1)``. randint's upper bound is
    inclusive, so it could yield row indices ``c`` and ``c+1`` for a list of
    ``c`` songs (valid indices 0..c-1), raising IndexError on
    ``get_children()[songnum]``. The bound is now ``c - 1``.
    """
    global song, songnum, totlength, songinfo, pre, i, musiclist, d, c, shuffle, prelist
    pre = 0
    # Sequential advance with wrap-around.
    if songnum + 1 > c - 1:
        songnum = 0
    else:
        songnum += 1
    if shuffle:
        songnum = random.randint(0, c - 1)
        # Keep the history free of duplicates so "previous" walks it cleanly.
        if songnum in prelist:
            del prelist[prelist.index(songnum)]
    prelist.append(songnum)
    # Highlight and scroll to the selected row.
    songid = musiclist.get_children()[songnum]
    musiclist.selection_set(songid)
    musiclist.see(songid)
    song = musiclist.item(songid)['values'][0]
    songinfo = mutagen.File(d[song])
    totlength = songinfo.info.length
    timeslider.configure(from_=0, to=int(totlength))
    totsec = int(totlength) % 60
    totmin = int(totlength) // 60
    tothr = totmin // 60
    totv.set('%02d:%02d:%02d' % (tothr, totmin, totsec))
    i = 0
    play(d[song])
def previoussong():
    """Step back to the previous song (or pop the shuffle history) and play it."""
    global song,songnum,totlength,songinfo,pre,i,musiclist,d,prelist
    pre=0
    # Wrap to the last song when stepping back from the first.
    if songnum-1 < 0:
        songnum=c-1
    else:
        songnum-=1
    if shuffle:
        if not prelist:
            # BUGFIX: valid indices are 0..c-1; randint(0, c+1) could
            # overflow the playlist and crash get_children()[songnum].
            songnum=random.randint(0,c-1)
        else:
            # Pop the most recent pick, then go back to the one before it.
            del prelist[len(prelist)-1]
            if not prelist:
                songnum=random.randint(0,c-1)
                prelist=[songnum]
            else:
                songnum=prelist[len(prelist)-1]
    songid=musiclist.get_children()[songnum]
    musiclist.selection_set(songid)
    musiclist.see(songid)
    song = musiclist.item(songid)['values'][0]
    songinfo=mutagen.File(d[song])
    totlength=songinfo.info.length
    timeslider.configure(from_=0, to=int(totlength))
    totsec=int(totlength)%60
    totmin=int(totlength)//60
    tothr=totmin//60
    totv.set('%02d:%02d:%02d'%(tothr,totmin,totsec))
    i=0
    play(d[song])
def muteorunmute():
    """Toggle mute: remember the current volume on mute, restore it on unmute,
    swap the speaker icon, and persist the state to disk."""
    global mute,prevol,v,audio,theme,colrlst
    try:
        if(mute):
            # Unmute: restore the volume saved when muting.
            v=prevol
            pygame.mixer.music.set_volume(v)
            vol.set(v*100)
            mutebut.configure(image=soundimg)
            mute=False
            with open('C:\\ProgramData\\SideBar\\mute.txt','w') as f:
                f.write('false')
        else:
            # Mute: stash the volume, then drop to zero.
            prevol=v
            v=0.0
            pygame.mixer.music.set_volume(v)
            mutebut.configure(image=muteimg)
            vol.set(v*100)
            mute=True
            with open('C:\\ProgramData\\SideBar\\mute.txt','w') as f:
                f.write('true')
    except:
        # Deliberate best-effort: ignore UI/file errors during shutdown.
        pass
def initMixer():
    """(Re)initialise the pygame mixer with the current device settings and
    a small buffer for low-latency playback."""
    BUFFER = 3072
    FREQ, SIZE, CHAN = getmixerargs()
    pygame.mixer.init(FREQ, SIZE, CHAN, BUFFER)
def getmixerargs():
    """Initialise the mixer and report its (frequency, size, channels)."""
    pygame.mixer.init()
    return pygame.mixer.get_init()
def volscroll(event):
    """Mouse-wheel handler: step the volume by 0.1, staying inside [0.0, 1.0]."""
    global v, vol
    if event.delta < 0 and v - 0.1 >= 0.0:
        voldecrease()
    elif event.delta > 0 and v + 0.1 <= 1.0:
        volincrease()
def volincrease():
    """Raise volume one 0.1 step (capped at 1.0), update the slider and
    persist the new value to disk."""
    global v,vol
    v=float('%.1f'%(v))
    # BUGFIX: the old guard `not v > 1.0` still fired at v == 1.0 and
    # pushed v to 1.1; only step while strictly below the maximum.
    if v < 1.0:
        v+=0.1
    v=float('%.1f'%(v))
    pygame.mixer.music.set_volume(v)
    vol.set(v*100)
    with open('C:\\ProgramData\\SideBar\\vol.txt','w') as f:
        f.write('vol:'+str(v))
def voldecrease():
    """Lower volume one 0.1 step (floored at 0.0), update the slider and
    persist the new value to disk."""
    global v,vol
    v=float('%.1f'%(v))
    # BUGFIX: the old guard `not v < 0.0` still fired at v == 0.0 and
    # drove v to -0.1; only step while strictly above the minimum.
    if v > 0.0:
        v-=0.1
    v=float('%.1f'%(v))
    pygame.mixer.music.set_volume(v)
    vol.set(v*100)
    with open('C:\\ProgramData\\SideBar\\vol.txt','w') as f:
        f.write('vol:'+str(v))
def namedisp():
    """Background loop: marquee-scroll the current song title in the
    now-playing label, one character every half second."""
    global prsntname,song,i,live
    i=0
    try:
        while live:
            name=song
            prsntname=name[i:]
            i+=1
            playv.set(prsntname)
            # BUGFIX: compare with ==, not `is`; int identity only holds
            # for small cached ints, so long titles never reset the scroll.
            if i == len(name)-1:
                i=0
            time.sleep(0.5)
    except:
        # Best-effort: if the UI is gone, stop the music too.
        pygame.mixer.music.stop()
def searchmusic():
    """Filter the playlist view to songs whose name contains the search text
    (case-insensitive), and reveal the back/location/refresh buttons."""
    global search,musiclist,c,d,backbut,r,loc,locbut,d,refreshbut,nety
    # Clear the current list before re-populating it.
    for el in musiclist.get_children():
        musiclist.delete(el)
    searchkey=search.get().lower()
    backbut.place(x=int(ws*(205/dw)),y=nety+int(hs*(405/dh)))
    locbut.place(x=int(ws*(235/dw)),y=nety+int(hs*(405/dh)))
    refreshbut.place(x=int(ws*(265/dw)),y=nety+int(hs*(405/dh)))
    # c is the global visible-song count used elsewhere for navigation.
    c=0
    for i in d:
        if(searchkey in i.lower()):
            c+=1
            musiclist.insert("",'end',text=str(c),value=(str(i),),tags = ('odd' if c%2==0 else 'even',))
def backlist():
    """Leave search mode: reload the full song list from the saved directory
    and hide the back button off-screen."""
    global dircontent,locbut,d,refreshbut,nety
    if(os.path.isdir(dircontent)):
        wlkfunc(dircontent)
    # x=ws places the back button just outside the visible sidebar width.
    backbut.place(x=ws,y=nety+int(hs*(405/dh)))
    locbut.place(x=int(ws*(205/dw)),y=nety+int(hs*(405/dh)))
    refreshbut.place(x=int(ws*(235/dw)),y=nety+int(hs*(405/dh)))
def refresh():
    """Re-scan the music directory and rebuild the song dictionary."""
    global d
    if(os.path.isdir(dircontent)):
        wlkfunc(dircontent)
def shufflesong():
    """Toggle shuffle mode, update the shuffle-button icon, and persist the
    state so it survives restarts."""
    global shuffle
    if(shuffle is False):
        shuffle=True
        shufbut.configure(image=shuffleimg)
        with open('C:\\ProgramData\\SideBar\\shuf.txt','w') as f:
            f.write('shuffle:1')
    else:
        shuffle=False
        shufbut.configure(image=shuffleoffimg)
        with open('C:\\ProgramData\\SideBar\\shuf.txt','w') as f:
            f.write('shuffle:0')
def clear_entry(event, entry):
    """Event handler: wipe the contents of the given Entry widget."""
    entry.delete(0, tk.END)
# Restore persisted state (music directory, volume, shuffle flag) from disk.
if os.path.exists('C:\\ProgramData\\SideBar\\dir.txt'):
    with open('C:\\ProgramData\\SideBar\\dir.txt','r') as f:
        dircontent=f.readline()
if os.path.exists('C:\\ProgramData\\SideBar\\vol.txt'):
    with open('C:\\ProgramData\\SideBar\\vol.txt','r') as f:
        volinfo=f.readline()
else:
    volinfo='vol:1.0'
if os.path.exists('C:\\ProgramData\\SideBar\\shuf.txt'):
    with open('C:\\ProgramData\\SideBar\\shuf.txt','r') as f:
        shufinfo=f.readline()
    if 'shuffle' in shufinfo:
        # BUGFIX: compare the stored flag character with ==, not `is`;
        # string-literal identity is an implementation detail.
        shuffle = (shufinfo[8] == '1')
'''---------------------------------music player above---------------------------'''
def showFrame1():
    """Animate frame 1 into view on a daemon background thread."""
    global fr,fr2
    worker = threading.Thread(target=moveright, daemon=True)
    worker.start()
def showFrame2():
    """Animate frame 2 into view on a daemon background thread."""
    global fr,fr2
    worker = threading.Thread(target=moveleft, daemon=True)
    worker.start()
def moveright():
    """Slide frame fr in from the left while fr2 slides out to the right,
    16 pixels per step, then snap both to their final positions."""
    global fr,fr2
    for i in range(0,int(ws),16):
        fr.place(x=-int(ws)+i,y=int(hs*(25/dh)))
        fr2.place(x=i,y=int(hs*(25/dh)))
        time.sleep(.001)
    # Snap exactly into place (the loop step may overshoot/undershoot).
    fr.place(x=0,y=int(hs*(25/dh)))
    fr2.place(x=int(ws),y=int(hs*(25/dh)))
def moveleft():
    """Slide frame fr out to the left while fr2 slides in from the right,
    16 pixels per step, then snap both to their final positions."""
    global fr,fr2
    for i in range(0,int(ws),16):
        fr.place(x=-i,y=int(hs*(25/dh)))
        fr2.place(x=int(ws)-i,y=int(hs*(25/dh)))
        time.sleep(.001)
    # Snap exactly into place.
    fr.place(x=-int(ws),y=int(hs*(25/dh)))
    fr2.place(x=0,y=int(hs*(25/dh)))
# Build the main window: a sidebar 20% of the screen wide, full height.
app=tk.Tk()
ws = app.winfo_screenwidth()*(20/100)
hs = app.winfo_screenheight()-40
# `right` selects which screen edge the sidebar docks to; xleave parks it
# almost entirely off-screen, xhover brings it fully into view.
if(right):
    xleave = app.winfo_screenwidth()-2
    xhover=app.winfo_screenwidth()-ws+2
else:
    xhover =-1
    xleave=2-ws
y = -1
app.geometry('%dx%d+%d+%d' % (ws, hs, xleave, y))
mainfr=tk.Frame(app,background='black',height = hs, width =ws)
mainfr.pack()
# Slide in on mouse enter, slide away on leave.
mainfr.bind("<Enter>",start_hovereffect )
mainfr.bind("<Leave>",start_leaveeffect )
# Fall back to black when no background colour was configured, or when the
# stored value is not a colour tkinter accepts.
# BUGFIX: compare strings with ==, not `is` (literal identity is unreliable).
if bgr == "":
    bgr='#000000'
try:
    fr=tk.Frame(mainfr,background=bgr,height = hs, width =ws)
except tk.TclError:
    # Invalid colour string in the config file — use the default.
    bgr='#000000'
    fr=tk.Frame(mainfr,background=bgr,height = hs, width =ws)
fr.place(x=0,y=int(hs*(25/dh)))
# Close button: disabled until hovered so stray clicks can't quit the app.
buttonClose = tk.Button(mainfr,text='X' ,background='red',height = int(hs*(1/dh)), width =int(ws*(5/dw)),borderwidth=0,command =on_closing,font='Helvetica %d bold'%(int(ws*(10/dw))),state=tk.DISABLED)
buttonClose.place( x =2, y = 0)
buttonClose.bind("<Enter>", lambda event: buttonClose.config(state=tk.NORMAL))
buttonClose.bind("<Leave>", lambda event: buttonClose.config(state=tk.DISABLED))
# Colour-picker button with inverted colours on hover.
buttonColor = tk.Button(mainfr,text='Color' ,foreground='white',background='black',height = int(hs*(1/dh)), width =int(ws*(6/dw)),borderwidth=0,command =choosecolor,font='Helvetica %d bold'%(int(ws*(9/dw))))
buttonColor.place( x =int(ws*(50/dw)), y = 0)
buttonColor.bind("<Enter>", lambda event: buttonColor.config(background='white',foreground='black'))
buttonColor.bind("<Leave>", lambda event: buttonColor.config(background='black',foreground='white'))
# Dock-side toggle button (left/right edge), same hover inversion.
buttonPosition = tk.Button(mainfr ,foreground='white',background='black',height = int(hs*(1/dh)), width =int(ws*(13/dw)),borderwidth=0,command =position,font='Helvetica %d bold'%(int(ws*(9/dw))))
buttonPosition.place( x =int(ws*(100/dw)), y = 0)
buttonPosition.bind("<Enter>", lambda event: buttonPosition.config(background='white',foreground='black'))
buttonPosition.bind("<Leave>", lambda event: buttonPosition.config(background='black',foreground='white'))
# Tab buttons on frame 1: the active tab (Tab1) is red and has no command.
tab1=tk.Button(fr,text='Tab1',foreground='white',background='red',height = int(hs*(1/dh)), width =int(ws*(6/dw)),borderwidth=0,font='Helvetica %d bold'%(int(ws*(9/dw))))
tab1.place(x =int(ws*(50/dw)), y = 2)
tab2=tk.Button(fr,text='Tab2',command=showFrame2,foreground='white',background='black',height = int(hs*(1/dh)), width =int(ws*(6/dw)),borderwidth=0,font='Helvetica %d bold'%(int(ws*(9/dw))))
tab2.place(x =int(ws*(100/dw)), y = 2)
# Second frame starts parked off-screen to the right of the sidebar.
fr2=tk.Frame(mainfr,width=ws,height=hs)
fr2.place(x=ws,y=int(hs*(25/dh)))
fr2.configure(background=bgr)
# Mirrored tab buttons on frame 2: here Tab2 is the active (red) one.
tab1=tk.Button(fr2,text='Tab1',command=showFrame1,foreground='white',background='black',height = int(hs*(1/dh)), width =int(ws*(6/dw)),borderwidth=0,font='Helvetica %d bold'%(int(ws*(9/dw))))
tab1.place(x =int(ws*(50/dw)), y = 2)
tab2=tk.Button(fr2,text='Tab2',foreground='white',background='red',height = int(hs*(1/dh)), width =int(ws*(6/dw)),borderwidth=0,font='Helvetica %d bold'%(int(ws*(9/dw))))
tab2.place(x =int(ws*(100/dw)), y = 2)
# Dead code kept by the author: earlier single-widget notes layout.
'''note=ScrolledText(fr2,background='yellow',width=int((ws*30)/dw),height=int((hs*25)/dh),font='Helvetica %d'%(int(ws*(12/dw))))
note.place(x=0,y=int(hs*(40/dh)))
note.insert(tk.INSERT, cntnt)'''
# Notes tab: an auto-save countdown label above a scrolled text editor.
autosavelabel = tk.Label(fr2 ,text = "Auto Save in: 0Sec",background='black',foreground='white',font='Helvetica %d bold'%(int(ws*(12/dw))))
autosavelabel.place(x=0,y=int(hs*(40/dh)))
notefr=tk.Frame(fr2,background=bgr)
note=ScrolledText(notefr,background='yellow',font='serif %d bold'%(int(ws*(10/dw))))
note.pack()
notefr.pack()
notefr.place(x=0,y=int(hs*(80/dh)),width=ws,height=hs-int(hs*(80/dh)))
note.insert(tk.INSERT, | |
ok
def do_setup(self):
    """Run all setup callables with the current parameters.

    Returns True when the benchmark should be *skipped* (a setup raised
    NotImplementedError), False when setup succeeded.
    """
    try:
        for setup in self._setups:
            setup(*self._current_params)
    except NotImplementedError as e:
        # allow skipping test
        print(f"asv: skipped: {e !r} ")
        return True
    return False
def redo_setup(self):
    """Tear down and re-run setup, except on the very first invocation
    after the flag was cleared (the initial setup has already run)."""
    if not self._redo_setup_next:
        self._redo_setup_next = True
        return
    self.do_teardown()
    self.do_setup()
def do_teardown(self):
    """Run every teardown callable with the current parameters."""
    for teardown in self._teardowns:
        teardown(*self._current_params)
def do_setup_cache(self):
    """Invoke setup_cache() if defined; returns its result (or None)."""
    if self._setup_cache is not None:
        return self._setup_cache()
def do_run(self):
    """Execute the benchmark with the current parameters."""
    return self.run(*self._current_params)
def do_profile(self, filename=None):
    """Run the benchmark under cProfile, writing stats to `filename`.

    The code object of `method_caller` is executed via runctx with `run`
    and `params` injected as globals — so `run` *is* defined at exec time
    even though it looks undefined here.
    """
    def method_caller():
        run(*params) # noqa:F821 undefined name see #1020 Bug: run() function is not defined
    if profile is None:
        raise RuntimeError("cProfile could not be imported")
    if filename is not None:
        # func_code is the Python 2 spelling of __code__.
        if hasattr(method_caller, 'func_code'):
            code = method_caller.func_code
        else:
            code = method_caller.__code__
        self.redo_setup()
        profile.runctx(
            code, {'run': self.func, 'params': self._current_params},
            {}, filename)
class TimeBenchmark(Benchmark):
    """
    Represents a single benchmark for timing.
    """
    # Matches TimeSomething classes and time_something functions.
    name_regex = re.compile(
        '^(Time[A-Z_].+)|(time_.+)$')

    def __init__(self, name, func, attr_sources):
        Benchmark.__init__(self, name, func, attr_sources)
        self.type = "time"
        self.unit = "seconds"
        self._attr_sources = attr_sources
        # 'processes' is the pre-rename spelling of 'rounds'.
        old = int(_get_first_attr(self._attr_sources, 'processes', 2)) # backward compat.
        self.rounds = int(_get_first_attr(self._attr_sources, 'rounds', old))
        self._load_vars()

    def _load_vars(self):
        """Pull the timing knobs (repeat, number, warmup, timer, ...) from
        the benchmark's attribute sources."""
        self.repeat = _get_first_attr(self._attr_sources, 'repeat', 0)
        self.min_run_count = _get_first_attr(self._attr_sources, 'min_run_count', 2)
        self.number = int(_get_first_attr(self._attr_sources, 'number', 0))
        self.sample_time = _get_first_attr(self._attr_sources, 'sample_time', 0.01)
        self.warmup_time = _get_first_attr(self._attr_sources, 'warmup_time', -1)
        self.timer = _get_first_attr(self._attr_sources, 'timer', wall_timer)

    def do_setup(self):
        result = Benchmark.do_setup(self)
        # For parameterized tests, setup() is allowed to change these
        self._load_vars()
        return result

    def _get_timer(self, *param):
        """Build a timeit.Timer whose per-repeat setup re-runs benchmark setup."""
        if param:
            def func():
                self.func(*param)
        else:
            func = self.func
        timer = timeit.Timer(
            stmt=func,
            setup=self.redo_setup,
            timer=self.timer)
        return timer

    def run(self, *param):
        """Time the benchmark; returns {'samples': seconds-per-call list,
        'number': inner-loop iteration count}."""
        warmup_time = self.warmup_time
        if warmup_time < 0:
            if '__pypy__' in sys.modules:
                warmup_time = 1.0
            else:
                # Transient effects exist also on CPython, e.g. from
                # OS scheduling
                warmup_time = 0.1
        timer = self._get_timer(*param)
        try:
            # repeat may be a (min, max, max_time) triple...
            min_repeat, max_repeat, max_time = self.repeat
        except (ValueError, TypeError):
            # ...or a plain scalar (0 means "choose defaults").
            if self.repeat == 0:
                min_repeat = 1
                max_repeat = 10
                max_time = 20.0
                if self.rounds > 1:
                    # Budget is shared across rounds.
                    max_repeat //= 2
                    max_time /= 2.0
            else:
                min_repeat = self.repeat
                max_repeat = self.repeat
                max_time = self.timeout
        min_repeat = int(min_repeat)
        max_repeat = int(max_repeat)
        max_time = float(max_time)
        samples, number = self.benchmark_timing(timer, min_repeat, max_repeat,
                                                max_time=max_time,
                                                warmup_time=warmup_time,
                                                number=self.number,
                                                min_run_count=self.min_run_count)
        # Normalise each sample to seconds per single call.
        samples = [s / number for s in samples]
        return {'samples': samples, 'number': number}

    def benchmark_timing(self, timer, min_repeat, max_repeat, max_time, warmup_time,
                         number, min_run_count):
        """Collect raw timing samples; auto-select `number` (and warm up)
        when number == 0. Returns (samples, number)."""
        sample_time = self.sample_time
        start_time = wall_timer()
        run_count = 0
        samples = []

        def too_slow(num_samples):
            # stop taking samples if limits exceeded
            if run_count < min_run_count:
                return False
            if num_samples < min_repeat:
                return False
            return wall_timer() > start_time + warmup_time + max_time

        if number == 0:
            # Select number & warmup.
            #
            # This needs to be done at the same time, because the
            # benchmark timings at the beginning can be larger, and
            # lead to too small number being selected.
            number = 1
            while True:
                self._redo_setup_next = False
                start = wall_timer()
                timing = timer.timeit(number)
                wall_time = wall_timer() - start
                actual_timing = max(wall_time, timing)
                run_count += number
                if actual_timing >= sample_time:
                    if wall_timer() > start_time + warmup_time:
                        break
                else:
                    # Grow number geometrically toward one sample_time per batch.
                    try:
                        p = min(10.0, max(1.1, sample_time / actual_timing))
                    except ZeroDivisionError:
                        p = 10.0
                    number = max(number + 1, int(p * number))
            if too_slow(1):
                return [timing], number
        elif warmup_time > 0:
            # Warmup
            while True:
                self._redo_setup_next = False
                timing = timer.timeit(number)
                run_count += number
                if wall_timer() >= start_time + warmup_time:
                    break
            if too_slow(1):
                return [timing], number

        # Collect samples
        while len(samples) < max_repeat:
            timing = timer.timeit(number)
            run_count += number
            samples.append(timing)
            if too_slow(len(samples)):
                break
        return samples, number
class _SeparateProcessTimer:
    """Timer that runs the (stmt, setup) code returned by `func` in a fresh
    subprocess via timeit, so each measurement sees a cold interpreter."""

    # Template executed with `python -c`; prints the repr of the timing.
    subprocess_tmpl = textwrap.dedent('''
    from __future__ import print_function
    from timeit import timeit, default_timer as timer
    print(repr(timeit(stmt="""{stmt}""", setup="""{setup}""", number={number}, timer=timer)))
    ''').strip()

    def __init__(self, func):
        self.func = func

    def timeit(self, number):
        """Run the benchmark code `number` times in a subprocess and return
        the elapsed seconds as reported by the child."""
        stmt = self.func()
        # func may return just stmt, or a (stmt, setup) pair.
        if isinstance(stmt, tuple):
            stmt, setup = stmt
        else:
            setup = ""
        stmt = textwrap.dedent(stmt)
        setup = textwrap.dedent(setup)
        # Escape triple quotes so the code can be embedded in the template.
        # NOTE(review): only triple quotes are escaped — backslash-heavy
        # benchmark code could still break the generated source; confirm.
        stmt = stmt.replace(r'"""', r'\"\"\"')
        setup = setup.replace(r'"""', r'\"\"\"')
        code = self.subprocess_tmpl.format(stmt=stmt, setup=setup, number=number)
        res = subprocess.check_output([sys.executable, "-c", code])
        return float(res.strip())
class TimerawBenchmark(TimeBenchmark):
    """
    Represents a benchmark for tracking timing benchmarks run once in
    a separate process.
    """
    name_regex = re.compile(
        '^(Timeraw[A-Z_].+)|(timeraw_.+)$')

    def _load_vars(self):
        TimeBenchmark._load_vars(self)
        # Default to a single execution per subprocess run.
        self.number = int(_get_first_attr(self._attr_sources, 'number', 1))
        # The in-process timer attribute is meaningless here; remove it so
        # it cannot be used by mistake.
        del self.timer

    def _get_timer(self, *param):
        """Return a subprocess-based timer instead of timeit.Timer."""
        if param:
            def func():
                self.func(*param)
        else:
            func = self.func
        return _SeparateProcessTimer(func)

    def do_profile(self, filename=None):
        # Profiling happens in-process, which contradicts this type's design.
        raise ValueError("Raw timing benchmarks cannot be profiled")
class MemBenchmark(Benchmark):
    """
    Represents a single benchmark for tracking the memory consumption
    of an object.
    """
    name_regex = re.compile(
        '^(Mem[A-Z_].+)|(mem_.+)$')

    def __init__(self, name, func, attr_sources):
        Benchmark.__init__(self, name, func, attr_sources)
        self.type = "memory"
        self.unit = "bytes"

    def run(self, *param):
        """Return the deep size in bytes of the object produced by the
        benchmark function, measured as sizeof([obj, copy]) - sizeof([obj, obj])
        so shared container overhead cancels out."""
        import importlib.util  # local import; file-level imports untouched

        # We can't import asizeof directly, because we haven't loaded
        # the asv package in the benchmarking process.
        path = os.path.join(
            os.path.dirname(__file__), 'extern', 'asizeof.py')
        # FIX: SourceFileLoader.load_module() is deprecated and removed in
        # Python 3.12; load through a spec + exec_module instead.
        spec = importlib.util.spec_from_file_location('asizeof', path)
        asizeof = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(asizeof)
        obj = self.func(*param)
        sizeof2 = asizeof.asizeof([obj, obj])
        sizeofcopy = asizeof.asizeof([obj, copy.copy(obj)])
        return sizeofcopy - sizeof2
class PeakMemBenchmark(Benchmark):
    """
    Represents a single benchmark for tracking the peak memory consumption
    of the whole program.
    """
    name_regex = re.compile(
        '^(PeakMem[A-Z_].+)|(peakmem_.+)$')

    def __init__(self, name, func, attr_sources):
        Benchmark.__init__(self, name, func, attr_sources)
        self.type = "peakmemory"
        self.unit = "bytes"

    def run(self, *param):
        """Run the benchmark function, then report the process's peak RSS."""
        self.func(*param)
        return get_maxrss()
class TrackBenchmark(Benchmark):
    """
    Represents a single benchmark for tracking an arbitrary value.
    """
    name_regex = re.compile(
        '^(Track[A-Z_].+)|(track_.+)$')

    def __init__(self, name, func, attr_sources):
        Benchmark.__init__(self, name, func, attr_sources)
        # Type and unit are user-overridable for track benchmarks.
        self.type = _get_first_attr(attr_sources, "type", "track")
        self.unit = _get_first_attr(attr_sources, "unit", "unit")

    def run(self, *param):
        """Return whatever value the benchmark function reports."""
        return self.func(*param)
# TODO: Support the creation of custom benchmark types
# NOTE: order matters — TimerawBenchmark must precede TimeBenchmark, because
# the discovery loop picks the first class whose name_regex matches and
# 'timeraw_*' names would also match TimeBenchmark's 'time_*' pattern.
benchmark_types = [
    TimerawBenchmark, TimeBenchmark, MemBenchmark, PeakMemBenchmark, TrackBenchmark
]
class SpecificImporter:
    """
    Meta-path importer that resolves exactly one module name from exactly
    one directory.

    Installing an instance on ``sys.meta_path`` makes it possible to import
    the asv benchmark suite without adding its parent directory to
    ``sys.path`` — that directory may contain anything, including some
    version of the project module itself (common when asv.conf.json sits at
    the repository top level).
    """

    def __init__(self, name, root):
        self._name = name
        self._root = root

    def find_spec(self, fullname, path, target):
        """Return a spec for the one allowed module, None for anything else."""
        if fullname != self._name:
            return None
        if path is not None:
            # Our module is top-level; a parent path means something is wrong.
            raise ValueError()
        finder = importlib.machinery.PathFinder()
        return finder.find_spec(fullname, [self._root], target)
def update_sys_path(root):
    """Install a SpecificImporter so that only the benchmark package at
    `root` becomes importable, without touching sys.path."""
    sys.meta_path.insert(0, SpecificImporter(os.path.basename(root),
                                             os.path.dirname(root)))
def _get_benchmark(attr_name, module, klass, func):
    """Build a Benchmark object for `func` if its name matches a known
    benchmark type; return None otherwise.

    `klass` is the enclosing class (None for free functions); a custom
    `benchmark_name` attribute on the function overrides the derived name.
    """
    try:
        name = func.benchmark_name
    except AttributeError:
        name = None
        search = attr_name
    else:
        search = name.split('.')[-1]
    # First benchmark type whose regex matches wins.
    for cls in benchmark_types:
        if cls.name_regex.match(search):
            break
    else:
        return
    # relative to benchmark_dir
    mname_parts = module.__name__.split('.', 1)[1:]
    if klass is None:
        if name is None:
            name = ".".join(mname_parts + [func.__name__])
        sources = [func, module]
    else:
        # Bind the method to a fresh instance so setup/attrs resolve on it.
        instance = klass()
        func = getattr(instance, attr_name)
        if name is None:
            name = ".".join(mname_parts + [klass.__name__, attr_name])
        sources = [func, instance, module]
    return cls(name, func, sources)
def disc_modules(module_name, ignore_import_errors=False):
    """
    Recursively import *module_name* and every sub-module in the package.

    Yields
    ------
    module
        Each imported module in the package tree (the package itself first).
    """
    if ignore_import_errors:
        try:
            module = import_module(module_name)
        except BaseException:
            # Report the failure but keep discovery going.
            traceback.print_exc()
            return
    else:
        module = import_module(module_name)
    yield module
    pkg_path = getattr(module, '__path__', None)
    if pkg_path:
        # Only packages have __path__; recurse into their sub-modules.
        for _, child_name, _ in pkgutil.iter_modules(pkg_path, module_name + '.'):
            yield from disc_modules(child_name,
                                    ignore_import_errors=ignore_import_errors)
def disc_benchmarks(root, ignore_import_errors=False):
    """
    Discover all benchmarks in a given directory tree, yielding Benchmark
    objects
    For each class definition, looks for any methods with a
    special name.
    For each free function, yields all functions with a special
    name.
    """
    root_name = os.path.basename(root)
    for module in disc_modules(root_name, ignore_import_errors=ignore_import_errors):
        # Skip private names at module level.
        for attr_name, module_attr in (
            (k, v) for k, v in module.__dict__.items()
            if not k.startswith('_')
        ):
            if (inspect.isclass(module_attr) and
                    not inspect.isabstract(module_attr)):
                # Benchmark methods defined on concrete classes.
                for name, class_attr in inspect.getmembers(module_attr):
                    if (inspect.isfunction(class_attr) or
                            inspect.ismethod(class_attr)):
                        benchmark = _get_benchmark(name, module, module_attr,
                                                   class_attr)
                        if benchmark is not None:
                            yield benchmark
            elif inspect.isfunction(module_attr):
                # Free benchmark functions.
                benchmark = _get_benchmark(attr_name, module, None, module_attr)
                if benchmark is not None:
                    yield benchmark
def get_benchmark_from_name(root, name, extra_params=None):
"""
Create a benchmark from a fully-qualified benchmark name.
Parameters
----------
root : str
Path to the root of a benchmark suite.
name : str
Fully-qualified name to a specific benchmark.
"""
if '-' in name:
try:
name, param_idx = name.split('-', 1)
param_idx = int(param_idx)
except ValueError:
raise ValueError("Benchmark id %r is invalid" % (name,))
else:
param_idx = None
update_sys_path(root)
benchmark = None
# try to directly import benchmark function by guessing its import module
# name
parts = name.split('.')
for i in [1, 2]:
path = os.path.join(root, *parts[:-i]) + '.py'
if not os.path.isfile(path):
continue
modname = '.'.join([os.path.basename(root)] + | |
# Repository: Tubbz-alt/adam
import logging
from abc import ABC
from itertools import chain
from pathlib import Path
from random import Random
from attr import attrib, attrs, evolve
from attr.validators import instance_of, optional
from immutablecollections import (
ImmutableSet,
ImmutableSetMultiDict,
immutabledict,
immutableset,
immutablesetmultidict,
)
from vistautils.parameters import Parameters
from adam.language import LinguisticDescription
from typing import (
AbstractSet,
Iterable,
List,
Optional,
Sequence,
Union,
Tuple,
Dict,
Mapping,
)
from more_itertools import first
from adam.language_specific.chinese.chinese_phase_1_lexicon import (
GAILA_PHASE_1_CHINESE_LEXICON,
)
from adam.language_specific.english import DETERMINERS
from adam.learner import (
LearningExample,
get_largest_matching_pattern,
graph_without_learner,
)
from adam.learner.alignments import (
LanguagePerceptionSemanticAlignment,
PerceptionSemanticAlignment,
)
from adam.learner.cross_situational_learner import AbstractCrossSituationalLearner
from adam.learner.language_mode import LanguageMode
from adam.learner.learner_utils import (
assert_static_situation,
candidate_object_hypotheses,
covers_entire_utterance,
get_objects_from_perception,
)
from adam.learner.object_recognizer import (
ObjectRecognizer,
PerceptionGraphFromObjectRecognizer,
extract_candidate_objects,
replace_match_with_object_graph_node,
)
from adam.learner.perception_graph_template import PerceptionGraphTemplate
from adam.learner.propose_but_verify import AbstractProposeButVerifyLearner
from adam.learner.pursuit import (
AbstractPursuitLearner,
HypothesisLogger,
AbstractPursuitLearnerNew,
)
from adam.learner.subset import (
AbstractTemplateSubsetLearner,
AbstractTemplateSubsetLearnerNew,
)
from adam.learner.surface_templates import (
SurfaceTemplate,
SurfaceTemplateBoundToSemanticNodes,
)
from adam.learner.template_learner import (
AbstractTemplateLearner,
AbstractTemplateLearnerNew,
TemplateLearner,
)
from adam.ontology.ontology import Ontology
from adam.ontology.phase1_ontology import GAILA_PHASE_1_ONTOLOGY
from adam.ontology.phase1_spatial_relations import Region
from adam.perception import ObjectPerception, PerceptualRepresentation, MatchMode
from adam.perception.deprecated import LanguageAlignedPerception
from adam.perception.developmental_primitive_perception import (
DevelopmentalPrimitivePerceptionFrame,
RgbColorPerception,
)
from adam.perception.perception_graph import (
PerceptionGraph,
PerceptionGraphPattern,
PerceptionGraphPatternMatch,
GraphLogger,
)
from adam.random_utils import RandomChooser
from adam.semantics import (
Concept,
ObjectConcept,
GROUND_OBJECT_CONCEPT,
SemanticNode,
ObjectSemanticNode,
FunctionalObjectConcept,
SyntaxSemanticsVariable,
)
from adam.utils import networkx_utils
from adam.utils.networkx_utils import subgraph
class AbstractObjectTemplateLearnerNew(AbstractTemplateLearnerNew):
    """Shared machinery for new-style object learners: decides when learning
    is possible, strips the learner from the scene, proposes single-token
    templates, and wraps unrecognised objects after enrichment."""
    # pylint:disable=abstract-method

    def _can_learn_from(
        self, language_perception_semantic_alignment: LanguagePerceptionSemanticAlignment
    ) -> bool:
        # We can try to learn objects from anything, as long as the scene isn't already
        # completely understood.
        return (
            not language_perception_semantic_alignment.language_concept_alignment.is_entirely_aligned
        )

    def _preprocess_scene(
        self, perception_semantic_alignment: PerceptionSemanticAlignment
    ) -> PerceptionSemanticAlignment:
        # Avoid accidentally identifying a word with the learner itself.
        return perception_semantic_alignment.copy_with_updated_graph_and_added_nodes(
            new_graph=graph_without_learner(
                perception_semantic_alignment.perception_graph
            ),
            new_nodes=[],
        )

    def _candidate_templates(
        self, language_perception_semantic_alignment: LanguagePerceptionSemanticAlignment
    ) -> AbstractSet[SurfaceTemplateBoundToSemanticNodes]:
        """Propose one single-word template per unaligned, non-determiner token."""
        # We can only learn single words for objects at the moment.
        # See https://github.com/isi-vista/adam/issues/793 .
        # Attempt to align every unaligned token to some object in the scene.
        language_alignment = (
            language_perception_semantic_alignment.language_concept_alignment
        )
        ret = immutableset(
            SurfaceTemplateBoundToSemanticNodes(
                SurfaceTemplate.for_object_name(token, language_mode=self._language_mode),
                slot_to_semantic_node={},
            )
            for (tok_idx, token) in enumerate(
                language_alignment.language.as_token_sequence()
            )
            if not language_alignment.token_index_is_aligned(tok_idx)
            # ignore determiners
            and token not in DETERMINERS
        )
        return immutableset(
            bound_surface_template
            for bound_surface_template in ret
            # For now, we require templates to account for the entire utterance.
            # See https://github.com/isi-vista/adam/issues/789
            if covers_entire_utterance(
                bound_surface_template, language_alignment, ignore_determiners=True
            )
        )

    def _enrich_post_process(
        self,
        perception_graph_after_matching: PerceptionGraph,
        immutable_new_nodes: AbstractSet[SemanticNode],
    ) -> Tuple[PerceptionGraph, AbstractSet[SemanticNode]]:
        """Wrap every remaining raw object root in an 'unknown_object'
        semantic node so downstream learners see a uniform graph."""
        object_root_nodes = immutableset(  # pylint:disable=protected-access
            node
            for node in perception_graph_after_matching._graph.nodes  # pylint:disable=protected-access
            if isinstance(node, ObjectPerception)
        )
        new_nodes = []
        perception_graph_after_processing = perception_graph_after_matching
        for object_root_node in object_root_nodes:
            # Build a pattern for just this object's subgraph...
            fake_subgraph = subgraph(  # pylint:disable=protected-access
                perception_graph_after_matching._graph,  # pylint:disable=protected-access
                [object_root_node],
            )
            fake_perception_graph = PerceptionGraph(
                graph=fake_subgraph, dynamic=perception_graph_after_matching.dynamic
            )
            fake_pattern_graph = PerceptionGraphPattern.from_graph(fake_perception_graph)
            # ...and replace the match with a placeholder semantic node.
            fake_object_semantic_node = ObjectSemanticNode(
                concept=FunctionalObjectConcept("unknown_object")
            )
            # perception_graph_after_processing = replace_match_root_with_object_semantic_node(
            #     object_semantic_node=fake_object_semantic_node,
            perception_graph_after_processing = replace_match_with_object_graph_node(
                matched_object_node=fake_object_semantic_node,
                current_perception=perception_graph_after_processing,
                pattern_match=PerceptionGraphPatternMatch(
                    matched_pattern=fake_pattern_graph.perception_graph_pattern,
                    graph_matched_against=perception_graph_after_matching,
                    matched_sub_graph=fake_perception_graph,
                    pattern_node_to_matched_graph_node=fake_pattern_graph.perception_graph_node_to_pattern_node,
                ),
            ).perception_graph_after_replacement
            new_nodes.append(fake_object_semantic_node)
        return (
            perception_graph_after_processing,
            immutableset(chain(immutable_new_nodes, new_nodes)),
        )
class AbstractObjectTemplateLearner(AbstractTemplateLearner, ABC):
    """Old-style object learner base: validates static situations, extracts
    perception graphs, and strips the learner node before processing."""

    def _assert_valid_input(
        self,
        to_check: Union[
            LearningExample[DevelopmentalPrimitivePerceptionFrame, LinguisticDescription],
            PerceptualRepresentation[DevelopmentalPrimitivePerceptionFrame],
        ],
    ) -> None:
        assert_static_situation(to_check)

    def _extract_perception_graph(
        self, perception: PerceptualRepresentation[DevelopmentalPrimitivePerceptionFrame]
    ) -> PerceptionGraph:
        # Static situations have a single frame.
        return PerceptionGraph.from_frame(perception.frames[0])

    def _preprocess_scene_for_learning(
        self, language_concept_alignment: LanguageAlignedPerception
    ) -> LanguageAlignedPerception:
        return evolve(
            language_concept_alignment,
            perception_graph=self._common_preprocessing(
                language_concept_alignment.perception_graph
            ),
        )

    def _preprocess_scene_for_description(
        self, perception_graph: PerceptionGraph
    ) -> PerceptionGraphFromObjectRecognizer:
        return PerceptionGraphFromObjectRecognizer(
            self._common_preprocessing(perception_graph),
            description_to_matched_object_node=immutabledict(),
        )

    def _common_preprocessing(self, perception_graph: PerceptionGraph) -> PerceptionGraph:
        # Remove the learner itself from the scene in both directions.
        return graph_without_learner(perception_graph)

    def _extract_surface_template(
        self,
        language_concept_alignment: LanguageAlignedPerception,
        language_mode: LanguageMode = LanguageMode.ENGLISH,
    ) -> SurfaceTemplate:
        # NOTE(review): the `language_mode` parameter is ignored here;
        # self._language_mode is used instead — confirm that is intended.
        return SurfaceTemplate(
            language_concept_alignment.language.as_token_sequence(),
            language_mode=self._language_mode,
        )
@attrs
class ObjectPursuitLearner(AbstractPursuitLearner, AbstractObjectTemplateLearner):
"""
An implementation of pursuit learner for object recognition
"""
def _candidate_hypotheses(
    self, language_aligned_perception: LanguageAlignedPerception
) -> Sequence[PerceptionGraphTemplate]:
    """
    Given a learning input, returns all possible meaning hypotheses.
    """
    # One hypothesis per candidate object subgraph found in the scene.
    return [
        self._hypothesis_from_perception(object_)
        for object_ in self._candidate_perceptions(
            language_aligned_perception.perception_graph
        )
    ]
def get_objects_from_perception(
    self, observed_perception_graph: PerceptionGraph
) -> List[PerceptionGraph]:
    """Segment the scene graph into candidate object subgraphs, one per
    root object perception (excluding the ground), each including its
    sub-parts and their local properties/relations."""
    perception_as_digraph = observed_perception_graph.copy_as_digraph()
    perception_as_graph = perception_as_digraph.to_undirected()
    meanings = []
    # 1) Take all of the obj perc that dont have part of relationships with anything else
    root_object_percetion_nodes = []
    for node in perception_as_graph.nodes:
        if isinstance(node, ObjectPerception) and node.debug_handle != "the ground":
            if not any(
                [
                    u == node and str(data["label"]) == "partOf"
                    for u, v, data in perception_as_digraph.edges.data()
                ]
            ):
                root_object_percetion_nodes.append(node)
    # 2) for each of these, walk along the part of relationships backwards,
    # i.e find all of the subparts of the root object
    for root_object_perception_node in root_object_percetion_nodes:
        # Iteratively get all other object perceptions that connect to a root with a part of
        # relation
        all_object_perception_nodes = [root_object_perception_node]
        frontier = [root_object_perception_node]
        updated = True
        while updated:
            # Breadth-first expansion along partOf edges.
            updated = False
            new_frontier = []
            for frontier_node in frontier:
                for node in perception_as_graph.neighbors(frontier_node):
                    edge_data = perception_as_digraph.get_edge_data(
                        node, frontier_node, default=-1
                    )
                    if edge_data != -1 and str(edge_data["label"]) == "partOf":
                        new_frontier.append(node)
            if new_frontier:
                all_object_perception_nodes.extend(new_frontier)
                updated = True
                frontier = new_frontier
        # Now we have a list of all perceptions that are connected
        # 3) For each of these objects including root object, get axes, properties,
        # and relations and regions which are between these internal object perceptions
        other_nodes = []
        for node in all_object_perception_nodes:
            for neighbor in perception_as_graph.neighbors(node):
                # Filter out regions that don't have a reference in all object perception nodes
                # TODO: We currently remove colors to achieve a match - otherwise finding
                # patterns fails.
                if (
                    isinstance(neighbor, Region)
                    and neighbor.reference_object not in all_object_perception_nodes
                    or isinstance(neighbor, RgbColorPerception)
                ):
                    continue
                # Append all other none-object nodes to be kept in the subgraph
                if not isinstance(neighbor, ObjectPerception):
                    other_nodes.append(neighbor)
        generated_subgraph = networkx_utils.subgraph(
            perception_as_digraph, all_object_perception_nodes + other_nodes
        )
        meanings.append(PerceptionGraph(generated_subgraph))
    logging.info(f"Got {len(meanings)} candidate meanings")
    return meanings
def _hypothesis_from_perception(
    self, perception: PerceptionGraph
) -> PerceptionGraphTemplate:
    """Turn a candidate object subgraph into a matchable pattern template."""
    return PerceptionGraphTemplate(
        graph_pattern=PerceptionGraphPattern.from_graph(
            perception
        ).perception_graph_pattern
    )
def _candidate_perceptions(self, observed_perception_graph) -> List[PerceptionGraph]:
    """Candidate meanings are the segmented object subgraphs of the scene."""
    return self.get_objects_from_perception(observed_perception_graph)
def _matches(
    self,
    *,
    hypothesis: PerceptionGraphTemplate,
    observed_perception_graph: PerceptionGraph,
) -> bool:
    """True if the hypothesis pattern matches anywhere in the scene."""
    matcher = hypothesis.graph_pattern.matcher(
        observed_perception_graph, match_mode=MatchMode.OBJECT
    )
    # any() over the lazy match iterator: stop at the first match.
    return any(
        matcher.matches(
            use_lookahead_pruning=True, graph_logger=self._hypothesis_logger
        )
    )
@attrs(frozen=True)
class ObjectHypothesisPartialMatch(AbstractPursuitLearner.PartialMatch):
    """Result of partially matching a hypothesis pattern against a scene:
    the common-subgraph hypothesis (None if nothing matched) plus node counts."""
    partial_match_hypothesis: Optional[PerceptionGraphTemplate] = attrib(
        validator=optional(instance_of(PerceptionGraphTemplate))
    )
    num_nodes_matched: int = attrib(validator=instance_of(int), kw_only=True)
    num_nodes_in_pattern: int = attrib(validator=instance_of(int), kw_only=True)

    def matched_exactly(self) -> bool:
        return self.num_nodes_matched == self.num_nodes_in_pattern

    def match_score(self) -> float:
        # NOTE(review): divides by num_nodes_in_pattern — assumes patterns
        # are never empty; confirm upstream guarantees this.
        return self.num_nodes_matched / self.num_nodes_in_pattern
def _find_partial_match(
    self, hypothesis: PerceptionGraphTemplate, graph: PerceptionGraph
) -> "ObjectPursuitLearner.ObjectHypothesisPartialMatch":
    """Find the largest sub-pattern of *hypothesis* that matches *graph*.

    Returns a partial-match record carrying the matched sub-pattern (or None
    when nothing matched) together with matched/total node counts.
    """
    pattern = hypothesis.graph_pattern
    common_subgraph = get_largest_matching_pattern(
        pattern,
        graph,
        debug_callback=self._debug_callback,
        graph_logger=self._hypothesis_logger,
        ontology=self._ontology,
        match_mode=MatchMode.OBJECT,
    )
    self.debug_counter += 1

    if common_subgraph:
        matched_node_count = len(common_subgraph.copy_as_digraph().nodes)
        partial_hypothesis = PerceptionGraphTemplate(graph_pattern=common_subgraph)
    else:
        matched_node_count = 0
        partial_hypothesis = None

    return ObjectPursuitLearner.ObjectHypothesisPartialMatch(
        partial_hypothesis,
        num_nodes_matched=matched_node_count,
        num_nodes_in_pattern=len(pattern),
    )
def _find_identical_hypothesis(
    self,
    new_hypothesis: PerceptionGraphTemplate,
    candidates: Iterable[PerceptionGraphTemplate],
) -> Optional[PerceptionGraphTemplate]:
    """Return the first candidate isomorphic to *new_hypothesis*, else None."""
    return next(
        (
            candidate
            for candidate in candidates
            if new_hypothesis.graph_pattern.check_isomorphism(candidate.graph_pattern)
        ),
        None,
    )
def _are_isomorphic(
    self, h: PerceptionGraphTemplate, hypothesis: PerceptionGraphTemplate
) -> bool:
    """True iff the two hypotheses have isomorphic graph patterns."""
    pattern_a = h.graph_pattern
    pattern_b = hypothesis.graph_pattern
    return pattern_a.check_isomorphism(pattern_b)
@staticmethod
def from_parameters(
    params: Parameters, *, graph_logger: Optional[HypothesisLogger] = None
) -> "ObjectPursuitLearner":  # type: ignore
    """Construct an `ObjectPursuitLearner` from a `Parameters` namespace.

    Hyper-parameters (learning factor, thresholds, smoothing, RNG seed,
    language mode) are read from *params*; *graph_logger*, when given, is
    used to log hypothesis graphs.
    """
    hypothesis_log_dir = params.optional_creatable_directory(
        "log_word_hypotheses_dir"
    )
    if hypothesis_log_dir:
        logging.info("Hypotheses will be logged to %s", hypothesis_log_dir)

    seed = params.optional_integer("random_seed", default=0)
    rng = Random()
    rng.seed(seed)

    return ObjectPursuitLearner(
        learning_factor=params.floating_point("learning_factor"),
        graph_match_confirmation_threshold=params.floating_point(
            "graph_match_confirmation_threshold"
        ),
        lexicon_entry_threshold=params.floating_point("lexicon_entry_threshold"),
        smoothing_parameter=params.floating_point("smoothing_parameter"),
        hypothesis_logger=graph_logger,
        log_learned_item_hypotheses_to=hypothesis_log_dir,
        rng=rng,
        ontology=GAILA_PHASE_1_ONTOLOGY,
        language_mode=params.enum(
            "language_mode", LanguageMode, default=LanguageMode.ENGLISH
        ),
    )
def log_hypotheses(self, log_output_path: Path) -> None:
    """Render every lexicalized hypothesis into a subdirectory of *log_output_path*.

    Each entry is written under a directory named after the surface
    template's short string form.
    """
    for surface_template, hypothesis in self._lexicon.items():
        name = surface_template.to_short_string()
        hypothesis.render_to_file(name, log_output_path / name)
@attrs(slots=True)
class SubsetObjectLearner(AbstractTemplateSubsetLearner, AbstractObjectTemplateLearner):
    """
    A subset-learning `TopLevelLanguageLearner` implementation for single-object detection.
    """

    def _hypothesis_from_perception(
        self, preprocessed_input: LanguageAlignedPerception
    ) -> PerceptionGraphTemplate:
        """Turn the whole perception graph into a pattern hypothesis (no slots)."""
        pattern = PerceptionGraphPattern.from_graph(
            preprocessed_input.perception_graph
        ).perception_graph_pattern
        return PerceptionGraphTemplate(
            graph_pattern=pattern,
            template_variable_to_pattern_node=immutabledict(),
        )

    def _update_hypothesis(
        self,
        previous_pattern_hypothesis: PerceptionGraphTemplate,
        current_pattern_hypothesis: PerceptionGraphTemplate,
    ) -> Optional[PerceptionGraphTemplate]:
        """Intersect the two hypotheses, aligning nodes bound to the same slot."""
        previous_slots = previous_pattern_hypothesis.template_variable_to_pattern_node
        current_slots = current_pattern_hypothesis.template_variable_to_pattern_node
        # Pattern nodes bound to the same template variable in both
        # hypotheses are required to match each other.
        slot_alignments = []
        for previous_slot, previous_node in previous_slots.items():
            for current_slot, current_node in current_slots.items():
                if previous_slot == current_slot:
                    slot_alignments.append((current_node, previous_node))
        return previous_pattern_hypothesis.intersection(
            current_pattern_hypothesis,
            ontology=self._ontology,
            match_mode=MatchMode.OBJECT,
            allowed_matches=immutablesetmultidict(slot_alignments),
        )
@attrs(slots=True)
class SubsetObjectLearnerNew(
AbstractObjectTemplateLearnerNew, AbstractTemplateSubsetLearnerNew
):
"""
An implementation of `TopLevelLanguageLearner` for subset learning based approach for single object detection.
"""
def _new_concept(self, debug_string: str) -> ObjectConcept:
    """Create a fresh `ObjectConcept` labeled with *debug_string*."""
    concept = ObjectConcept(debug_string)
    return concept
def _hypotheses_from_perception(
    self,
    learning_state: LanguagePerceptionSemanticAlignment,
    bound_surface_template: SurfaceTemplateBoundToSemanticNodes,
) -> AbstractSet[PerceptionGraphTemplate]:
    """Propose one slotless pattern hypothesis per candidate object in the perception."""
    if bound_surface_template.slot_to_semantic_node:
        raise RuntimeError(
            "Object learner should not have slot to semantic node alignments!"
        )
    candidate_objects = extract_candidate_objects(
        learning_state.perception_semantic_alignment.perception_graph,
        sort_by_increasing_size=False,
    )
    hypotheses = []
    for candidate_object in candidate_objects:
        pattern = PerceptionGraphPattern.from_graph(
            candidate_object
        ).perception_graph_pattern
        hypotheses.append(
            PerceptionGraphTemplate(
                graph_pattern=pattern,
                template_variable_to_pattern_node=immutabledict(),
            )
        )
    return immutableset(hypotheses)
# I can't spot the difference in arguments pylint claims?
def _keep_hypothesis(  # pylint: disable=arguments-differ
    self,
    hypothesis: PerceptionGraphTemplate,
    bound_surface_template: SurfaceTemplateBoundToSemanticNodes,  # pylint:disable=unused-argument
) -> bool:
    """Reject hypotheses too small or too generic to identify an object."""
    pattern = hypothesis.graph_pattern
    if len(pattern) < 3:
        # A graph with fewer than three nodes is too small to
        # meaningfully describe an object.
        return False
    # A hypothesis consisting solely of sub-object structure, with no
    # other content, is insufficiently distinctive.
    return not all(isinstance(node, ObjectPerception) for node in pattern)
def _update_hypothesis(
self,
previous_pattern_hypothesis: PerceptionGraphTemplate,
current_pattern_hypothesis: PerceptionGraphTemplate,
) -> Optional[PerceptionGraphTemplate]:
return previous_pattern_hypothesis.intersection(
current_pattern_hypothesis,
ontology=self._ontology,
match_mode=MatchMode.OBJECT,
allowed_matches=immutablesetmultidict(
[
(node2, node1)
for previous_slot, | |
assert func(X[1], X[2]).round(5) == 0.67568
# In[35]:
def dice(u, v):
    """Dice distance between non-negative count vectors *u* and *v*.

    dice(u, v) = 1 - 2 * sum(min(u, v)) / sum(u + v); 0 for identical vectors.
    """
    ##### YOUR CODE HERE
    overlap = np.minimum(u, v).sum()
    total = (u + v).sum()
    return 1 - (2 * overlap) / total
# In[36]:
# Run the implementation self-check locally only; the Gradescope autograder
# sets IS_GRADESCOPE_ENV, so this is skipped during grading.
if 'IS_GRADESCOPE_ENV' not in os.environ:
    test_dice_implementation(dice)
# ### t-test reweighting [2 points]
#
#
# The t-test statistic can be thought of as a reweighting scheme. For a count matrix $X$, row index $i$, and column index $j$:
#
# $$\textbf{ttest}(X, i, j) =
# \frac{
# P(X, i, j) - \big(P(X, i, *)P(X, *, j)\big)
# }{
# \sqrt{(P(X, i, *)P(X, *, j))}
# }$$
#
# where $P(X, i, j)$ is $X_{ij}$ divided by the total values in $X$, $P(X, i, *)$ is the sum of the values in row $i$ of $X$ divided by the total values in $X$, and $P(X, *, j)$ is the sum of the values in column $j$ of $X$ divided by the total values in $X$.
#
# For this problem, implement this reweighting scheme. You can use `test_ttest_implementation` below to check that your implementation is correct. You do not need to use this for any evaluations, though we hope you will be curious enough to do so!
# In[37]:
def test_ttest_implementation(func):
"""`func` should be an implementation of t-test reweighting as
defined above.
"""
X = pd.DataFrame(np.array([
[ 4., 4., 2., 0.],
[ 4., 61., 8., 18.],
[ 2., 8., 10., 0.],
[ 0., 18., 0., 5.]]))
actual = np.array([
[ 0.33056, -0.07689, 0.04321, -0.10532],
[-0.07689, 0.03839, -0.10874, 0.07574],
[ 0.04321, -0.10874, 0.36111, -0.14894],
[-0.10532, 0.07574, -0.14894, 0.05767]])
predicted = func(X)
assert np.array_equal(predicted.round(5), actual)
# In[38]:
def ttest(df):
    """Reweight a count DataFrame with the t-test statistic.

    For each cell (i, j):

        ttest(X, i, j) = (P(i, j) - P(i, *) P(*, j)) / sqrt(P(i, *) P(*, j))

    where P(i, j) is the count divided by the grand total, and P(i, *) /
    P(*, j) are the row / column marginal probabilities.

    Parameters
    ----------
    df : pd.DataFrame
        Count matrix (rows: words, columns: contexts).

    Returns
    -------
    pd.DataFrame
        Reweighted matrix with the same index and columns as `df`.
    """
    ##### YOUR CODE HERE
    counts = df.values                                            # (m, n)
    total = counts.sum()                                          # grand total, computed once
    joint = counts / total                                        # P(X, i, j)
    row_marginals = counts.sum(axis=1, keepdims=True) / total     # P(X, i, *), shape (m, 1)
    col_marginals = counts.sum(axis=0, keepdims=True) / total     # P(X, *, j), shape (1, n)
    expected = row_marginals * col_marginals                      # outer product via broadcasting
    vals = (joint - expected) / np.sqrt(expected)
    return pd.DataFrame(vals, index=df.index, columns=df.columns)
# In[39]:
# Run the implementation self-check locally only; skipped in the Gradescope
# grading environment (which sets IS_GRADESCOPE_ENV).
if 'IS_GRADESCOPE_ENV' not in os.environ:
    test_ttest_implementation(ttest)
# ### Enriching a VSM with subword information [2 points]
#
# It might be useful to combine character-level information with word-level information. To help you begin assessing this idea, this question asks you to write a function that modifies an existing VSM so that the representation for each word $w$ is the element-wise sum of $w$'s original word-level representation with all the representations for the n-grams $w$ contains.
#
# The following starter code should help you structure this and clarify the requirements, and a simple test is included below as well.
#
# You don't need to write a lot of code; the motivation for this question is that the function you write could have practical value.
# In[40]:
def subword_enrichment(df, n=4):
    """Enrich each word vector in `df` with character n-gram information.

    Each word's new representation is the element-wise sum of its original
    row in `df` and the representations of all character n-grams it
    contains (per `vsm.ngram_vsm` / `vsm.character_level_rep`).
    """
    # 1. Use `vsm.ngram_vsm` to create a character-level
    #    VSM from `df`, using the above parameter `n` to
    #    set the size of the ngrams.
    ##### YOUR CODE HERE
    ngram_df = vsm.ngram_vsm(df, n)

    # 2. Use `vsm.character_level_rep` to get the representation
    #    for every word in `df` according to the character-level
    #    VSM you created above.
    ##### YOUR CODE HERE
    char_reps = np.vstack(
        [vsm.character_level_rep(word, ngram_df, n) for word in df.index]
    )

    # 3-4. Element-wise addition with the original representations; pandas
    #      keeps `df`'s index and columns, so the result is a DataFrame of
    #      unchanged dimensionality.
    ##### YOUR CODE HERE
    return df + char_reps
# In[41]:
def test_subword_enrichment(func):
"""`func` should be an implementation of subword_enrichment as
defined above.
"""
vocab = ["ABCD", "BCDA", "CDAB", "DABC"]
df = pd.DataFrame([
[1, 1, 2, 1],
[3, 4, 2, 4],
[0, 0, 1, 0],
[1, 0, 0, 0]], index=vocab)
expected = pd.DataFrame([
[14, 14, 18, 14],
[22, 26, 18, 26],
[10, 10, 14, 10],
[14, 10, 10, 10]], index=vocab)
new_df = func(df, n=2)
assert np.array_equal(expected.columns, new_df.columns), "Columns are not the same"
assert np.array_equal(expected.index, new_df.index), "Indices are not the same"
assert np.array_equal(expected.values, new_df.values), "Co-occurrence values aren't the same"
# In[42]:
# Run the implementation self-check locally only; skipped in the Gradescope
# grading environment (which sets IS_GRADESCOPE_ENV).
if 'IS_GRADESCOPE_ENV' not in os.environ:
    test_subword_enrichment(subword_enrichment)
# ### Your original system [3 points]
#
# This question asks you to design your own model. You can of course include steps made above (ideally, the above questions informed your system design!), but your model should not be literally identical to any of the above models. Other ideas: retrofitting, autoencoders, GloVe, subword modeling, ...
#
# Requirements:
#
# 1. Your code must operate on one of the count matrices in `data/vsmdata`. You can choose which one. __Other pretrained vectors cannot be introduced__.
#
# 1. Your code must be self-contained, so that we can work with your model directly in your homework submission notebook. If your model depends on external data or other resources, please submit a ZIP archive containing these resources along with your submission.
#
# In the cell below, please provide a brief technical description of your original system, so that the teaching team can gain an understanding of what it does. This will help us to understand your code and analyze all the submissions to identify patterns and strategies.
# In[46]:
# Enter your system description in this cell.
# This is my code description attached here as requested
# The main components of this original system are as follows:
# (a) use scaled counts, instead of flat counts. We observed scaled counts performed better
# (b) use of PMI helps
# (c) we train a function for distfunc.
# For this we use all 4 of the similarity/relatedness data sets
# We use lightgbm.LGBMRegressor to learn this function.
# My peak score was: 0.905132
# This is my code
# You can see that my system is based on the description above.
# Original system: train a learned distance function for word-similarity
# scoring on top of a PMI-reweighted, scaled count matrix (see the
# description above).  Runs only outside the Gradescope autograder.
if 'IS_GRADESCOPE_ENV' not in os.environ:
    # pass
    ##### YOUR CODE HERE
    # Earlier reference experiment (IMDB window-5 counts + PMI), kept for comparison:
    #imdb5 = pd.read_csv(os.path.join(VSM_HOME, "imdb_window5-scaled.csv.gz"), index_col=0)
    #imdb5_pmi = vsm.pmi(imdb5)
    #eval_results = full_word_similarity_evaluation(imdb5_pmi)
    #print(eval_results)
    from sklearn import linear_model
    import lightgbm

    class DistFuncRegressor:
        # Wraps a regressor so its prediction on the difference vector
        # (a - b) can serve as a `distfunc` for the evaluation harness.
        def __init__(self):
            #self.model = linear_model.LinearRegression()
            self.model = lightgbm.LGBMRegressor()

        def train(self, X, y):
            # X: rows of word-vector difference vectors; y: normalized scores.
            self.model.fit(X, y)

        def distfunc(self, a, b):
            # Predict a score for the single sample (a - b).
            X_test = a - b
            return self.model.predict([X_test])

    # Gigaword window-5 scaled counts, reweighted with PMI.
    giga5 = pd.read_csv(os.path.join(VSM_HOME, "giga_window5-scaled.csv.gz"), index_col=0)
    giga5_pmi = vsm.pmi(giga5)
    #lets try to train a model
    # Build training data from all four similarity/relatedness datasets:
    # features are difference vectors, targets are per-dataset scores.
    X = []
    y = []
    for reader in READERS:
        y_temp = []
        for w1, w2, score in reader():
            w1_values = giga5_pmi.loc[w1].values
            w2_values = giga5_pmi.loc[w2].values
            item = w1_values - w2_values
            X.append(item)
            y_temp.append(score)
        # normalize y
        # Scores are rescaled per dataset because each reader uses its own range.
        y_temp = np.array(y_temp)
        y_temp = (y_temp - np.min(y_temp)) / np.ptp(y_temp) # normalize in range [0, 1]
        y.append(y_temp)
    # stack X's vertically, and y horizontally.
    X = np.vstack(X)
    y = np.hstack(y)
    regressor = DistFuncRegressor()
    regressor.train(X, y)
    eval_results = full_word_similarity_evaluation(giga5_pmi, distfunc=regressor.distfunc)
    print(eval_results)
# Please do not remove this comment.
# ## Bake-off [1 point]
#
# For the bake-off, we will release two additional datasets. The announcement will go out on the discussion forum. We will also release reader code for these datasets that you can paste into this notebook. You will evaluate your custom model $M$ (from the previous question) on these new datasets using `full_word_similarity_evaluation`. Rules:
#
# 1. Only one evaluation is permitted.
# 1. No additional system tuning is permitted once the bake-off has started.
#
# The cells below this one constitute your bake-off entry.
#
# People who enter will receive the additional homework point, and people whose systems achieve the top score will receive an additional 0.5 points. We will test the top-performing systems ourselves, and only systems for which we can reproduce the reported results will win the extra 0.5 points.
#
# Late entries will be accepted, but they cannot earn the extra 0.5 points. Similarly, you cannot | |
int(temp.split('%s_'%readme_name)[1]))
else:
numFile = 1
readme_name = 'ReadMe_%s.txt'%(numFile+1)
readme = open('%s%s/%s'%(path, outDir, readme_name), 'w')
readme.write(update)
readme.close()
# ------------------------------------------------------------------------
# setup all the slicers. set up randomSeed for random/repRandom strategies through stackerList.
slicer = {}
stackerList = {}
if specifiedDith is not None:
# would like to add all the stackers first and then keep only the one that is specified
bestDithOnly, noDithOnly = False, False
if bestDithOnly:
stackerList['RandomDitherFieldPerVisit'] = [mafStackers.RandomDitherFieldPerVisitStacker(degrees=raDecInDeg,
randomSeed=1000)]
slicer['RandomDitherFieldPerVisit'] = slicers.HealpixSlicer(lonCol='randomDitherFieldPerVisitRa',
latCol='randomDitherFieldPerVisitDec',
latLonDeg=raDecInDeg,
nside=nside, useCache=False)
else:
slicer['NoDither'] = slicers.HealpixSlicer(lonCol='fieldRA', latCol='fieldDec', latLonDeg=raDecInDeg,
nside=nside, useCache=False)
if not noDithOnly:
# random dithers on different timescales
stackerList['RandomDitherPerNight'] = [mafStackers.RandomDitherPerNightStacker(degrees=raDecInDeg,
randomSeed=1000)]
stackerList['RandomDitherFieldPerNight'] = [mafStackers.RandomDitherFieldPerNightStacker(degrees=raDecInDeg, randomSeed=1000)]
stackerList['RandomDitherFieldPerVisit'] = [mafStackers.RandomDitherFieldPerVisitStacker(degrees=raDecInDeg, randomSeed=1000)]
# rep random dithers on different timescales
#stackerList['RepulsiveRandomDitherPerNight'] = [myStackers.RepulsiveRandomDitherPerNightStacker(degrees=raDecInDeg,
# randomSeed=1000)]
#stackerList['RepulsiveRandomDitherFieldPerNight'] = [myStackers.RepulsiveRandomDitherFieldPerNightStacker(degrees=raDecInDeg,
# randomSeed=1000)]
#stackerList['RepulsiveRandomDitherFieldPerVisit'] = [myStackers.RepulsiveRandomDitherFieldPerVisitStacker(degrees=raDecInDeg,
# randomSeed=1000)]
# set up slicers for different dithers
# random dithers on different timescales
slicer['RandomDitherPerNight'] = slicers.HealpixSlicer(lonCol='randomDitherPerNightRa',
latCol='randomDitherPerNightDec',
latLonDeg=raDecInDeg, nside=nside,
useCache=False)
slicer['RandomDitherFieldPerNight'] = slicers.HealpixSlicer(lonCol='randomDitherFieldPerNightRa',
latCol='randomDitherFieldPerNightDec',
latLonDeg=raDecInDeg, nside=nside,
useCache=False)
slicer['RandomDitherFieldPerVisit'] = slicers.HealpixSlicer(lonCol='randomDitherFieldPerVisitRa',
latCol='randomDitherFieldPerVisitDec',
latLonDeg=raDecInDeg, nside=nside,
useCache=False)
# rep random dithers on different timescales
#slicer['RepulsiveRandomDitherPerNight'] = slicers.HealpixSlicer(lonCol='repulsiveRandomDitherPerNightRa',
# latCol='repulsiveRandomDitherPerNightDec',
# latLonDeg=raDecInDeg, nside=nside, useCache=False)
#slicer['RepulsiveRandomDitherFieldPerNight'] = slicers.HealpixSlicer(lonCol='repulsiveRandomDitherFieldPerNightRa',
# latCol='repulsiveRandomDitherFieldPerNightDec',
# latLonDeg=raDecInDeg, nside=nside,
# useCache=False)
#slicer['RepulsiveRandomDitherFieldPerVisit'] = slicers.HealpixSlicer(lonCol='repulsiveRandomDitherFieldPerVisitRa',
# latCol='repulsiveRandomDitherFieldPerVisitDec',
# latLonDeg=raDecInDeg, nside=nside,
# useCache=False)
# spiral dithers on different timescales
slicer['FermatSpiralDitherPerNight'] = slicers.HealpixSlicer(lonCol='fermatSpiralDitherPerNightRa',
latCol='fermatSpiralDitherPerNightDec',
latLonDeg=raDecInDeg, nside=nside,
useCache=False)
slicer['FermatSpiralDitherFieldPerNight'] = slicers.HealpixSlicer(lonCol='fermatSpiralDitherFieldPerNightRa',
latCol='fermatSpiralDitherFieldPerNightDec',
latLonDeg=raDecInDeg, nside=nside,
useCache=False)
slicer['FermatSpiralDitherFieldPerVisit'] = slicers.HealpixSlicer(lonCol='fermatSpiralDitherFieldPerVisitRa',
latCol='fermatSpiralDitherFieldPerVisitDec',
latLonDeg=raDecInDeg, nside=nside,
useCache=False)
# hex dithers on different timescales
slicer['SequentialHexDitherPerNight'] = slicers.HealpixSlicer(lonCol='hexDitherPerNightRa',
latCol='hexDitherPerNightDec',
latLonDeg=raDecInDeg, nside=nside,
useCache=False)
slicer['SequentialHexDitherFieldPerNight'] = slicers.HealpixSlicer(lonCol='hexDitherFieldPerNightRa',
latCol='hexDitherFieldPerNightDec',
latLonDeg=raDecInDeg, nside=nside,
useCache=False)
slicer['SequentialHexDitherFieldPerVisit'] = slicers.HealpixSlicer(lonCol='hexDitherFieldPerVisitRa',
latCol='hexDitherFieldPerVisitDec',
latLonDeg=raDecInDeg, nside=nside,
useCache=False)
# per season dithers
slicer['PentagonDitherPerSeason'] = slicers.HealpixSlicer(lonCol='pentagonDitherPerSeasonRa',
latCol='pentagonDitherPerSeasonDec',
latLonDeg=raDecInDeg, nside=nside,
useCache=False)
slicer['PentagonDiamondDitherPerSeason'] = slicers.HealpixSlicer(lonCol='pentagonDiamondDitherPerSeasonRa',
latCol='pentagonDiamondDitherPerSeasonDec',
latLonDeg=raDecInDeg, nside=nside,
useCache=False)
slicer['SpiralDitherPerSeason'] = slicers.HealpixSlicer(lonCol='spiralDitherPerSeasonRa',
latCol='spiralDitherPerSeasonDec',
latLonDeg=raDecInDeg, nside=nside,
useCache=False)
# ------------------------------------------------------------------------
if specifiedDith is not None:
stackerList_, slicer_ = {}, {}
if isinstance(specifiedDith, str):
if specifiedDith in slicer.keys():
if specifiedDith.__contains__('Random'):
# only Random dithers have a stacker object for rand seed specification
stackerList_[specifiedDith] = stackerList[specifiedDith]
slicer_[specifiedDith] = slicer[specifiedDith]
elif isinstance(specifiedDith, list):
for specific in specifiedDith:
if specific in slicer.keys():
if specific.__contains__('Random'):
# only Random dithers have a stacker object for rand seed specification
stackerList_[specific] = stackerList[specific]
slicer_[specific] = slicer[specific]
else:
err = 'Invalid value for specifiedDith: %s.'%specifiedDith
err += 'Allowed values include one of the following:\n%s'%(slicer.keys())
raise ValueError(err)
stackerList, slicer = stackerList_, slicer_
print('\nRunning the analysis for %s'%slicer.keys())
# ------------------------------------------------------------------------
readme = open('%s%s/%s'%(path, outDir, readme_name), 'a')
readme.write('\nObserving strategies considered: %s\n'%(list(slicer.keys())))
readme.close()
# ------------------------------------------------------------------------
# set up bundle for numGal (and later deltaN/N)
myBundles = {}
dustMap = maps.DustMap(interp=False, nside=nside) # include dustMap; actual in/exclusion of dust is handled by the galaxyCountMetric
for dither in slicer:
if dither in stackerList:
myBundles[dither] = metricBundles.MetricBundle(galCountMetric, slicer[dither], sqlconstraint,
stackerList=stackerList[dither],
runName=runName, metadata=dither, mapsList=[dustMap])
else:
myBundles[dither] = metricBundles.MetricBundle(galCountMetric, slicer[dither], sqlconstraint,
runName=runName, metadata=dither, mapsList=[dustMap])
# ------------------------------------------------------------------------
# run the metric/slicer combination for galaxy counts (numGal)
print('\n# Running myBundles ...')
bGroup = metricBundles.MetricBundleGroup(myBundles, opsdb, outDir='%s%s'%(path, outDir),
resultsDb=resultsDb, saveEarly=False)
bGroup.runAll()
# ------------------------------------------------------------------------
# save the raw numGal data.
if saveRawNumGalData:
outDir_new = 'numGalData_beforeMasking_before0pt'
if not os.path.exists('%s%s/%s'%(path, outDir, outDir_new)):
os.makedirs('%s%s/%s'%(path, outDir, outDir_new))
saveBundleData_npzFormat('%s%s/%s'%(path, outDir, outDir_new), myBundles, 'numGalData_unmasked_no0pt', filterBand)
# ------------------------------------------------------------------------
# print out tot(numGal) associated with each strategy
# write to the readme as well
update = '\n# Before any border masking or photometric error calibration: '
print(update)
for dither in myBundles:
ind = np.where(myBundles[dither].metricValues.mask[:] == False)[0]
printOut = 'Total Galaxies for %s: %.9e' %(dither, sum(myBundles[dither].metricValues.data[ind]))
update += '\n %s'%printOut
print(printOut)
update += '\n'
readme = open('%s%s/%s'%(path, outDir, readme_name), 'a')
readme.write(update)
readme.close()
print('\n## Time since the start of the calculation: %.2f hrs'%((time.time()-startTime)/3600.))
# ------------------------------------------------------------------------
# mask the edges: the data in the masked pixels is not changed
plotHandler = plots.PlotHandler(outDir='%s%s'%(path, outDir), resultsDb=resultsDb, thumbnail=False, savefig=False)
print('\n# Masking the edges ...')
myBundles, borderPixelsMasked = maskingAlgorithmGeneralized(myBundles, plotHandler, 'Number of Galaxies',
nside=nside,
pixelRadius=pixelRadiusForMasking,
plotIntermediatePlots=False,
plotFinalPlots=False, printFinalInfo=True,
returnBorderIndices=True)
# ------------------------------------------------------------------------
# save the numGal data.
if saveNumGalDataAfterMasking:
outDir_new = 'numGalData_afterBorderMasking'
if not os.path.exists('%s%s/%s'%(path, outDir, outDir_new)):
os.makedirs('%s%s/%s'%(path, outDir, outDir_new))
saveBundleData_npzFormat('%s%s/%s'%(path, outDir, outDir_new), myBundles, 'numGalData_masked', filterBand)
# ------------------------------------------------------------------------
# print out tot(numGal) associated with each strategy
# write to the readme as well
if (pixelRadiusForMasking!=0):
update = '\n# After border masking: '
print(update)
for dither in myBundles:
ind = np.where(myBundles[dither].metricValues.mask[:] == False)[0]
printOut = 'Total Galaxies for %s: %.9e' %(dither, sum(myBundles[dither].metricValues.data[ind]))
print(printOut)
update += '\n %s'%printOut
update += '\n'
readme = open('%s%s/%s'%(path, outDir, readme_name), 'a')
readme.write(update)
readme.close()
print('\n## Time since the start of the calculation: %.2f hrs'%((time.time()-startTime)/3600.))
################################################################################################################
# If include 0pt errors
# Ansatz: for each pixel i, del_i= k*z_i/sqrt(nObs_i),
# where z_i is the average seeing the pixel minus avgSeeing across map, nObs is the number of observations,
# and k is a constant such that var(del_i)= (0.01)^2. 0.01 for the 1% LSST goal.
# k-constraint equation becomes: k^2*var(z_i/sqrt(nObs_i))= (0.01)^2 --- equation 1
if include0ptErrors:
tablename = 'SummaryAllProps'
if tablename in opsdb.tableNames:
colname = 'seeingFwhmEff'
if colname not in opsdb.columnNames[tablename]:
raise ValueError('Unclear which seeing column to use.')
elif 'Summary' in opsdb.tableNames:
tablename = 'Summary'
colname = 'finSeeing'
if colname not in opsdb.columnNames[tablename]:
colname = 'FWHMeff'
if colname not in opsdb.columnNames[tablename]:
raise ValueError('Unclear which seeing column to use.')
meanMetric = metrics.MeanMetric(col=colname) # for avgSeeing per HEALpix pixel
nObsMetric = NumObsMetric(nside=nside) # for numObs per HEALpix pixel
if includeDustExtinction: coaddMetric = metrics.ExgalM5(lsstFilter=filterBand)
else: coaddMetric = metrics.Coaddm5Metric()
avgSeeingBundle = {}
nObsBundle = {}
coaddBundle = {}
# can pass dustMap to metricBundle regardless of whether to include dust extinction or not.
# the metric choice (coadd vs. exGal) takes care of whether to use the dustMap or not.
dustMap = maps.DustMap(interp=False, nside=nside)
for dither in slicer:
if dither in stackerList:
avgSeeingBundle[dither] = metricBundles.MetricBundle(meanMetric, slicer[dither], sqlconstraint,
stackerList=stackerList[dither],
runName=runName, metadata=dither)
nObsBundle[dither] = metricBundles.MetricBundle(nObsMetric, slicer[dither], sqlconstraint,
stackerList=stackerList[dither],
runName=runName, metadata=dither)
coaddBundle[dither] = metricBundles.MetricBundle(coaddMetric, slicer[dither], sqlconstraint,
stackerList=stackerList[dither],
runName=runName, metadata=dither,
mapsList=[dustMap])
else:
avgSeeingBundle[dither] = metricBundles.MetricBundle(meanMetric, slicer[dither], sqlconstraint,
runName=runName, metadata=dither)
nObsBundle[dither] = metricBundles.MetricBundle(nObsMetric, slicer[dither], sqlconstraint,
runName=runName, metadata=dither)
coaddBundle[dither] = metricBundles.MetricBundle(coaddMetric, slicer[dither], sqlconstraint,
runName=runName, metadata=dither,
mapsList=[dustMap])
print('\n# Running avgSeeingBundle ...')
aGroup = metricBundles.MetricBundleGroup(avgSeeingBundle, opsdb, outDir='%s%s'%(path, outDir),
resultsDb=resultsDb, saveEarly=False)
aGroup.runAll()
print('\n# Running nObsBundle ...')
nGroup = metricBundles.MetricBundleGroup(nObsBundle, opsdb, outDir='%s%s'%(path, outDir),
resultsDb=resultsDb, saveEarly=False)
nGroup.runAll()
print('\n# Running coaddBundle ...')
cGroup = metricBundles.MetricBundleGroup(coaddBundle, opsdb, outDir='%s%s'%(path, outDir),
resultsDb=resultsDb, saveEarly=False)
cGroup.runAll()
# ------------------------------------------------------------------------
# mask the border pixels
for dither in slicer:
avgSeeingBundle[dither].metricValues.mask[borderPixelsMasked[dither]] = True
nObsBundle[dither].metricValues.mask[borderPixelsMasked[dither]] = True
coaddBundle[dither].metricValues.mask[borderPixelsMasked[dither]] = True
# ------------------------------------------------------------------------
# calculate averageSeeing over the entrie map
bundle = {}
bundle['avgSeeingAcrossMap'] = metricBundles.MetricBundle(meanMetric, slicers.UniSlicer(),
sqlconstraint,runName=runName,
metadata='avgSeeingAcrossMap')
bundleGroup = metricBundles.MetricBundleGroup(bundle, opsdb, outDir='%s%s'%(path, outDir),
resultsDb=resultsDb, saveEarly=False)
bundleGroup.runAll()
avgSeeingAcrossMap = bundle['avgSeeingAcrossMap'].metricValues.data[0]
printOut = '\n# Average seeing across map: %s' %(avgSeeingAcrossMap)
print(printOut)
# add to the readme
readme = open('%s%s/%s'%(path, outDir, readme_name), 'a')
readme.write(printOut)
readme.close()
# find the zero point uncertainties: for each pixel i, del_i=k*z_i/sqrt(nObs_i),
# where z_i is the average seeing the pixel minus avgSeeing across map, nObs is the number of observations,
# and k is a constant such that var(del_i)=(0.01)^2.
# k-constraint equation becomes: k^2*var(z_i/sqrt(nObs_i))=(0.01)^2 --- equation 1
k = Symbol('k')
zeroPtError = {}
kValue = {}
print('\n# 0pt calculation ansatz: \delta_i=k*z_i/sqrt{nObs_i}, where k is s.t. var(\delta_i)=(0.01)^$')
if save0ptPlots:
outDir_new = '0pt_plots'
if not os.path.exists('%s%s/%s'%(path, outDir, outDir_new)):
os.makedirs('%s%s/%s'%(path, outDir, outDir_new))
# ------------------------------------------------------------------------
# add to the readme
readme = open('%s%s/%s'%(path, outDir, readme_name), 'a')
readme.write('\n\n0pt Information: ')
readme.close()
for dither in avgSeeingBundle:
z_i = avgSeeingBundle[dither].metricValues.data[:]-avgSeeingAcrossMap
nObs_i = nObsBundle[dither].metricValues.data[:]
ind = np.where((nObsBundle[dither].metricValues.mask == False) & \
(nObs_i != 0.0))[0] # make sure the uncertainty is valid; no division by 0
temp = np.var(z_i[ind]/np.sqrt(nObs_i[ind])) # see equation 1
kValue[dither] = solve(k**2*temp-0.01**2,k)[1]
err = np.empty(len(z_i))
err.fill(-500) # initiate
err[ind] = (kValue[dither]*z_i[ind])/np.sqrt(nObs_i[ind])
zeroPtError[dither] = err
# add to the readme
readme = open('%s%s/%s'%(path, outDir, readme_name), 'a')
readme.write('\nDith strategy: %s'%dither)
readme.close()
# ------------------------------------------------------------------------
if print0ptInformation:
update = '\n# %s'%dither
ind = np.where(zeroPtError[dither] != -500)[0]
goodError = zeroPtError[dither][ind]
update += 'var(0pt): %s'%np.var(goodError)
update += '\n0.01^2 - var(0pt) = %s'%((0.01)**2-np.var(goodError))
update += '\nk-value: %s\n'%kValue[dither]
print(update)
# add to the readme
readme = open('%s%s/%s'%(path, outDir, readme_name), 'a')
readme.write(update)
readme.close()
# | |
# <gh_stars>10-100  (dataset artifact; commented out because it is not valid Python)
import pygame, sys , time , random
from pygame.locals import QUIT,K_UP,K_ESCAPE,KEYDOWN, K_LEFT, K_RIGHT,K_a,K_d
# --- Game constants ---------------------------------------------------------
PURPLE = ((170,0,255))      # board-border color (RGB tuple; outer parens are redundant)
BOX_SIZE = 20               # side length of one grid cell, in pixels
SCREEN_WIDTH = 640
SCREEN_HEIGHT = 480
BOARD_WIDTH = 10            # board width in cells
score = 0                   # module-level score (run_tetris_game also keeps its own local `score`)
# Piece colors (RGB tuples).
blue = ((0,255,255))
green = ((0,255,0))
pink = ((255,0,101))
yellow = ((255,255,0))
found=0  # NOTE(review): not used in the visible code; possibly referenced later in the file
# Shape templates: each shape is a list of rotation states; each state is a
# 5x5 grid of strings where 'c' marks a filled cell and '.' an empty one.
S_SHAPE = [['.....',
            '.....',
            '..cc.',
            '.cc..',
            '.....'],
           ['.....',
            '..c..',
            '..cc.',
            '...c.',
            '.....']]
I_SHAPE = [['..c..',
            '..c..',
            '..c..',
            '..c..',
            '.....'],
           ['.....',
            '.....',
            'cccc.',
            '.....',
            '.....']]
O_SHAPE = [['.....', # 2-D template; O_SHAPE[0] is used later to draw the shape
            '.....', # (kept explicit so an arbitrary shape is not produced)
            '.cc..',
            '.cc..',
            '.....']]
Z_SHAPE = [['.....',
            '.....',
            '.cc..',
            '..cc.',
            '.....'],
           ['.....',
            '...c.',
            '..cc.',
            '..c..',
            '.....']]
def pieces_dict():
    """Return the mapping from piece letter to its list of rotation templates."""
    shapes = {
        'S': S_SHAPE,
        'I': I_SHAPE,
        'O': O_SHAPE,
        'Z': Z_SHAPE,
    }
    return shapes
def color_dict():
    """Return the mapping from color letter to its RGB tuple."""
    colors = {
        'P': pink,
        'B': blue,
        'Y': yellow,
        'G': green,
    }
    return colors
def run_tetris_game(name):
    """Run the main Tetris loop for player `name` until the process exits.

    Each frame: apply gravity (one row per 0.5 s), draw the board, score and
    falling piece, process keyboard input, lock the piece when it cannot move
    down, clear completed lines, and spawn a new piece.
    """
    score = 0                    # lines cleared so far (shadows the module-level `score`)
    pygame.init()                # start the game engine
    screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT) )  # create the window
    pygame.display.set_caption('My TetrisGame')
    game_matrix = create_game_matrix()  # 20x10 board of '.' cells -- NOTE(review): defined elsewhere in the project, not visible here
    last_time_piece_moved = time.time() # wall-clock time of the last gravity step
    piece = create_piece()
    while True:
        screen.fill(( 0, 0, 0))  # clear frame to black
        # Gravity: move the piece one row down every 0.5 seconds.
        if (time.time() - last_time_piece_moved > 0.5):
            piece['row'] = piece['row'] + 1
            last_time_piece_moved = time.time()
        # Draw the falling piece, the board border, locked cells and score.
        draw_moving_piece(screen, piece)
        pygame.draw.rect(
            screen,
            PURPLE,
            # border rect at (100, 50); +10 covers the 5 px line on both sides
            [100, 50, 10 * BOX_SIZE + 10, 20 * BOX_SIZE + 10 ], 5)
        draw_board(screen, game_matrix , piece)
        draw_score(screen, score , name)
        listen_to_user_input(game_matrix, piece)  # left/right shifts and rotation
        # Lock the piece when moving one more row down would collide or leave the board.
        if (not isValidPosition(game_matrix, piece, adjRow=1)):
            game_matrix = update_game_matrix(game_matrix, piece)  # stamp piece cells into the board
            lines_removed = remove_completed_lines(game_matrix)
            score += lines_removed
            piece = create_piece()  # spawn the next piece
        # NOTE(review): game_over/draw_over are not defined in this file; naming
        # suggests game_over returns False once the board is lost -- confirm.
        if not game_over(screen, game_matrix):
            draw_over(screen)
        pygame.display.update()  # flip the frame to the screen
        # Terminate on window close. NOTE(review): listen_to_user_input drains
        # the whole event queue, so QUIT may never be seen here -- verify.
        for event in pygame.event.get(QUIT):
            pygame.quit()
            sys.exit()
def isOnBoard(row, column):
    """Return True if (row, column) is inside the 10-wide board and above the bottom.

    Rows above the top (negative `row`) count as on-board so pieces can spawn
    partially above the visible area.
    """
    if column < 0 or column >= 10:
        return False
    return row < 20
def draw_score(screen, score, name):
    """Render the current score and the player's name in the window's top-right corner."""
    label_font = pygame.font.Font('freesansbold.ttf', 18)  # font file + point size
    white = (255, 255, 255)
    score_surface = label_font.render('Score: %s' % score, True, white)
    name_surface = label_font.render('PLAYER: %s' % name, True, (255, 255, 255))
    screen.blit(score_surface, (640 - 150, 20))  # (image, top-left position)
    screen.blit(name_surface, (640 - 150, 60))
def remove_completed_lines(game_matrix):
    """Collapse every fully-filled row in place and return how many were removed.

    For each completed row, every row above it is shifted one line down and the
    top row is refilled with '.' blanks.
    """
    removed = 0
    for row in range(20):
        if not is_line_completed(game_matrix, row):
            continue
        # Shift rows [0, row) down by one, overwriting the completed row.
        for dst in range(row, 0, -1):
            for col in range(10):
                game_matrix[dst][col] = game_matrix[dst - 1][col]
        # The very top row is now free again.
        for col in range(10):
            game_matrix[0][col] = '.'
        removed += 1
    return removed
def is_line_completed(game_matrix, row):
    """Return True when row `row` has no empty ('.') cell in its 10 columns."""
    return all(game_matrix[row][column] != '.' for column in range(10))
def listen_to_user_input(game_matrix, piece):
    """Apply pending keyboard input to the falling piece.

    Left/right arrows shift the piece one column when the destination is a
    valid position; the up arrow advances to the next rotation state and rolls
    the rotation back if the rotated piece would collide or leave the board.

    Bug fix: the original called pygame.event.get() with no filter, draining
    the ENTIRE event queue -- including QUIT -- before the main loop's
    `pygame.event.get(QUIT)` check could ever see it, so closing the window
    did nothing. Fetch only KEYDOWN events and leave the rest on the queue.
    """
    for event in pygame.event.get(KEYDOWN):  # consume key presses only
        if event.key == K_LEFT and isValidPosition(game_matrix, piece, adjColumn=-1):
            piece['column'] -= 1
        elif event.key == K_RIGHT and isValidPosition(game_matrix, piece, adjColumn=1):
            piece['column'] += 1
        elif event.key == K_UP:
            # Cycle through the shape's rotation states (1 or 2 per shape).
            n_rotations = len(pieces_dict()[piece['shape']])
            piece['rotation'] = (piece['rotation'] + 1) % n_rotations
            if not isValidPosition(game_matrix, piece):
                # Rotation caused a collision: undo it.
                piece['rotation'] = (piece['rotation'] - 1) % n_rotations
def isValidPosition(game_matrix, piece, adjColumn=0, adjRow=0):
    """Return True when every filled cell of `piece`, offset by (adjRow, adjColumn),
    lies on the board and over an empty ('.') board cell.
    """
    template = pieces_dict()[piece['shape']][piece['rotation']]  # 5x5 rotation grid
    for r in range(5):
        for c in range(5):
            if template[r][c] == '.':
                continue  # empty template cell imposes no constraint
            board_row = piece['row'] + r + adjRow
            board_col = piece['column'] + c + adjColumn
            if not isOnBoard(board_row, board_col):
                return False  # would leave the playing field
            if game_matrix[board_row][board_col] != '.':
                return False  # would overlap an already-locked cell
    return True
def create_piece():
    """Return a new piece dict with a random shape and colour, spawned
    unrotated at board position (row 0, column 2).
    """
    shape_label = random.choice(list(pieces_dict().keys()))
    colour_label = random.choice(list(color_dict().keys()))
    return {
        'shape': shape_label,
        'color': colour_label,
        'rotation': 0,   # index into the shape's rotation states
        'row': 0,        # spawn location
        'column': 2,
    }
def draw_board(screen, matrix, piece):
    """Draw a white box (grey shadow) for every occupied cell of the 20x10 board."""
    for board_row in range(20):
        for board_col in range(10):
            if matrix[board_row][board_col] == '.':
                continue  # empty cell: nothing to draw
            draw_single_tetris_box(screen, board_row, board_col, (255, 255, 255), (180, 180, 180))
def draw_moving_piece(screen, piece):
    """Draw the currently falling piece from its 5x5 rotation grid in its colour."""
    template = pieces_dict()[piece['shape']][piece['rotation']]
    fill_colour = color_dict()[piece['color']]
    for r in range(5):
        for c in range(5):
            if template[r][c] != '.':
                # Board cell = piece origin + offset within the template.
                draw_single_tetris_box(screen, piece['row'] + r, piece['column'] + c, fill_colour, (255, 255, 255))
def update_game_matrix(matrix, piece):
    """Stamp the piece's filled cells into `matrix` as 'c' and return the matrix."""
    template = pieces_dict()[piece['shape']][piece['rotation']]
    for r in range(5):
        for c in range(5):
            if template[r][c] != '.':
                matrix[piece['row'] + r][piece['column'] + c] = 'c'
    return matrix
def draw_single_tetris_box(screen, matrix_cell_row, matrix_cell_column, color, shadow_color):
    """Draw one board cell: a 20x20 shadow square with an 18x18 coloured square on top.

    Pixel origin = board corner (100, 50) + 5 px border + 1 px gap between cells.
    """
    origin_x = 100 + 5 + (matrix_cell_column * 20 + 1)
    origin_y = 50 + 5 + (matrix_cell_row * 20 + 1)
    pygame.draw.rect(screen, shadow_color, [origin_x, origin_y, 20, 20])
    pygame.draw.rect(screen, color, [origin_x, origin_y, 18, 18])  # thickness 0 = filled
def test_create_mahindra_gps_device_log_with_valid_deleted_on(self):
    """POST with deleted_on as a datetime, its str() form, and None all return 201."""
    data = self.minimum_valid_data.copy()
    data["deleted_on"] = datetime.now()
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["deleted_on"] = str(datetime.now())
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["deleted_on"] = None
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_log_with_invalid_deleted_on(self):
    """POST with an unparseable deleted_on (garbage, d/m/y, malformed time) returns 400."""
    data = self.minimum_valid_data.copy()
    data["deleted_on"] = "invalid_format"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    data["deleted_on"] = "09/12/18"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    data["deleted_on"] = "09:12:18:20:20:300"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_mahindra_gps_device_log_with_valid_datetime(self):
    """POST with datetime as a datetime object and as its str() form returns 201."""
    data = self.minimum_valid_data.copy()
    data["datetime"] = datetime.now()
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["datetime"] = str(datetime.now())
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_log_with_invalid_datetime(self):
    """POST with an unparseable datetime value returns 400."""
    data = self.minimum_valid_data.copy()
    data["datetime"] = "invalid_format"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    data["datetime"] = "09/12/18"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    data["datetime"] = "09:12:18:20:20:300"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    # NOTE(review): likely a copy-paste slip -- this sets deleted_on instead of
    # datetime; the 400 below still comes from the (still invalid) datetime value.
    data["deleted_on"] = None
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_mahindra_gps_device_with_valid_vehicle_id(self):
    """POST with vehicle_id up to the 50-char limit (49 and 50 boundary) returns 201."""
    data = self.minimum_valid_data.copy()
    data["vehicle_id"] = "mh2000"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["vehicle_id"] = generate_random_string(49)
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["vehicle_id"] = generate_random_string(50)
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_log_with_invalid_vehicle_id(self):
    """POST with vehicle_id over 50 chars or None returns 400 (field is required)."""
    data = self.minimum_valid_data.copy()
    data["vehicle_id"] = generate_random_string(51)
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    data["vehicle_id"] = None
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_mahindra_gps_device_log_with_valid_longitude(self):
    """POST with a float longitude (up to 10 decimal places) or None returns 201.

    device_id is varied per request, presumably to avoid a uniqueness clash -- confirm.
    """
    data = self.minimum_valid_data.copy()
    data["longitude"] = 1232333.1231
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["longitude"] = 1.1234567891
    data["device_id"] = "mh2000"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["longitude"] = None
    data["device_id"] = "mh2001"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_log_with_invalid_longitude(self):
    """POST with a non-numeric longitude or one with 11 decimal places returns 400."""
    data = self.minimum_valid_data.copy()
    data["longitude"] = "invalid"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    data["longitude"] = 1.12345678911
    data["device_id"] = "mh2000"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_mahindra_gps_device_log_with_valid_latitude(self):
    """POST with a float latitude (up to 10 decimal places) or None returns 201."""
    data = self.minimum_valid_data.copy()
    data["latitude"] = 1232333.1231
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["latitude"] = 1.1234567891
    data["device_id"] = "mh2000"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["latitude"] = None
    data["device_id"] = "mh2001"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_log_with_invalid_latitude(self):
    """POST with a non-numeric latitude or one with 11 decimal places returns 400."""
    data = self.minimum_valid_data.copy()
    data["latitude"] = "invalid"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    data["latitude"] = 1.12345678911
    data["device_id"] = "mh2000"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
# NOTE(review): "fuel_efficiencyd" in the method name looks like a typo for
# "fuel_efficiency"; kept as-is since renaming changes test discovery/reports.
def test_create_mahindra_gps_device_with_valid_fuel_efficiencyd(self):
    """POST with fuel_efficiency up to the 30-char limit (29/30 boundary) or None returns 201."""
    data = self.minimum_valid_data.copy()
    data["fuel_efficiency"] = "36%"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["fuel_efficiency"] = generate_random_string(29)
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["fuel_efficiency"] = generate_random_string(30)
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["fuel_efficiency"] = None
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_log_with_invalid_fuel_efficiency(self):
    """POST with fuel_efficiency over the 30-char limit returns 400."""
    data = self.minimum_valid_data.copy()
    data["fuel_efficiency"] = generate_random_string(31)
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_mahindra_gps_device_with_valid_address(self):
    """POST with address up to the 300-char limit (299/300 boundary) or None returns 201."""
    data = self.minimum_valid_data.copy()
    data["address"] = "valid_address"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["address"] = generate_random_string(299)
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["address"] = generate_random_string(300)
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["address"] = None
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_log_with_invalid_address(self):
    """POST with address over the 300-char limit returns 400."""
    data = self.minimum_valid_data.copy()
    data["address"] = generate_random_string(301)
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_mahindra_gps_device_with_valid_status(self):
    """POST with status up to the 300-char limit (299/300 boundary) or None returns 201."""
    data = self.minimum_valid_data.copy()
    data["status"] = "valid_status"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["status"] = generate_random_string(299)
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["status"] = generate_random_string(300)
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["status"] = None
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_log_with_invalid_status(self):
    """POST with status over the 300-char limit returns 400."""
    data = self.minimum_valid_data.copy()
    data["status"] = generate_random_string(301)
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_mahindra_gps_device_with_valid_driver_name(self):
    """POST with driver_name up to the 50-char limit (49/50 boundary) or None returns 201."""
    data = self.minimum_valid_data.copy()
    data["driver_name"] = "MyNameIsKhan"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["driver_name"] = generate_random_string(49)
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["driver_name"] = generate_random_string(50)
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["driver_name"] = None
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_log_with_invalid_driver_name(self):
    """POST with driver_name over the 50-char limit returns 400."""
    data = self.minimum_valid_data.copy()
    data["driver_name"] = generate_random_string(51)
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_mahindra_gps_device_log_with_valid_driver_number(self):
    """POST with a 10-digit driver_number (or None) returns 201."""
    data = self.minimum_valid_data.copy()
    data["driver_number"] = "1800140020"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["driver_number"] = "9878787878"
    data["device_id"] = "mh2000"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["driver_number"] = None
    data["device_id"] = "mh2001"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_log_with_invalid_driver_number(self):
    """POST with a malformed driver_number returns 400.

    Covers: leading zero, 9 digits, 11 digits, and two alphanumeric variants.
    """
    data = self.minimum_valid_data.copy()
    data["driver_number"] = "0123456789"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    data["driver_number"] = "123456789"
    data["device_id"] = "mh2000"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    data["driver_number"] = "12345678911"
    data["device_id"] = "mh2001"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    data["driver_number"] = "12345ab678"
    data["device_id"] = "mh2002"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    data["driver_number"] = "invalid123"
    data["device_id"] = "mh2003"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_mahindra_gps_device_with_valid_driving_licence_number(self):
    """POST with driving_licence_number up to the 40-char limit (39/40 boundary) or None returns 201."""
    data = self.minimum_valid_data.copy()
    data["driving_licence_number"] = "dl12ab35844"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["driving_licence_number"] = generate_random_string(39)
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["driving_licence_number"] = generate_random_string(40)
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["driving_licence_number"] = None
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_log_with_invalid_driving_licence_number(self):
    """POST with driving_licence_number over the 40-char limit returns 400."""
    data = self.minimum_valid_data.copy()
    data["driving_licence_number"] = generate_random_string(41)
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_mahindra_gps_device_with_valid_vehicle_number(self):
    """POST with vehicle_number in accepted registration formats (or None) returns 201."""
    data = self.minimum_valid_data.copy()
    data["vehicle_number"] = "dl12ab5844"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["vehicle_number"] = "MH-12-BOM-2018"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["vehicle_number"] = "MH12-Jh2018"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["vehicle_number"] = None
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_log_with_invalid_vehicle_number(self):
    """POST with vehicle_number not matching the registration pattern returns 400."""
    data = self.minimum_valid_data.copy()
    data["vehicle_number"] = "invalid"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    data["vehicle_number"] = "2018MH12BJP"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    data["vehicle_number"] = "M12sp2018"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_mahindra_gps_device_log_with_valid_vehicle_type(self):
    """POST with vehicle_type up to the 40-char limit (39/40 boundary) or None returns 201."""
    data = self.minimum_valid_data.copy()
    data["vehicle_type"] = "dl12ab35844"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["vehicle_type"] = generate_random_string(39)
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["vehicle_type"] = generate_random_string(40)
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["vehicle_type"] = None
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_log_with_invalid_vehicle_type(self):
    """POST with vehicle_type over the 40-char limit returns 400."""
    data = self.minimum_valid_data.copy()
    data["vehicle_type"] = generate_random_string(41)
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_mahindra_gps_device_log_with_valid_vehicle_status(self):
    """POST with each allowed vehicle_status choice (or None) returns 201."""
    data = self.minimum_valid_data.copy()
    data["vehicle_status"] = "loading"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["vehicle_status"] = "unloading"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["vehicle_status"] = "loaded"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["vehicle_status"] = "unloaded"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["vehicle_status"] = None
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_log_with_invalid_vehicle_status(self):
    """POST with a non-choice vehicle_status (choices are case-sensitive) returns 400."""
    data = self.minimum_valid_data.copy()
    data["vehicle_status"] = "invalid_status"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    data["vehicle_status"] = "LoadIng"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_mahindra_gps_device_log_with_valid_speed(self):
    """POST with speed as float, int, or None returns 201."""
    data = self.minimum_valid_data.copy()
    data["speed"] = 40.00
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["speed"] = 60
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["speed"] = None
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_log_with_invalid_speed(self):
    """POST with a non-numeric or negative speed returns 400."""
    data = self.minimum_valid_data.copy()
    data["speed"] = "invalid_speed"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    data["speed"] = "-12.00"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_mahindra_gps_device_log_with_valid_device(self):
    """POST with device set to an existing device's primary key, or None, returns 201."""
    data = self.minimum_valid_data.copy()
    data["device"] = self.mahindra_gps_device.id
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    data["device"] = None
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_log_with_invalid_device(self):
    """POST with device as a non-existent pk, negative, zero, float, string, or datetime returns 400."""
    data = self.minimum_valid_data.copy()
    # pk guaranteed not to exist (well beyond any fixture id)
    data["device"] = self.mahindra_gps_device.id * 1000
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    data["device"] = -12
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    data["device"] = 0
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    data["device"] = 1.32
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    data["device"] = "invalid"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    data["device"] = datetime.now()
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, data, format='json')
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_mahindra_gps_device_log_with_latitude(self):
    """POST succeeds (201) when latitude is added to the minimum valid payload.

    NOTE(review): mutates self.minimum_valid_data in place (no .copy()) --
    fine only if setUp rebuilds the fixture per test; confirm.
    """
    self.minimum_valid_data["latitude"] = "21.9200000763"
    self.client.credentials(HTTP_AUTHORIZATION=self.token)
    response = self.client.post(self.create_url, self.minimum_valid_data, format='json')
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
# Adding vehicle_status field to minimum valid data required
def test_create_mahindra_gps_device_log_with_vehicle_status(self):
self.minimum_valid_data["vehicle_status"] = "unloaded"
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.post(self.create_url, self.minimum_valid_data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    # Adding driver field to minimum valid data required
    def test_create_mahindra_gps_device_log_with_driver(self):
        # NOTE(review): despite the name, this test sets the "device" field,
        # not "driver" — looks like a copy/paste slip; confirm against the
        # serializer's fields before renaming or changing the payload.
        self.minimum_valid_data["device"] = self.mahindra_gps_device.id
        self.client.credentials(HTTP_AUTHORIZATION=self.token)
        response = self.client.post(self.create_url, self.minimum_valid_data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_mahindra_gps_device_log_with_full_valid_data(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response | |
"""Implementations of the IPFP algorithm to solve for equilibrium and do comparative statics
in several variants of the `Choo and Siow 2006 <https://www.jstor.org/stable/10.1086/498585?seq=1>`_ model:
* homoskedastic with singles (as in Choo and Siow 2006)
* homoskedastic without singles
* gender-heteroskedastic: with a scale parameter on the error term for women
* gender- and type-heteroskedastic: with a scale parameter on the error term for each gender and type
* two-level nested logit, with nests and nest parameters that do not depend on the type, and {0} as the first nest
Each solver, when fed the joint surplus and margins, returns the equilibrium matching patterns, the adding-up errors on the margins,
and if requested (using `gr=True`) the derivatives of the matching patterns in all primitives.
"""
import numpy as np
from math import sqrt
from typing import Union, Tuple, List
import scipy.linalg as spla
from utils import print_stars, bs_error_abort, npexp, npmaxabs, \
nppow, der_nppow, nprepeat_col, nprepeat_row, describe_array, test_vector
TripleArrays = Tuple[np.ndarray, np.ndarray, np.ndarray]
IPFPnoGradientResults = Tuple[TripleArrays, np.ndarray, np.ndarray, np.ndarray]
IPFPGradientResults = Tuple[TripleArrays,
np.ndarray, np.ndarray, np.ndarray, TripleArrays]
IPFPResults = Union[IPFPnoGradientResults, IPFPGradientResults]
def _ipfp_check_sizes(men_margins: np.ndarray, women_margins: np.ndarray,
                      Phi: np.ndarray) -> Tuple[int, int]:
    """Checks that the margins and surplus have the correct shapes and sizes.

    Args:
        men_margins: vector of men margins, shape (X)
        women_margins: vector of women margins, shape (Y)
        Phi: matrix of systematic surplus, expected shape (X, Y)

    Returns:
        the sizes X and Y of the two margins.
    """
    X = test_vector(men_margins)
    Y = test_vector(women_margins)
    if Phi.shape != (X, Y):
        # fixed: the reported shape was missing its closing parenthesis
        bs_error_abort(f"The shape of Phi should be ({X}, {Y})")
    return X, Y
def ipfp_homoskedastic_nosingles_solver(Phi: np.array, men_margins: np.array, women_margins: np.array,
                                        tol: float = 1e-9, gr: bool = False, verbose: bool = False,
                                        maxiter: int = 1000) -> IPFPnoGradientResults:
    """Solves for equilibrium in a Choo and Siow market without singles, given systematic surplus and margins

    Args:
        Phi: matrix of systematic surplus, shape (X, Y)
        men_margins: vector of men margins, shape (X)
        women_margins: vector of women margins, shape (Y)
        tol: tolerance on change in solution
        gr: if `True`, also evaluate derivatives of :math:`(\\mu_{xy})` wrt `Phi`
        verbose: if `True`, prints information
        maxiter: maximum number of iterations

    Returns:
        muxy: the matching patterns, shape (X, Y)
        marg_err_x, marg_err_y: the errors on the margins
        and the gradients of :math:`(\\mu_{xy})` wrt `Phi` if `gr` is `True`
    """
    X, Y = _ipfp_check_sizes(men_margins, women_margins, Phi)
    n_couples = np.sum(men_margins)
    # check that there are as many men as women
    if np.abs(np.sum(women_margins) - n_couples) > n_couples * tol:
        bs_error_abort("There should be as many men as women")
    # ephi2 = exp(Phi / 2); der_ephi2 is its elementwise derivative wrt Phi
    ephi2, der_ephi2 = npexp(Phi / 2.0, deriv=True)
    ephi2T = ephi2.T
    #############################################################################
    # we solve the equilibrium equations muxy = ephi2 * tx * ty
    # starting with a reasonable initial point for tx and ty: : tx = ty = bigc
    # it is important that it fit the number of individuals
    #############################################################################
    bigc = sqrt(n_couples / np.sum(ephi2))
    txi = np.full(X, bigc)
    tyi = np.full(Y, bigc)
    err_diff = bigc
    tol_diff = tol * err_diff
    niter = 0
    # IPFP fixed point: alternately rescale tx then ty to fit each margin
    while (err_diff > tol_diff) and (niter < maxiter):
        sx = ephi2 @ tyi
        tx = men_margins / sx
        sy = ephi2T @ tx
        ty = women_margins / sy
        err_x = npmaxabs(tx - txi)
        err_y = npmaxabs(ty - tyi)
        err_diff = err_x + err_y
        txi, tyi = tx, ty
        niter += 1
    muxy = ephi2 * np.outer(txi, tyi)
    marg_err_x = np.sum(muxy, 1) - men_margins
    marg_err_y = np.sum(muxy, 0) - women_margins
    if verbose:
        print(f"After {niter} iterations:")
        print(f"\tMargin error on x: {npmaxabs(marg_err_x)}")
        print(f"\tMargin error on y: {npmaxabs(marg_err_y)}")
    if not gr:
        return muxy, marg_err_x, marg_err_y
    else:
        # derivatives of muxy wrt Phi via the implicit function theorem:
        # differentiate the fixed-point equations and solve a linear system
        sxi = ephi2 @ tyi
        syi = ephi2T @ txi
        n_sum_categories = X + Y
        n_prod_categories = X * Y
        # start with the LHS of the linear system
        lhs = np.zeros((n_sum_categories, n_sum_categories))
        lhs[:X, :X] = np.diag(sxi)
        lhs[:X, X:] = ephi2 * txi.reshape((-1, 1))
        lhs[X:, X:] = np.diag(syi)
        lhs[X:, :X] = ephi2T * tyi.reshape((-1, 1))
        # now fill the RHS
        n_cols_rhs = n_prod_categories
        rhs = np.zeros((n_sum_categories, n_cols_rhs))
        # to compute derivatives of (txi, tyi) wrt Phi
        der_ephi2 /= (2.0 * ephi2)  # 1/2 with safeguards
        ivar = 0
        for iman in range(X):
            rhs[iman, ivar:(ivar + Y)] = -muxy[iman, :] * der_ephi2[iman, :]
            ivar += Y
        ivar1 = X
        ivar2 = 0
        for iwoman in range(Y):
            # stride Y picks the column of Phi entries involving this woman
            rhs[ivar1, ivar2:n_cols_rhs:Y] = -muxy[:, iwoman] * der_ephi2[:, iwoman]
            ivar1 += 1
            ivar2 += 1
        # solve for the derivatives of txi and tyi
        dt_dT = spla.solve(lhs, rhs)
        dt = dt_dT[:X, :]
        dT = dt_dT[X:, :]
        # now construct the derivatives of muxy
        dmuxy = np.zeros((n_prod_categories, n_cols_rhs))
        ivar = 0
        for iman in range(X):
            dt_man = dt[iman, :]
            dmuxy[ivar:(ivar + Y), :] = np.outer((ephi2[iman, :] * tyi), dt_man)
            ivar += Y
        for iwoman in range(Y):
            dT_woman = dT[iwoman, :]
            dmuxy[iwoman:n_prod_categories:Y, :] += np.outer((ephi2[:, iwoman] * txi), dT_woman)
        # add the term that comes from differentiating ephi2
        muxy_vec2 = (muxy * der_ephi2).reshape(n_prod_categories)
        dmuxy += np.diag(muxy_vec2)
        return muxy, marg_err_x, marg_err_y, dmuxy
def ipfp_homoskedastic_solver(Phi: np.array, men_margins: np.array, women_margins: np.array,
tol: float = 1e-9, gr: bool = False, verbose: bool = False,
maxiter: int = 1000) -> IPFPResults:
"""Solves for equilibrium in a Choo and Siow market with singles, given systematic surplus and margins
Args:
Phi: matrix of systematic surplus, shape (X, Y)
men_margins: vector of men margins, shape (X)
women_margins: vector of women margins, shape (Y)
tol: tolerance on change in solution
gr: if `True`, also evaluate derivatives of the matching patterns
verbose: if `True`, prints information
maxiter: maximum number of iterations
Returns:
(muxy, mux0, mu0y): the matching patterns
marg_err_x, marg_err_y: the errors on the margins
and the gradients of the matching patterns wrt (men_margins, women_margins, Phi) if `gr` is `True`
"""
X, Y = _ipfp_check_sizes(men_margins, women_margins, Phi)
ephi2, der_ephi2 = npexp(Phi / 2.0, deriv=True)
#############################################################################
# we solve the equilibrium equations muxy = ephi2 * tx * ty
# where mux0=tx**2 and mu0y=ty**2
# starting with a reasonable initial point for tx and ty: tx = ty = bigc
# it is important that it fit the number of individuals
#############################################################################
ephi2T = ephi2.T
nindivs = np.sum(men_margins) + np.sum(women_margins)
bigc = sqrt(nindivs / (X + Y + 2.0 * np.sum(ephi2)))
txi = np.full(X, bigc)
tyi = np.full(Y, bigc)
err_diff = bigc
tol_diff = tol * bigc
niter = 0
while (err_diff > tol_diff) and (niter < maxiter):
sx = ephi2 @ tyi
tx = (np.sqrt(sx * sx + 4.0 * men_margins) - sx) / 2.0
sy = ephi2T @ tx
ty = (np.sqrt(sy * sy + 4.0 * women_margins) - sy) / 2.0
err_x = npmaxabs(tx - txi)
err_y = npmaxabs(ty - tyi)
err_diff = err_x + err_y
txi = tx
tyi = ty
niter += 1
mux0 = txi * txi
mu0y = tyi * tyi
muxy = ephi2 * np.outer(txi, tyi)
marg_err_x = mux0 + np.sum(muxy, 1) - men_margins
marg_err_y = mu0y + np.sum(muxy, 0) - women_margins
if verbose:
print(f"After {niter} iterations:")
print(f"\tMargin error on x: {npmaxabs(marg_err_x)}")
print(f"\tMargin error on y: {npmaxabs(marg_err_y)}")
if not gr:
return (muxy, mux0, mu0y), marg_err_x, marg_err_y
else: # we compute the derivatives
sxi = ephi2 @ tyi
syi = ephi2T @ txi
n_sum_categories = X + Y
n_prod_categories = X * Y
# start with the LHS of the linear system
lhs = np.zeros((n_sum_categories, n_sum_categories))
lhs[:X, :X] = np.diag(2.0 * txi + sxi)
lhs[:X, X:] = ephi2 * txi.reshape((-1, 1))
lhs[X:, X:] = np.diag(2.0 * tyi + syi)
lhs[X:, :X] = ephi2T * tyi.reshape((-1, 1))
# now fill the RHS
n_cols_rhs = n_sum_categories + n_prod_categories
rhs = np.zeros((n_sum_categories, n_cols_rhs))
# to compute derivatives of (txi, tyi) wrt men_margins
rhs[:X, :X] = np.eye(X)
# to compute derivatives of (txi, tyi) wrt women_margins
rhs[X:n_sum_categories,
X:n_sum_categories] = np.eye(Y)
# to compute derivatives of (txi, tyi) wrt Phi
der_ephi2 /= (2.0 * ephi2) # 1/2 with safeguards
ivar = n_sum_categories
for iman in range(X):
rhs[iman, ivar:(ivar + Y)] = - \
muxy[iman, :] * der_ephi2[iman, :]
ivar += Y
ivar1 = X
ivar2 = n_sum_categories
for iwoman in range(Y):
rhs[ivar1, ivar2:n_cols_rhs:Y] = - \
muxy[:, iwoman] * der_ephi2[:, iwoman]
ivar1 += 1
ivar2 += 1
# solve for the derivatives of txi and | |
<gh_stars>1-10
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
from functools import total_ordering
from itertools import groupby
import attr
from licensedcode import MAX_DIST
from licensedcode import query
from licensedcode.spans import Span
from licensedcode.stopwords import STOPWORDS
from licensedcode.tokenize import matched_query_text_tokenizer
from licensedcode.tokenize import index_tokenizer
"""
LicenseMatch data structure and matches merging and filtering routines.
"""
# Tracing flags: each enables debug printing in one filtering/merging stage.
TRACE = False
TRACE_MERGE = False
TRACE_REFINE = False
TRACE_FILTER_CONTAINED = False
TRACE_FILTER_OVERLAPPING = False
TRACE_FILTER_SHORT = False
TRACE_FILTER_SPURIOUS_SINGLE_TOKEN = False
TRACE_FILTER_SPURIOUS = False
TRACE_FILTER_RULE_MIN_COVERAGE = False
TRACE_FILTER_LOW_SCORE = False
TRACE_SET_LINES = False
TRACE_MATCHED_TEXT = False
TRACE_MATCHED_TEXT_DETAILS = False

# these control the details in a LicenseMatch representation
TRACE_REPR_MATCHED_RULE = False
TRACE_REPR_SPAN_DETAILS = False
TRACE_REPR_THRESHOLDS = False


def logger_debug(*args):
    """No-op tracing hook; replaced below when any TRACE flag is enabled."""
    pass


if (TRACE
    or TRACE_MERGE
    or TRACE_REFINE
    or TRACE_FILTER_CONTAINED
    or TRACE_FILTER_OVERLAPPING
    # fixed: TRACE_FILTER_RULE_MIN_COVERAGE was listed twice in this condition
    or TRACE_FILTER_RULE_MIN_COVERAGE
    or TRACE_FILTER_SPURIOUS_SINGLE_TOKEN
    or TRACE_FILTER_SPURIOUS
    or TRACE_FILTER_SHORT
    or TRACE_FILTER_LOW_SCORE
    or TRACE_SET_LINES
    or TRACE_MATCHED_TEXT
    or TRACE_MATCHED_TEXT_DETAILS
):
    use_print = True
    if use_print:
        printer = print
    else:
        import logging
        import sys

        logger = logging.getLogger(__name__)
        # logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
        logging.basicConfig(stream=sys.stdout)
        logger.setLevel(logging.DEBUG)
        printer = logger.debug

    def logger_debug(*args):
        """Print a space-joined representation of args with the active printer."""
        return printer(' '.join(isinstance(a, str) and a or repr(a)
                                for a in args))
def _debug_print_matched_query_text(match, query, extras=5):
    """
    Print a matched query text including `extras` tokens before and after
    the match. Used for debugging license matches.
    """
    # create a fake new match with extra tokens before and after
    # NOTE(review): combine() with itself appears to produce a copy of the
    # match so the original qspan is left untouched — confirm with combine()
    new_match = match.combine(match)
    new_qstart = max([0, match.qstart - extras])
    new_qend = min([match.qend + extras, len(query.tokens)])
    new_qspan = Span(new_qstart, new_qend)
    new_match.qspan = new_qspan
    logger_debug(new_match)
    logger_debug(' MATCHED QUERY TEXT with extras')
    qt = new_match.matched_text(whole_lines=False)
    logger_debug(qt)
# TODO: use attrs
# FIXME: Implement each ordering functions. From the Python docs: Note: While
# this decorator makes it easy to create well behaved totally ordered types, it
# does come at the cost of slower execution and more complex stack traces for
# the derived comparison methods. If performance benchmarking indicates this is
# a bottleneck for a given application, implementing all six rich comparison
# methods instead is likely to provide an easy speed boost.
@total_ordering
class LicenseMatch(object):
"""
License detection match to a rule with matched query positions and lines and
matched index positions. Also computes a score for a match. At a high level,
a match behaves a bit like a Span and has several similar methods taking
into account both the query and index Span.
"""
__slots__ = (
'rule', 'qspan', 'ispan', 'hispan', 'query_run_start',
'matcher', 'start_line', 'end_line', 'query',
)
    def __init__(self, rule, qspan, ispan, hispan=None, query_run_start=0,
                 matcher='', start_line=0, end_line=0, query=None):
        """
        Create a new match from:
        - rule: matched Rule object
        - qspan: query text matched Span, start at zero which is the absolute
          query start (not the query_run start).
        - ispan: rule text matched Span, start at zero which is the rule start.
        - hispan: rule text matched Span for high tokens, start at zero which
          is the rule start. Always a subset of ispan.
        - matcher: a string indicating which matching procedure this match was
          created with. Used for diagnostics, debugging and testing.

        Note that the relationship between the qspan and ispan is such that:
        - they always have the exact same number of items but when sorted each
          value at an index may be different
        - the nth position when sorted by position is such that their token
          value is equal for this position.
        """
        self.rule = rule
        self.qspan = qspan
        self.ispan = ispan
        # default to a fresh empty Span rather than a shared mutable default
        self.hispan = Span() if hispan is None else hispan
        self.query_run_start = query_run_start
        self.matcher = matcher
        self.start_line = start_line
        self.end_line = end_line
        self.query = query
    def __repr__(
        self,
        trace_spans=TRACE_REPR_SPAN_DETAILS,
        trace_thresholds=TRACE_REPR_THRESHOLDS,
        trace_rule=TRACE_REPR_MATCHED_RULE,
    ):
        """
        Return a debug representation of this match. The TRACE_REPR_* module
        flags (exposed as default arguments) control the level of detail.
        """
        spans = ''
        if trace_spans:
            hispan = self.hispan
            qspan = self.qspan
            ispan = self.ispan
            # %-formatting against locals() picks up the three span variables
            spans = ('\n qspan=%(qspan)r, '
                     '\n ispan=%(ispan)r, '
                     '\n hispan=%(hispan)r' % locals())
        thresh = ''
        if trace_thresholds:
            qdens = round(self.qdensity() * 100, 2)
            idens = round(self.idensity() * 100, 2)
            thresh = ('\n qdens=%(qdens)r, idens=%(idens)r' % locals())
        rule_id = self.rule.identifier
        if trace_rule:
            rule_id = '\n ' + repr(self.rule)
        rep = dict(
            spans=spans,
            thresh=thresh,
            matcher=self.matcher,
            rule_id=rule_id,
            license_expression=self.rule.license_expression,
            score=self.score(),
            coverage=self.coverage(),
            len=self.len(),
            hilen=self.hilen(),
            qreg=(self.qstart, self.qend),
            rlen=self.rule.length,
            ireg=(self.istart, self.iend),
            lines=self.lines(),
        )
        return (
            'LicenseMatch: %(matcher)r, lines=%(lines)r, %(rule_id)r, '
            '%(license_expression)r, '
            'sc=%(score)r, cov=%(coverage)r, '
            'len=%(len)r, hilen=%(hilen)r, rlen=%(rlen)r, '
            'qreg=%(qreg)r, ireg=%(ireg)r%(thresh)s%(spans)s'
        ) % rep
def __eq__(self, other):
"""
Strict equality is based on licensing, matched positions and not based
on matched rule.
"""
return (isinstance(other, LicenseMatch)
and self.same_licensing(other)
and self.qspan == other.qspan
and self.ispan == other.ispan
)
def __ne__(self, other):
"""
Strict inequality is based on licensing, matched positions and not based
on matched rule.
"""
if not isinstance(other, LicenseMatch):
return True
return not all([
self.same_licensing(other),
self.qspan == other.qspan,
self.ispan == other.ispan,
])
    def same_licensing(self, other):
        """
        Return True if other has the same licensing.

        Delegates the comparison to the matched rules.
        """
        return self.rule.same_licensing(other.rule)
    def licensing_contains(self, other):
        """
        Return True if this match licensing contains the other match licensing.

        Delegates the containment check to the matched rules.
        """
        return self.rule.licensing_contains(other.rule)
    def lines(self):
        """Return the (start_line, end_line) tuple of this match."""
        return self.start_line, self.end_line
    @property
    def qstart(self):
        """Absolute start position of this match in the query."""
        return self.qspan.start
    def __lt__(self, other):
        # Matches order by query start position; @total_ordering on the class
        # derives the remaining comparisons from this and __eq__.
        return self.qstart < other.qstart
    @property
    def qend(self):
        """Absolute end position of this match in the query."""
        return self.qspan.end
    def len(self):
        """
        Return the length of the match as the number of matched tokens.

        Note: a method deliberately named `len`; call as `match.len()`.
        """
        return len(self.qspan)
    @property
    def istart(self):
        """Start position of this match in the rule (index) text."""
        return self.ispan.start
    @property
    def iend(self):
        """End position of this match in the rule (index) text."""
        return self.ispan.end
    def hilen(self):
        """
        Return the length of the match as the number of matched high tokens.

        hispan is always a subset of ispan (see __init__), so hilen() <= len().
        """
        return len(self.hispan)
    def __contains__(self, other):
        """
        Return True if qspan contains other.qspan and ispan contains other.ispan.

        Containment must hold on both the query and the index sides.
        """
        return other.qspan in self.qspan and other.ispan in self.ispan
    def qcontains(self, other):
        """
        Return True if qspan contains other.qspan (query-side containment only).
        """
        return other.qspan in self.qspan
    def qdistance_to(self, other):
        """
        Return the absolute qspan distance to other match.
        Touching and overlapping matches have a zero distance.
        """
        return self.qspan.distance_to(other.qspan)
    def idistance_to(self, other):
        """
        Return the absolute ispan distance from self to other match.
        Touching and overlapping matches have a zero distance.
        """
        return self.ispan.distance_to(other.ispan)
    def overlap(self, other):
        """
        Return the number of overlapping query positions with other.
        """
        return self.qspan.overlap(other.qspan)
    def _icoverage(self):
        """
        Return the coverage of this match to the matched rule as a float between
        0 and 1.
        """
        # guard against zero-length rules to avoid a ZeroDivisionError
        if not self.rule.length:
            return 0
        return self.len() / self.rule.length
def coverage(self):
"""
Return the coverage of this match to the matched rule as a rounded float
between 0 and 100.
"""
return round(self._icoverage() * 100, 2)
    def qmagnitude(self):
        """
        Return the maximal query length represented by this match start and end
        in the query. This number represents the full extent of the matched
        query region including matched, unmatched and INCLUDING unknown tokens.

        The magnitude is the same as the length of a match for a contiguous
        match without any unknown token in its range. It will be greater than
        the matched length for a match with non-contiguous words. It can also be
        greater than the query length when there are unknown tokens in the
        matched range.
        """
        # The query side of the match may not be contiguous and may contain
        # unmatched known tokens or unknown tokens. Therefore we need to compute
        # the real portion query length including unknown tokens that is
        # included in this match, for both matches and unmatched tokens
        query = self.query
        qspan = self.qspan
        # NOTE(review): qrange() is defined elsewhere in this class (not shown
        # here); it provides the base start-to-end extent of qspan.
        qmagnitude = self.qrange()
        # note: to avoid breaking many tests we check query presence
        if query:
            # Compute a count of unknown tokens that are inside the matched
            # range, ignoring end position of the query span: unknowns here do
            # not matter as they are never in the match but they influence the
            # score.
            unknowns_pos = qspan & query.unknowns_span
            qspe = qspan.end
            unknowns_pos = (pos for pos in unknowns_pos if pos != qspe)
            qry_unkxpos = query.unknowns_by_pos
            unknowns_in_match = sum(qry_unkxpos[pos] for pos in unknowns_pos)
            # update the magnitude by adding the count of unknowns in the match.
            # This number represents the full extent of the matched query region
            # including matched, unmatched and unknown tokens.
            qmagnitude += unknowns_in_match
        return qmagnitude
def qcontains_stopwords(self):
"""
Return True if this match | |
<filename>experimental/language_structure/vrnn/utils.py
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""utils methods/classes."""
from typing import Any, Dict, Optional, Sequence
import numpy as np
import sklearn.metrics
import tensorflow as tf
import tensorflow_hub as hub
PADDING_VALUE = 0
def state_is_tuple(cell_type):
  """Returns True when `cell_type` names an RNN cell with tuple state (LSTM)."""
  return 'lstm' == cell_type
def create_mask(inputs: tf.Tensor,
                masking_prob: Dict[Any, float],
                seed: Optional[int] = None) -> tf.Tensor:
  """Creates mask by the masking probability of each element in the inputs.

  Args:
    inputs: tensor of element ids.
    masking_prob: maps an element value to its masking probability; elements
      not listed get probability 0 (never masked).
    seed: optional random seed for reproducible sampling.

  Returns:
    int32 tensor shaped like `inputs`: 1 where masked, 0 elsewhere.
  """
  # Per-position threshold: the masking probability of the element found there.
  threshold = tf.zeros_like(inputs, dtype=tf.float32)
  for element, ratio in masking_prob.items():
    threshold += tf.where(tf.equal(inputs, element), ratio, 0.0)
  prob = tf.random.uniform(inputs.shape, minval=0, maxval=1, seed=seed)
  return tf.cast(prob < threshold, tf.int32)
def value_in_tensor(inputs: tf.Tensor, tensor: tf.Tensor) -> tf.Tensor:
  """Checks if each element in `inputs` is in `tensor`.

  Returns a boolean tensor shaped like `inputs`.
  """
  # Append trailing axes matching `tensor`'s shape so equality can be tested
  # against every element of `tensor` at once, then reduce over those axes.
  tile_multiples = tf.concat(
      [tf.ones(tf.rank(inputs), dtype=tf.int32),
       tf.shape(tensor)], axis=0)
  inputs = tf.tile(tf.expand_dims(inputs, -1), tile_multiples)
  return tf.reduce_any(tf.equal(inputs, tensor), -1)
def create_rebalanced_sample_weights(
    labels: tf.Tensor,
    dtype: Optional[tf.dtypes.DType] = tf.float32,
    mask_padding: Optional[bool] = True) -> tf.Tensor:
  """Creates the sample weights by inverse of label counts.

  Args:
    labels: integer label tensor; 0 is treated as the padding label.
    dtype: dtype of the returned weight tensor.
    mask_padding: if True, zero out the weights at padding positions and
      renormalize the remaining weights.

  Returns:
    weight tensor shaped like `labels`.
  """
  unique_label, _, count = tf.unique_with_counts(tf.reshape(labels, [-1]))
  # Rarest label gets weight 1; more frequent labels proportionally less.
  weights = tf.reduce_min(count) / count
  # Scatter each label's weight back onto the positions holding that label.
  sample_weights = tf.map_fn(
      fn=lambda t: tf.where(labels == tf.cast(t[0], dtype=labels.dtype), t[1], 0
                           ),
      elems=tf.stack([tf.cast(unique_label, dtype=weights.dtype), weights],
                     axis=1))
  sample_weights = tf.cast(tf.reduce_sum(sample_weights, axis=0), dtype=dtype)
  if mask_padding:
    # tf.sign(labels) is 0 for the padding id 0 and 1 for real labels.
    sample_weights *= tf.cast(tf.sign(labels), dtype=dtype)
    sample_weights /= tf.reduce_mean(sample_weights)
  return sample_weights
def get_rnn_cls(cell_type: str):
  """Returns the Keras RNN layer class for `cell_type`.

  'lstm' -> LSTM, 'gru' -> GRU, anything else -> SimpleRNN.
  """
  rnn_layers = {
      'lstm': tf.keras.layers.LSTM,
      'gru': tf.keras.layers.GRU,
  }
  return rnn_layers.get(cell_type, tf.keras.layers.SimpleRNN)
def get_rnn_cell(cell_type: str):
  """Returns the Keras RNN cell class for `cell_type`.

  'lstm' -> LSTMCell, 'gru' -> GRUCell, anything else -> SimpleRNNCell.
  """
  rnn_cells = {
      'lstm': tf.keras.layers.LSTMCell,
      'gru': tf.keras.layers.GRUCell,
  }
  return rnn_cells.get(cell_type, tf.keras.layers.SimpleRNNCell)
def to_one_hot(x) -> tf.Tensor:
  """Returns the argmax of the input tensor in one-hot format."""
  indices = tf.math.argmax(x, axis=1)
  depth = x.shape.as_list()[-1]
  x_hard = tf.one_hot(indices, depth, dtype=x.dtype)
  # Straight-through trick: the forward value is the one-hot x_hard, while
  # gradients flow through the soft x.
  return tf.stop_gradient(x_hard - x) + x
def get_last_step(inputs: tf.Tensor, seq_length: tf.Tensor) -> tf.Tensor:
  """Returns the last step of inputs by the sequence length.

  If the sequence length is zero, it will return the zero tensor.

  Args:
    inputs: tensor of [batch_size, max_seq_length, hidden_size].
    seq_length: tensor of [batch_size] recording the actual length of inputs.

  Returns:
    tensor of [batch_size, hidden_size], where tensor[i, :] = inputs[i,
      seq_length[i] - 1, :] (zeros when seq_length[i] == 0).
  """
  batch_range = tf.range(tf.shape(seq_length)[0])
  # sign(seq_length) is 0 for empty sequences, 1 otherwise.
  non_empty_seq = tf.sign(seq_length)
  # Index of the last real step (seq_length - 1), clamped to 0 for empty
  # sequences so the gather below is always in bounds.
  safe_indices = tf.cast((seq_length - non_empty_seq), dtype=tf.int32)
  indices = tf.stack([batch_range, safe_indices], axis=1)
  result = tf.gather_nd(inputs, indices)
  # Expand axis to broadcast to the second dimension (hidden size);
  # zeroes out the rows gathered for empty sequences.
  result *= tf.expand_dims(tf.cast(non_empty_seq, dtype=result.dtype), axis=1)
  return result
# thanks for the implementation at
# https://blog.evjang.com/2016/11/tutorial-categorical-variational.html
def sample_gumbel(shape, eps=1e-20):
  """Sample from Gumbel 0 to 1."""
  uniform = tf.random.uniform(shape, minval=0, maxval=1)
  # -log(-log(U)) maps uniform samples to Gumbel(0, 1); eps guards log(0).
  return -tf.math.log(-tf.math.log(uniform + eps) + eps)
def gumbel_softmax_sample(logits, temperature):
  """Draw a sample from the Gumbel-Softmax distribution.

  Returns:
    (sample, adjusted_logits): the softmax sample and the Gumbel-perturbed,
    temperature-scaled logits it was computed from.
  """
  y = logits + sample_gumbel(tf.shape(logits))
  y_adjusted = y / temperature
  return tf.nn.softmax(y_adjusted), y_adjusted
class GumbelSoftmaxSampler(tf.keras.layers.Layer):
  """Gumbel-Softmax sampler.

  Sample from the Gumbel-Softmax distribution and optionally discretize.
  """

  def __init__(self,
               temperature,
               hard: bool = False,
               trainable_temperature: bool = True):
    """GumbelSoftmaxSampler constructor.

    Args:
      temperature: non-negative scalar
      hard: if True, take argmax, but differentiate w.r.t. soft sample y
      trainable_temperature: whether temperature is trainable
    """
    self._trainable_temperature = trainable_temperature
    self._initial_temperature = temperature
    self._hard = hard
    super(GumbelSoftmaxSampler, self).__init__()

  def build(self, input_shape):
    # The temperature is a (possibly trainable) layer weight initialized to
    # the constructor value.
    self._temperature = self.add_weight(
        'temperature',
        initializer=tf.keras.initializers.Constant(self._initial_temperature),
        trainable=self._trainable_temperature)
    super().build(input_shape)

  def call(self, logits: tf.Tensor, return_logits: bool = False):
    """Sample from the Gumbel-Softmax distribution and optionally discretize.

    Args:
      logits: [batch_size, n_class] unnormalized log-probs.
      return_logits: whether to also return logits tensor.

    Returns:
      A [batch_size, n_class] sample from the Gumbel-Softmax distribution.
      If self._hard=True, then the returned sample will be one-hot, otherwise it
      will be a probabilitiy distribution that sums to 1 across classes.
    """
    y, logits = gumbel_softmax_sample(logits, self._temperature)
    if self._hard:
      # Straight-through: one-hot forward pass, soft gradients.
      y = to_one_hot(y)
    if return_logits:
      return y, logits
    return y
class MLP(tf.keras.Model):
  """Multilayer perceptron."""

  def __init__(self,
               output_sizes: Sequence[int],
               use_bias: bool = True,
               dropout: float = 0.5,
               hidden_activation: Optional[Any] = None,
               final_activation: Optional[Any] = None):
    """MLP constructor.

    Args:
      output_sizes: units of each dense layer, in order.
      use_bias: whether the dense layers use a bias vector.
      dropout: dropout rate applied after each dense layer; None or 0 disables
        dropout.
      hidden_activation: activation applied by each dense layer.
      final_activation: optional callable appended after the last layer.
    """
    super(MLP, self).__init__()
    self._layers = []
    for output_size in output_sizes:
      self._layers.append(
          tf.keras.layers.Dense(
              output_size, activation=hidden_activation, use_bias=use_bias))
      if dropout not in (None, 0):
        self._layers.append(tf.keras.layers.Dropout(dropout))
    if final_activation:
      self._layers.append(final_activation)

  def call(self, inputs):
    # Plain sequential application of the configured layers.
    outputs = inputs
    for layer in self._layers:
      outputs = layer(outputs)
    return outputs
class SequentialWordLoss(tf.keras.losses.SparseCategoricalCrossentropy):
  """Cross entropy loss of the word id sequences."""

  def __init__(self, *args, word_weights: Optional[Any] = None, **kwargs):
    """SequentialWordLoss constructor.

    Args:
      *args: optional arguments passed to
        tf.keras.losses.SparseCategoricalCrossentropy.
      word_weights: of shape [vocab_size], the weights of each token, used to
        rescale loss. word_weights[0] should be the weight of the padding token
        id 0.
      **kwargs: optional arguments passed to
        tf.keras.losses.SparseCategoricalCrossentropy.
    """
    # Disable reduction to be able to apply sequence mask and (optional) word
    # weights.
    super(SequentialWordLoss, self).__init__(
        reduction=tf.keras.losses.Reduction.NONE, *args, **kwargs)
    self._word_weights = word_weights

  def call(self, y_true, y_pred, sample_weight: Optional[tf.Tensor] = None):
    """Returns the per-token loss, rescaled by the mask and word weights."""
    loss = super().call(y_true=y_true, y_pred=y_pred)
    # Fix: `if sample_weight:` evaluated tensor truthiness, which raises for
    # non-scalar tensors (and would skip a scalar 0 weight). Test presence
    # explicitly instead.
    if sample_weight is not None:
      sample_weight = tf.cast(sample_weight, dtype=loss.dtype)
      loss *= sample_weight
    if self._word_weights is not None:
      word_idx = tf.cast(y_true, tf.int32)
      weights = tf.gather(self._word_weights, word_idx)
      loss *= tf.cast(weights, dtype=loss.dtype)
    return loss
class BowLoss(SequentialWordLoss):
  """Bag-of-word loss [1].

  Reference:
    [1]: Zhao et al. Learning Discourse-level Diversity for Neural Dialog Models
    using Conditional Variational Autoencoders. https://arxiv.org/abs/1703.10960
  """

  def __init__(self, *args, sequence_axis: Optional[int] = 1, **kwargs):
    """BowLoss Constructor.

    Args:
      *args: arguments passed to super class SequentialWordLoss.
      sequence_axis: the axis of the sequence dimension bow logits to be
        repeated.
      **kwargs: arguments passed to super class SequentialWordLoss.
    """
    super(BowLoss, self).__init__(*args, **kwargs)
    self._sequence_axis = sequence_axis

  def call(self, y_true, bow_pred, sample_weight: Optional[tf.Tensor] = None):
    """Computes bow loss.

    Args:
      y_true: the label tensor, of shape [d0, d1, ..., dN] where dN =
        self._sequence_axis.
      bow_pred: the bow prediction logits, of shape [d0, d1, ..., d_{N-1}, H].
        It will be repeated to [d0, d1, ..., d_{N-1}, dN, H] and compute
        SequentialWordLoss with y_true.
      sample_weight: the optional tensor of shape [d0, d1, ..., dN] specifying
        the weight to rescale the loss.

    Returns:
      loss: tensor of shape [d0, d1, ..., dN].
    """
    y_true_shape = tf.shape(y_true)
    y_true_rank = len(y_true.shape)
    axis = self._sequence_axis
    if y_true_rank <= axis:
      raise ValueError(
          'Expected sequence axis {}, but y_true has a lower rank {}: {}'
          .format(axis, y_true_rank, y_true_shape))
    # Step 1/2: construct the multiples for tf.tile; insert the max_seq_length
    # multiple in the sequence axis. It's equivalent to:
    #   multiples = [1] * y_true_rank
    #   multiples.insert(axis, y_true_shape[axis])
    multiples = tf.concat([[1] * axis, [y_true_shape[axis]], [1] *
                           (y_true_rank - axis)],
                          axis=0)
    # Step 2/2: repeat `bow_pred` to match `y_true` on the sequence axis.
    y_pred = tf.tile(tf.expand_dims(bow_pred, axis=axis), multiples)
    loss = super().call(y_true, y_pred, sample_weight)
    return loss
class KlLoss(tf.keras.losses.KLDivergence):
  """KL divergence with Batch Prior Regularization support [1].

  Reference:
    [1]: Zhao et al. Learning Discourse-level Diversity for Neural Dialog Models
    using Conditional Variational Autoencoders. https://arxiv.org/abs/1703.10960
  """

  def __init__(self,
               bpr: bool,
               *args,
               from_logits: Optional[bool] = False,
               **kwargs):
    """KlLoss constructor.

    Args:
      bpr: whether to apply Batch Prior Regularization.
      *args: arguments passed to tf.keras.losses.KLDivergence.
      from_logits: if True, call() receives unnormalized logits and applies
        softmax before computing the divergence.
      **kwargs: arguments passed to tf.keras.losses.KLDivergence.
    """
    super(KlLoss, self).__init__(*args, **kwargs)
    self._bpr = bpr
    self._from_logits = from_logits

  def call(self, p_z, q_z):
    """Computes the (optionally batch-prior-regularized) KL divergence."""
    if self._from_logits:
      p_z = tf.nn.softmax(p_z)
      q_z = tf.nn.softmax(q_z)
    if self._bpr:
      if not p_z.shape.is_compatible_with(q_z.shape):
        raise ValueError(
            'Inconsistent shape between p_z_logits {} and q_z_logits {}'.format(
                p_z.shape, q_z.shape))
      # BPR: average both distributions over the batch first, then scale the
      # divergence back up by the batch size.
      batch_size = tf.shape(p_z)[0]
      p_z = tf.reduce_mean(p_z, axis=0)
      q_z = tf.reduce_mean(q_z, axis=0)
      loss = super().call(q_z, p_z) * tf.cast(batch_size, p_z.dtype)
    else:
      loss = super().call(q_z, p_z)
    return loss
class BertPreprocessor(tf.keras.Model):
  """Preprocessor converting text into BERT input formats."""

  def __init__(self, tfhub_url: str, max_seq_length: int):
    """BertPreprocessor constructor.

    Args:
      tfhub_url: TF-Hub handle of the BERT preprocessing model.
      max_seq_length: maximum packed sequence length; longer tokenized
        segments are truncated to this length.
    """
    super(BertPreprocessor, self).__init__()
    self._tfhub_url = tfhub_url
    self._max_seq_length = max_seq_length
    preprocess = hub.load(self._tfhub_url)
    self._special_tokens_dict = preprocess.tokenize.get_special_tokens_dict()
    self.tokenizer = hub.KerasLayer(preprocess.tokenize, name='tokenizer')
    self.packer = hub.KerasLayer(
        preprocess.bert_pack_inputs,
        arguments=dict(seq_length=self._max_seq_length),
        name='packer')

  def call(self, inputs: Sequence[tf.Tensor], concat: Optional[bool] = False):
    """Tokenizes, truncates and packs each text tensor into BERT inputs.

    Args:
      inputs: sequence of string tensors to preprocess.
      concat: if True, pack all segments together into one input dict;
        otherwise pack each segment on its own.

    Returns:
      A single packed input dict when `concat` is True, else a list with one
      packed input dict per input segment.
    """
    # Renamed the loop variable from `input` to `text`: shadowing the builtin
    # input() is an easy source of confusion.
    segments = [self.tokenizer(text) for text in inputs]
    truncated_segments = [
        segment[:, :self._max_seq_length] for segment in segments
    ]
    if concat:
      return self.packer(truncated_segments)
    return [self.packer([segment]) for segment in truncated_segments]

  @property
  def vocab_size(self) -> int:
    """Vocabulary size reported by the preprocessing model."""
    return self._special_tokens_dict['vocab_size'].numpy().item()
def _get_flatten_non_padding_value(
    tensors: Sequence[tf.Tensor],
    mask_gen_tensor: tf.Tensor) -> Sequence[tf.Tensor]:
  """Returns the flatten tensors with the padding filtered.

  Positions where the flattened `mask_gen_tensor` equals PADDING_VALUE are
  dropped from every tensor.
  """
  padding_mask = tf.reshape(mask_gen_tensor, [-1]) != PADDING_VALUE
  return [
      tf.boolean_mask(tf.reshape(tensor, [-1]), padding_mask)
      for tensor in tensors
  ]
def adjusted_mutual_info(y_true: tf.Tensor, y_pred: tf.Tensor) -> float:
  """Computes adjusted mutual information of non-padded prediction and label."""
  # pylint: disable=unbalanced-tuple-unpacking
  # Drop every position where the label is the padding value before scoring.
  y_pred, y_true = _get_flatten_non_padding_value([y_pred, y_true],
                                                  mask_gen_tensor=y_true)
  return sklearn.metrics.adjusted_mutual_info_score(y_true, y_pred)
def cluster_purity(y_true: | |
<gh_stars>0
#!/usr/bin/env python3
import sys
import copy
import rospy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
from math import pi, radians
from std_msgs.msg import String
from moveit_commander.conversions import pose_to_list
from tf import transformations as tf
ROTATION_AXIS = 'sxyz'
class BlueArmMoveGroup(object):
    def __init__(self):
        """Initialise the MoveIt commander interfaces for the blue arm.

        Sets up the robot/scene/move-group handles and a trajectory display
        publisher, prints basic debug information, and adds a protective box
        underneath the robot base.
        """
        super(BlueArmMoveGroup, self).__init__()
        ## BEGIN_SUB_TUTORIAL setup
        ##
        ## First initialize `moveit_commander`_ and a `rospy`_ node:
        moveit_commander.roscpp_initialize(sys.argv)
        ## Instantiate a `RobotCommander`_ object. Provides information such as the robot's
        ## kinematic model and the robot's current joint states
        robot = moveit_commander.RobotCommander()
        ## Instantiate a `PlanningSceneInterface`_ object. This provides a remote interface
        ## for getting, setting, and updating the robot's internal understanding of the
        ## surrounding world:
        scene = moveit_commander.PlanningSceneInterface()
        ## Instantiate a `MoveGroupCommander`_ object. This object is an interface
        ## to a planning group (group of joints). In this tutorial the group is the primary
        ## arm joints in the blue arm robot, so we set the group's name to "blue_arm".
        ## If you are using a different robot, change this value to the name of your robot
        ## arm planning group.
        ## This interface can be used to plan and execute motions:
        group_name = "blue_arm"
        move_group = moveit_commander.MoveGroupCommander(group_name)
        ## Create a `DisplayTrajectory`_ ROS publisher which is used to display
        ## trajectories in Rviz:
        display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path',
                                                       moveit_msgs.msg.DisplayTrajectory,
                                                       queue_size=20)
        ## END_SUB_TUTORIAL
        ## BEGIN_SUB_TUTORIAL basic_info
        ##
        ## Getting Basic Information
        ## ^^^^^^^^^^^^^^^^^^^^^^^^^
        # We can get the name of the reference frame for this robot:
        planning_frame = move_group.get_planning_frame()
        print( "============ Planning frame: %s" % planning_frame)
        # We can also print the name of the end-effector link for this group:
        eef_link = move_group.get_end_effector_link()
        print( "============ End effector link: %s" % eef_link)
        # We can get a list of all the groups in the robot:
        group_names = robot.get_group_names()
        print( "============ Available Planning Groups:", robot.get_group_names())
        # Sometimes for debugging it is useful to print the entire state of the
        # robot:
        print( "============ Printing robot state")
        print( robot.get_current_state())
        print( "")
        ## END_SUB_TUTORIAL
        # Misc variables
        self.robot = robot
        self.scene = scene
        self.move_group = move_group
        self.display_trajectory_publisher = display_trajectory_publisher
        self.planning_frame = planning_frame
        self.eef_link = eef_link
        self.group_names = group_names
        # Number of joints in the blue_arm planning group.
        self.dof = 7
        # Add a thin box under the base so planning avoids the mounting surface.
        box_pos = [0, 0, 0]
        box_euler = [0, 0, 0]
        box_size = [1, 1, 0.01]
        self.add_box('base_box', 'world', box_pos, box_euler, box_size)
def all_close(self, goal, actual, tolerance):
"""
Convenience method for testing if a list of values are within a tolerance of their counterparts in another list
@param: goal A list of floats, a Pose or a PoseStamped
@param: actual A list of floats, a Pose or a PoseStamped
@param: tolerance A float
@returns: bool
"""
all_equal = True
if type(goal) is list:
for index in range(len(goal)):
if abs(actual[index] - goal[index]) > tolerance:
return False
elif type(goal) is geometry_msgs.msg.PoseStamped:
return self.all_close(goal.pose, actual.pose, tolerance)
elif type(goal) is geometry_msgs.msg.Pose:
return self.all_close(pose_to_list(goal), pose_to_list(actual), tolerance)
return True
    def go_to_joint_goal(self, joint_goal):
        """Move the arm to the given joint configuration.

        @param: joint_goal  A list of joint values; must have exactly self.dof entries
        @returns: bool - False when the goal has the wrong number of joints,
                  otherwise whether the arm reached the goal within tolerance
        """
        # Reject goals that do not match the arm's degrees of freedom.
        if len(joint_goal) != self.dof:
            return False
        move_group = self.move_group
        # The go command can be called with joint values, poses, or without any
        # parameters if you have already set the pose or joint target for the group.
        move_group.go(joint_goal, wait=True)
        # Calling ``stop()`` ensures that there is no residual movement
        move_group.stop()
        # For testing: verify the commanded joints were actually reached.
        current_joints = move_group.get_current_joint_values()
        return self.all_close(joint_goal, current_joints, 0.05)
def go_to_pose_goal(self, pos, euler):
# Copy class variables to local variables to make the web tutorials more clear.
# In practice, you should use the class variables directly unless you have a good
# reason not to.
move_group = self.move_group
## BEGIN_SUB_TUTORIAL plan_to_pose
##
## Planning to a Pose Goal
## ^^^^^^^^^^^^^^^^^^^^^^^
## We can plan a motion for this group to a desired pose for the
## end-effector:
quaternion = tf.quaternion_from_euler(radians(euler[0]), radians(euler[1]), radians(euler[2]), axes=ROTATION_AXIS)
pose_goal = geometry_msgs.msg.Pose()
pose_goal.orientation.x = quaternion[0]
pose_goal.orientation.y = quaternion[1]
pose_goal.orientation.z = quaternion[2]
pose_goal.orientation.w = quaternion[3]
pose_goal.position.x = pos[0]
pose_goal.position.y = pos[1]
pose_goal.position.z = pos[2]
move_group.set_pose_target(pose_goal)
## Now, we call the planner to compute the plan and execute it.
plan = move_group.go(wait=True)
# Calling `stop()` ensures that there is no residual movement
move_group.stop()
# It is always good to clear your targets after planning with poses.
# Note: there is no equivalent function for clear_joint_value_targets()
move_group.clear_pose_targets()
## END_SUB_TUTORIAL
# For testing:
# Note that since this section of code will not be included in the tutorials
# we use the class variable rather than the copied state variable
current_pose = self.move_group.get_current_pose().pose
return self.all_close(pose_goal, current_pose, 0.05)
def display_trajectory(self, plan):
# Copy class variables to local variables to make the web tutorials more clear.
# In practice, you should use the class variables directly unless you have a good
# reason not to.
robot = self.robot
display_trajectory_publisher = self.display_trajectory_publisher
## BEGIN_SUB_TUTORIAL display_trajectory
##
## Displaying a Trajectory
## ^^^^^^^^^^^^^^^^^^^^^^^
## You can ask RViz to visualize a plan (aka trajectory) for you. But the
## group.plan() method does this automatically so this is not that useful
## here (it just displays the same trajectory again):
##
## A `DisplayTrajectory`_ msg has two primary fields, trajectory_start and trajectory.
## We populate the trajectory_start with our current robot state to copy over
## any AttachedCollisionObjects and add our plan to the trajectory.
display_trajectory = moveit_msgs.msg.DisplayTrajectory()
display_trajectory.trajectory_start = robot.get_current_state()
display_trajectory.trajectory.append(plan)
# Publish
display_trajectory_publisher.publish(display_trajectory)
## END_SUB_TUTORIAL
def execute_plan(self, plan):
# Copy class variables to local variables to make the web tutorials more clear.
# In practice, you should use the class variables directly unless you have a good
# reason not to.
move_group = self.move_group
## BEGIN_SUB_TUTORIAL execute_plan
##
## Executing a Plan
## ^^^^^^^^^^^^^^^^
## Use execute if you would like the robot to follow
## the plan that has already been computed:
move_group.execute(plan, wait=True)
## **Note:** The robot's current joint state must be within some tolerance of the
## first waypoint in the `RobotTrajectory`_ or ``execute()`` will fail
## END_SUB_TUTORIAL
def wait_for_state_update(self, box_name, box_is_known=False, box_is_attached=False, timeout=4):
# Copy class variables to local variables to make the web tutorials more clear.
# In practice, you should use the class variables directly unless you have a good
# reason not to.
scene = self.scene
## BEGIN_SUB_TUTORIAL wait_for_scene_update
##
        ## Ensuring Collision Updates Are Received
## ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
## If the Python node dies before publishing a collision object update message, the message
## could get lost and the box will not appear. To ensure that the updates are
## made, we wait until we see the changes reflected in the
## ``get_attached_objects()`` and ``get_known_object_names()`` lists.
## For the purpose of this tutorial, we call this function after adding,
## removing, attaching or detaching an object in the planning scene. We then wait
## until the updates have been made or ``timeout`` seconds have passed
start = rospy.get_time()
seconds = rospy.get_time()
while (seconds - start < timeout) and not rospy.is_shutdown():
# Test if the box is in attached objects
attached_objects = scene.get_attached_objects([box_name])
is_attached = len(attached_objects.keys()) > 0
# Test if the box is in the scene.
# Note that attaching the box will remove it from known_objects
is_known = box_name in scene.get_known_object_names()
# Test if we are in the expected state
if (box_is_attached == is_attached) and (box_is_known == is_known):
return True
# Sleep so that we give other threads time on the processor
rospy.sleep(0.1)
seconds = | |
return self.get_level(level)[local_win]
def get_global(self, level: int, global_win: int) -> np.ndarray:
"""
Get window using global index
Parameters
----------
level : int
The decimation level
global_win : int
Global window index
Returns
-------
np.ndarray
Window data
"""
index_offset = self.metadata.levels_metadata[level].index_offset
return self.get_local(level, global_win + index_offset)
def get_chan(self, level: int, chan: str) -> np.ndarray:
"""
Get all the windows for a channel
Parameters
----------
level : int
The decimation level
chan : str
The channel
Returns
-------
np.ndarray
The data for the channels
Raises
------
ChannelNotFoundError
If the channel is not found in the data
"""
from resistics.errors import ChannelNotFoundError
if chan not in self.metadata.chans:
raise ChannelNotFoundError(chan, self.metadata.chans)
idx = self.metadata.chans.index(chan)
return self.get_level(level)[..., idx, :]
    def to_string(self) -> str:
        """Class information as a string

        Delegates to the metadata's own to_string representation.
        """
        return self.metadata.to_string()
class Windower(ResisticsProcess):
    """
    Windows DecimatedData

    This is the primary window making process for resistics and should be used
    when alignment of windows with a site or across sites is required.

    This method uses numpy striding to produce window views into the decimated
    data.

    See Also
    --------
    WindowerTarget : A windower to make a target number of windows

    Examples
    --------
    The Windower windows a DecimatedData object given a reference time and some
    window parameters.

    There's quite a few imports needed for this example. Begin by doing the
    imports, defining a reference time and generating random decimated data.

    >>> from resistics.sampling import to_datetime
    >>> from resistics.testing import decimated_data_linear
    >>> from resistics.window import WindowSetup, Windower
    >>> dec_data = decimated_data_linear(fs=128)
    >>> ref_time = dec_data.metadata.first_time
    >>> print(dec_data.to_string())
    <class 'resistics.decimate.DecimatedData'>
    fs dt n_samples first_time last_time
    level
    0 2048.0 0.000488 16384 2021-01-01 00:00:00 2021-01-01 00:00:07.99951171875
    1 512.0 0.001953 4096 2021-01-01 00:00:00 2021-01-01 00:00:07.998046875
    2 128.0 0.007812 1024 2021-01-01 00:00:00 2021-01-01 00:00:07.9921875

    Next, initialise the window parameters. For this example, use small windows,
    which will make inspecting them easier.

    >>> win_params = WindowSetup(win_sizes=[16,16,16], min_olap=4).run(dec_data.metadata.n_levels, dec_data.metadata.fs)
    >>> win_params.summary()
    {
        'n_levels': 3,
        'min_n_wins': 5,
        'win_sizes': [16, 16, 16],
        'olap_sizes': [4, 4, 4]
    }

    Perform the windowing. This actually creates views into the decimated data
    using the numpy.lib.stride_tricks.sliding_window_view function. The shape
    for a data array at a decimation level is: n_wins x n_chans x win_size. The
    information about each level is also in the levels_metadata attribute of
    WindowedMetadata.

    >>> win_data = Windower().run(ref_time, win_params, dec_data)
    >>> win_data.data[0].shape
    (1365, 2, 16)
    >>> for level_metadata in win_data.metadata.levels_metadata:
    ...     level_metadata.summary()
    {
        'fs': 2048.0,
        'n_wins': 1365,
        'win_size': 16,
        'olap_size': 4,
        'index_offset': 0
    }
    {
        'fs': 512.0,
        'n_wins': 341,
        'win_size': 16,
        'olap_size': 4,
        'index_offset': 0
    }
    {
        'fs': 128.0,
        'n_wins': 85,
        'win_size': 16,
        'olap_size': 4,
        'index_offset': 0
    }

    Let's look at an example of data from the first decimation level for the
    first channel. This is simply a linear set of data ranging from 0...16_383.

    >>> dec_data.data[0][0]
    array([    0,     1,     2, ..., 16381, 16382, 16383])

    Inspecting the first few windows shows they are as expected including the
    overlap.

    >>> win_data.data[0][0, 0]
    array([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15])
    >>> win_data.data[0][1, 0]
    array([12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27])
    >>> win_data.data[0][2, 0]
    array([24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39])
    """

    def run(
        self,
        ref_time: RSDateTime,
        win_params: WindowParameters,
        dec_data: DecimatedData,
    ) -> WindowedData:
        """
        Perform windowing of DecimatedData

        Parameters
        ----------
        ref_time : RSDateTime
            The reference time
        win_params : WindowParameters
            The window parameters
        dec_data : DecimatedData
            The decimated data

        Returns
        -------
        WindowedData
            Windows for decimated data

        Raises
        ------
        ProcessRunError
            If the number of windows calculated in the window table does not
            match the size of the array views
        """
        metadata_dict = dec_data.metadata.dict()
        data = {}
        win_levels_metadata = []
        messages = []
        for ilevel in range(0, dec_data.metadata.n_levels):
            logger.info(f"Windowing decimation level {ilevel}")
            win_size = win_params.get_win_size(ilevel)
            olap_size = win_params.get_olap_size(ilevel)
            level_metadata = dec_data.metadata.levels_metadata[ilevel]
            win_table = get_win_table(ref_time, level_metadata, win_size, olap_size)
            n_wins = len(win_table.index)
            logger.info(f"{n_wins} windows, size {win_size}, overlap {olap_size}")
            messages.append(f"Level {ilevel}, generated {n_wins} windows")
            messages.append(f"Window size {win_size}, olap_size {olap_size}")
            # Too few windows at this level implies deeper levels will also be
            # short, so windowing stops here and keeps only completed levels.
            if n_wins < win_params.min_n_wins:
                logger.debug(f"Number windows {n_wins} < min. {win_params.min_n_wins}")
                messages.append(f"Num. windows {n_wins} < min. {win_params.min_n_wins}")
                messages.append(f"Level {ilevel} incomplete, terminating windowing")
                break
            win_level_data = self._get_level_data(
                dec_data.get_level(ilevel),
                win_table,
                dec_data.metadata.n_chans,
                win_size,
                olap_size,
            )
            # Sanity check: the strided views must agree with the window table.
            if win_level_data.shape[0] != n_wins:
                raise ProcessRunError(
                    self.name,
                    f"Num. windows mismatch {win_level_data.shape[0]} != {n_wins}",
                )
            win_level_metadata = self._get_level_metadata(
                level_metadata,
                win_table,
                win_size,
                olap_size,
            )
            data[ilevel] = win_level_data
            win_levels_metadata.append(win_level_metadata)
        metadata_dict["ref_time"] = ref_time
        metadata = self._get_metadata(metadata_dict, win_levels_metadata)
        metadata.history.add_record(self._get_record(messages))
        logger.info("Windowing completed")
        return WindowedData(metadata, data)

    def _get_level_data(
        self,
        data: np.ndarray,
        win_table: pd.DataFrame,
        n_chans: int,
        win_size: int,
        olap_size: int,
    ) -> np.ndarray:
        """
        Get window data for a decimation level

        Parameters
        ----------
        data : np.ndarray
            The decimated time data for the level
        win_table : pd.DataFrame
            The window table
        n_chans : int
            The number of channels
        win_size : int
            The window size
        olap_size : int
            The overlap size

        Returns
        -------
        np.ndarray
            Sliding window views in an array for the decimation level
        """
        from numpy.lib.stride_tricks import sliding_window_view

        # to_sample is the *start* sample of the last window; the slice end is
        # exclusive, hence the + 1 below.
        from_sample = win_table.loc[0, "from_sample"]
        to_sample = win_table.loc[win_table.index[-1], "from_sample"]
        # Consecutive windows advance by the window size minus the overlap.
        increment_size = win_size - olap_size
        # NOTE(review): sliding_window_view returns views sharing memory with
        # `data`; with writeable=True, mutating a window mutates the decimated
        # data as well — presumably intentional, confirm before changing.
        view = np.squeeze(
            sliding_window_view(data, window_shape=(n_chans, win_size), writeable=True)
        )
        return view[from_sample : to_sample + 1 : increment_size]

    def _get_level_metadata(
        self,
        level_metadata: DecimatedLevelMetadata,
        win_table: pd.DataFrame,
        win_size: int,
        olap_size: int,
    ) -> WindowedLevelMetadata:
        """Get the windowed metadata for a decimation level"""
        # The local-to-global offset must be a single constant for the level.
        offset = (win_table["global"] - win_table["local"]).unique()
        if len(offset) != 1:
            raise ValueError("Malformed window table, varying local to global offset")
        return WindowedLevelMetadata(
            fs=level_metadata.fs,
            n_wins=len(win_table.index),
            win_size=win_size,
            olap_size=olap_size,
            index_offset=offset[0],
        )

    def _get_metadata(
        self,
        metadata_dict: Dict[str, Any],
        levels_metadata: List[WindowedLevelMetadata],
    ) -> WindowedMetadata:
        """Get the metadata for the windowed data"""
        # file_info belongs to the source data on disk, not the new windows.
        metadata_dict.pop("file_info")
        metadata_dict["n_levels"] = len(levels_metadata)
        metadata_dict["levels_metadata"] = levels_metadata
        return WindowedMetadata(**metadata_dict)
class WindowerTarget(Windower):
    """
    Windower that selects window sizes to meet a target number of windows

    The minimum window size in window parameters will be respected even if the
    generated number of windows is below the target. This is to avoid situations
    where excessively small windows sizes are selected.

    .. warning::

        This process is primarily useful for quick processing of a single
        measurement and should not be used when any alignment of windows is
        required within a site or across sites.

    Parameters
    ----------
    target : int
        The target number of windows for each decimation level
    min_size : int
        The minimum allowable window size
    olap_proportion : float
        The overlap proportion of the window size

    See Also
    --------
    Windower : The window making process to use when alignment is required
    """

    target: int = 1000
    min_size: int = 64
    olap_proportion: float = 0.25

    def run(
        self,
        ref_time: RSDateTime,
        win_params: WindowParameters,
        dec_data: DecimatedData,
    ) -> WindowedData:
        """
        Perform windowing of DecimatedData with level-dependent window sizes

        Window sizes are chosen per level to produce close to ``target``
        windows; only ``min_n_wins`` from win_params is used, the per-level
        sizes in win_params are ignored.

        Parameters
        ----------
        ref_time : RSDateTime
            The reference time
        win_params : WindowParameters
            The window parameters (only min_n_wins is respected)
        dec_data : DecimatedData
            The decimated data

        Returns
        -------
        WindowedData
            Windows for decimated data

        Raises
        ------
        ProcessRunError
            If the number of windows calculated in the window table does not
            match the size of the array views
        """
        metadata_dict = dec_data.metadata.dict()
        data = {}
        win_levels_metadata = []
        messages = []
        for ilevel in range(0, dec_data.metadata.n_levels):
            logger.info(f"Windowing decimation level {ilevel}")
            level_metadata = dec_data.metadata.levels_metadata[ilevel]
            win_size = self._get_win_size(level_metadata)
            olap_size = int(np.floor(self.olap_proportion * win_size))
            win_table = get_win_table(ref_time, level_metadata, win_size, olap_size)
            n_wins = len(win_table.index)
            logger.info(f"{n_wins} windows, size {win_size}, overlap {olap_size}")
            messages.append(f"Level {ilevel}, generated {n_wins} windows")
            messages.append(f"Window size {win_size}, olap_size {olap_size}")
            if n_wins < win_params.min_n_wins:
                logger.debug(f"Number windows {n_wins} < min. {win_params.min_n_wins}")
                messages.append(f"Num. windows {n_wins} < min. {win_params.min_n_wins}")
                messages.append(f"Level {ilevel} incomplete, terminating windowing")
                break
            win_level_data = self._get_level_data(
                dec_data.get_level(ilevel),
                win_table,
                dec_data.metadata.n_chans,
                win_size,
                olap_size,
            )
            # Same sanity check as Windower.run: the strided views must agree
            # with the window table (previously missing in this subclass).
            if win_level_data.shape[0] != n_wins:
                raise ProcessRunError(
                    self.name,
                    f"Num. windows mismatch {win_level_data.shape[0]} != {n_wins}",
                )
            win_level_metadata = self._get_level_metadata(
                level_metadata,
                win_table,
                win_size,
                olap_size,
            )
            data[ilevel] = win_level_data
            win_levels_metadata.append(win_level_metadata)
        # Record the reference time actually used to build the window tables.
        # The original stored first_time here, which disagreed with the window
        # indices computed from ref_time above (and with Windower.run).
        metadata_dict["ref_time"] = ref_time
        metadata = self._get_metadata(metadata_dict, win_levels_metadata)
        metadata.history.add_record(self._get_record(messages))
        logger.info("Windowing completed")
        return WindowedData(metadata, data)

    def _get_win_size(self, level_metadata: DecimatedLevelMetadata) -> int:
        r"""
        Get window size that gives close to the target number of windows

        Windows increment by (window size - overlap size), therefore the
        following equation is solved,

        .. math::

            n_{samples} / ((1 - n_{overlap})*n_{window}) = target

        Rearranging, get,

        .. math::

            n_{window} = n_{samples} / ((1 - n_{overlap})*target)

        Parameters
        ----------
        level_metadata : DecimatedLevelMetadata
            The metadata for the decimation level

        Returns
        -------
        int
            The window size, no smaller than self.min_size
        """
        win_size = level_metadata.n_samples / ((1 - self.olap_proportion) * self.target)
        win_size = int(np.floor(win_size))
        # Clamp to the minimum to avoid excessively small windows.
        if win_size < self.min_size:
            return self.min_size
        return win_size
class WindowedDataWriter(ResisticsWriter):
"""Writer of resistics windowed data"""
def run(self, dir_path: Path, win_data: WindowedData) -> None:
"""
Write out WindowedData
Parameters
----------
dir_path : Path
The directory path to write to
win_data : WindowedData
Windowed data to | |
device=device)
self.assertEqual(torch.mv(nm, _m), torch.mv(nm, _m, out=_m_out))
self.assertEqual(0, torch.isnan(torch.mv(nm, _m)).sum())
# torch.mm
mp = torch.randn((p, m), device=device).t()
np_out = torch.full((n, p), float('nan'), device=device)
self.assertEqual(torch.mm(nm, mp), torch.mm(nm, mp, out=np_out))
# torch.bmm
bnm = torch.randn((b, m, n), device=device).transpose(1, 2)
bmp = torch.randn((b, p, m), device=device).transpose(1, 2)
bnp_out = torch.full((b, n, p), float('nan'), device=device)
self.assertEqual(torch.bmm(bnm, bmp), torch.bmm(bnm, bmp, out=bnp_out))
@onlyCPU # not supported by CUBLAS
def test_blas_mv_large_input(self, device):
# This would previously fail if the allocated output had NaNs, see:
# https://github.com/pytorch/pytorch/issues/31663 and [NOTE: cpu_zero]
n = 3000
m = 200
nm = torch.randn((m, n), device=device).t()
_m = torch.randn((), device=device).expand(m)
_m_out = torch.full((m,), 0., device=device)
self.assertEqual(torch.mv(nm, _m), torch.mv(nm, _m, out=_m_out))
@onlyCPU
def test_renorm_ps(self, device):
# full reduction
x = torch.randn(5, 5)
xn = x.numpy()
for p in [1, 2, 3, 4, inf]:
res = x.renorm(p, 1, 1)
expected = x / x.norm(p, 0, keepdim=True).clamp(min=1)
self.assertEqual(res, expected, msg="renorm failed for {}-norm".format(p))
    @skipCPUIfNoLapack
    @skipCUDAIfNoCusolver
    @dtypes(*floating_and_complex_types())
    def test_householder_product(self, device, dtype):
        # Validates torch.linalg.householder_product against torch.linalg.qr
        # using reflectors/tau produced by LAPACK geqrf (via numpy).
        def generate_reflectors_and_tau(A):
            """
            This function uses numpy.linalg.qr with mode "raw" to extract output of LAPACK's geqrf.
            There is torch.geqrf function but it doesn't work with complex-valued input.
            """
            if A.numel() > 0:
                A_cpu = A.cpu()
                flattened_batch_shape = [-1, *A_cpu.shape[-2:]]
                reflectors = torch.empty_like(A_cpu).view(*flattened_batch_shape)
                tau_shape = [*A_cpu.shape[:-2], A_cpu.shape[-1]]
                tau = torch.empty(tau_shape, dtype=dtype).view(-1, A_cpu.shape[-1])
                # Run geqrf matrix-by-matrix over the flattened batch; numpy's
                # "raw" mode returns the reflectors transposed, hence the .T.
                for A_i, reflectors_i, tau_i in zip(A_cpu.contiguous().view(*flattened_batch_shape), reflectors, tau):
                    reflectors_tmp, tau_i[:] = map(torch.from_numpy, np.linalg.qr(A_i, mode='raw'))
                    reflectors_i[:] = reflectors_tmp.T
                reflectors = reflectors.view(*A_cpu.shape)
                tau = tau.view(tau_shape)
                return reflectors.to(A.device), tau.to(A.device)
            # Empty input: return empty reflectors/tau of the right shapes.
            reflectors = torch.empty_like(A)
            tau = torch.empty(*A.shape[:-2], A.shape[-1], dtype=dtype, device=device)
            return reflectors, tau

        def run_test(shape):
            A = torch.randn(*shape, dtype=dtype, device=device)
            reflectors, tau = generate_reflectors_and_tau(A)
            expected, _ = torch.linalg.qr(A)
            actual = torch.linalg.householder_product(reflectors, tau)
            # torch.linalg.qr does not work correctly for zero batch dimension tensors
            # see https://github.com/pytorch/pytorch/issues/50576
            if (A.numel() > 0):
                self.assertEqual(expected, actual)
            else:
                self.assertTrue(actual.shape == shape)
            # if tau is empty and A is not the result should be a matrix with ones on the diagonal
            if (A.numel() > 0):
                tau_empty = torch.empty(*shape[:-2], 0, dtype=dtype, device=device)
                identity_mat = torch.zeros_like(reflectors)
                identity_mat.diagonal(dim1=-1, dim2=-2)[:] = 1
                actual = torch.linalg.householder_product(reflectors, tau_empty)
                self.assertEqual(actual, identity_mat)
            # The out= variant must match the returned tensor.
            out = torch.empty_like(A)
            ans = torch.linalg.householder_product(reflectors, tau, out=out)
            self.assertEqual(ans, out)
            if (A.numel() > 0):
                self.assertEqual(expected, out)

        shapes = [(0, 0), (5, 0),  # Empty matrix
                  (5, 5), (5, 3),  # Single matrix
                  (0, 0, 0), (0, 5, 5), (0, 5, 3),  # Zero batch dimension tensors
                  (2, 5, 5), (2, 5, 3),  # 3-dim tensors
                  (2, 1, 5, 5), (2, 1, 5, 3)]  # 4-dim tensors
        for shape in shapes:
            run_test(shape)
    @skipCPUIfNoLapack
    @skipCUDAIfNoCusolver
    def test_householder_product_errors_and_warnings(self, device):
        # Exercises the error and warning paths of householder_product:
        # shape validation, out= resizing, dtype and device mismatches.
        test_cases = [
            # input1 size, input2 size, error regex
            ((10,), (2,), r"input must have at least 2 dimensions"),
            ((10, 6), (20,), r"input.shape\[-1\] must be greater than or equal to tau.shape\[-1\]"),
            ((6, 10), (5,), r"input.shape\[-2\] must be greater than or equal to input.shape\[-1\]"),
        ]
        for a_size, tau_size, error_regex in test_cases:
            a = torch.rand(*a_size, device=device)
            tau = torch.rand(*tau_size, device=device)
            with self.assertRaisesRegex(RuntimeError, error_regex):
                torch.linalg.householder_product(a, tau)
        # if out tensor with wrong shape is passed a warning is given
        reflectors = torch.randn(3, 3, device=device)
        tau = torch.randn(3, device=device)
        out = torch.empty(2, 3, device=device)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.householder_product(reflectors, tau, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # dtypes should be safely castable
        out = torch.empty_like(reflectors).to(torch.int)
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
            torch.linalg.householder_product(reflectors, tau, out=out)
        with self.assertRaisesRegex(RuntimeError, "tau dtype Int does not match input dtype"):
            torch.linalg.householder_product(reflectors, tau.to(torch.int))
        if torch.cuda.is_available():
            # device of out and input should match
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty_like(reflectors).to(wrong_device)
            with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
                torch.linalg.householder_product(reflectors, tau, out=out)
            # device of tau and input should match
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            tau = tau.to(wrong_device)
            with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
                torch.linalg.householder_product(reflectors, tau)
    @precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_linalg_lu_factor_and_lu(self, device, dtype):
        # Tests lu, linalg.lu_factor and linalg.lu_factor_ex
        from torch.testing._internal.common_utils import random_matrix

        def run_test(A, pivot, singular, fn):
            k = min(A.shape[-2:])
            batch = A.shape[:-2]
            # Only linalg.lu_factor performs error checking on singular input.
            check_errors = (fn == torch.linalg.lu_factor)
            if singular and check_errors:
                # It may or may not throw as the LU decomposition without pivoting
                # may still succeed for singular matrices
                try:
                    LU, pivots = fn(A, pivot=pivot)
                except RuntimeError:
                    return
            else:
                LU, pivots = fn(A, pivot=pivot)[:2]
            self.assertEqual(LU.size(), A.shape)
            self.assertEqual(pivots.size(), batch + (k,))
            if not pivot:
                # Without pivoting the pivots must be the identity permutation.
                self.assertEqual(pivots, torch.arange(1, 1 + k, device=device, dtype=torch.int32).expand(batch + (k, )))
            # Round trip: unpacking the factors must reconstruct A.
            P, L, U = torch.lu_unpack(LU, pivots)
            self.assertEqual(P @ L @ U, A)

        sizes = ((3, 3), (5, 5), (4, 2), (3, 4), (0, 0), (0, 1), (1, 0))
        batches = ((0,), (2,), (3,), (1, 0), (3, 5))
        # Non pivoting just implemented for CUDA
        pivots = (True, False) if self.device_type == "cuda" else (True,)
        fns = (partial(torch.lu, get_infos=True), torch.linalg.lu_factor, torch.linalg.lu_factor_ex)
        for ms, batch, pivot, singular, fn in itertools.product(sizes, batches, pivots, (True, False), fns):
            m, n = ms
            A = random_matrix(m, n, *batch, singular=singular, dtype=dtype, device=device)
            # Just do one of them on singular matrices
            if A.numel() == 0 and not singular:
                continue
            run_test(A, pivot, singular, fn)
        # Reproducer of a magma bug,
        # see https://bitbucket.org/icl/magma/issues/13/getrf_batched-kernel-produces-nans-on
        # This is also a bug in cuSOLVER < 11.3
        if (dtype == torch.double
           and singular
           and (torch.version.cuda is None or
                torch.version.cuda.split('.') >= ["11", "3"])):
            A = torch.ones(batch + ms, dtype=dtype, device=device)
            run_test(A, pivot, singular, fn)
        # Info should be positive for rank deficient matrices
        A = torch.ones(5, 3, 3, device=device)
        self.assertTrue((torch.linalg.lu_factor_ex(A, pivot=True).info >= 0).all())
        if self.device_type == 'cpu':
            # Error checking, no pivoting variant on CPU
            with self.assertRaisesRegex(RuntimeError, 'LU without pivoting is not implemented on the CPU'):
                torch.lu(torch.empty(1, 2, 2), pivot=False)
            with self.assertRaisesRegex(RuntimeError, 'LU without pivoting is not implemented on the CPU'):
                torch.linalg.lu_factor(torch.empty(1, 2, 2), pivot=False)
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    @skipCUDAIfRocm
    @precisionOverride({torch.float: 1e-3})
    def test_lu_unpack(self, device, dtype):
        # Checks that unpacking the LU factors reconstructs the original
        # matrix for square, rectangular, batched and empty inputs.
        def run_test(pivot):
            for shape in ((3, 3), (5, 3, 3), (7, 3, 5, 5), (7, 5, 3, 3, 3)):
                a = torch.randn(*shape, dtype=dtype, device=device)
                a_lu, p = torch.lu(a, pivot=pivot)
                p_ref, l_ref, u_ref = torch.lu_unpack(a_lu, p)
                self.assertEqual(p_ref.matmul(l_ref.matmul(u_ref)), a)
            # Rectangular and empty shapes; values bounded to keep the
            # factorisation well conditioned.
            for shape in ((3, 3), (5, 3, 3), (7, 3, 5, 5), (7, 5, 3, 3, 3),
                          (3, 5), (5, 3), (3, 3, 5), (3, 5, 3),
                          (7, 5, 3, 5, 3), (7, 5, 3, 3, 5),
                          # empty tensors
                          (0, 0), (0, 0, 0), (0, 3, 3)
                          ):
                a = make_tensor(shape, dtype=dtype, device=device, low=-0.1, high=+0.1)
                a_lu, p = torch.lu(a, pivot=pivot)
                p_ref, l_ref, u_ref = torch.lu_unpack(a_lu, p)
                self.assertEqual(p_ref.matmul(l_ref.matmul(u_ref)), a)

        run_test(True)
        # The no-pivot variant only exists on CUDA.
        if self.device_type == 'cuda':
            run_test(False)
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    @dtypes(torch.double)
    def test_lu_unpack_check_input(self, device, dtype):
        # Validates lu_unpack's input checking and its unpack_* flags.
        x = torch.rand(5, 5, 5, device=device, dtype=dtype)
        lu_data, lu_pivots = torch.lu(x, pivot=True)
        # Pivots must be int32 and contiguous.
        with self.assertRaisesRegex(RuntimeError, "torch.int32 dtype"):
            torch.lu_unpack(lu_data, lu_pivots.long())
        with self.assertRaisesRegex(RuntimeError, "contiguous tensor"):
            torch.lu_unpack(lu_data, lu_pivots.mT)
        # check that once flags are unset, Nones are returned
        p, l, u = torch.lu_unpack(lu_data, lu_pivots, unpack_data=False)
        self.assertTrue((l == u) and l is None)
        p, l, u = torch.lu_unpack(lu_data, lu_pivots, unpack_pivots=False)
        self.assertTrue(p is None)
        p, l, u = torch.lu_unpack(lu_data, lu_pivots, unpack_data=False, unpack_pivots=False)
        self.assertTrue((p == l == u) and p is None)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.double)
    def test_lobpcg_basic(self, device, dtype):
        # Exercise LOBPCG with the 'basic' iteration method.
        self._test_lobpcg_method(device, dtype, 'basic')
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.double)
    @skipCUDAIfRocm
    def test_lobpcg_ortho(self, device, dtype):
        # Exercise LOBPCG with the 'ortho' iteration method.
        self._test_lobpcg_method(device, dtype, 'ortho')
def _test_lobpcg_method(self, device, dtype, method):
from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
from torch._linalg_utils import matmul, qform
from torch._lobpcg import lobpcg
def test_tracker(worker):
k = worker.iparams['k']
nc = worker.ivars['converged_count']
if k <= nc:
tol = worker.fparams['tol']
rerr = worker.tvars['rerr']
X = worker.X
E = worker.E
B = worker.B
A = worker.A
dtype = X.dtype
device = X.device
# Check convergence
self.assertLessEqual(rerr[:k].max(), tol)
# Check B-orthogonality
I = torch.eye(k, k, dtype=dtype, device=device)
self.assertEqual(qform(B, X[:, :k]), I)
# Check block equation
self.assertEqual(qform(A, X[:, :k]) / E[:k], I, atol=0.2, rtol=0)
orig_lobpcg = lobpcg
def lobpcg(*args, **kwargs):
kwargs['tracker'] = test_tracker
kwargs['niter'] = 1000
kwargs['method'] = method
kwargs['tol'] = 1e-8
return orig_lobpcg(*args, **kwargs)
prec = 5e-4
# check dense input
mm = torch.matmul
for batches in [(), (2,), (2, 3)]:
| |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# -----------
# SPDX-License-Identifier: MIT
# Copyright (c) 2021 <NAME>
# uuid : 633f2088-bbe3-11eb-b9c2-33be0bb8451e
# author: <NAME>
# email : <EMAIL>
# date : 2021-05-23
# -----------
"""
The `repair` command has access to tools that can repair various
problems that could occur.
- bad-links
- relative links that don't point to the correct file
- section attributes
- ATX headers that are missing links
--dry-run
"""
# ------------
# System Modules - Included with Python
import hashlib
from pathlib import Path
from datetime import datetime
from difflib import get_close_matches
# ------------
# 3rd Party - From pip
import click
from rich.console import Console
console = Console()
# ------------
# Custom Modules
from ..documentos.common import (
relative_path,
search,
)
from ..documentos.document import (
MarkdownDocument,
search as md_search,
document_lookup,
)
from ..documentos.markdown_classifiers import MarkdownAttributeSyntax
# -------------
def find_broken_urls(
    parent=None,
    links=None,
):
    """
    Examine the relative links of a MarkdownDocument and return the ones
    that do not resolve to an existing file on the file system.

    Works for images as well as relative links pointing to markdown files.

    # Parameters

    parent:Path
        - The path of the parent folder used to resolve relative links.

    links:list(tuple)
        - A list of tuples containing:
            - line number (0 based)
            - dict with at least the key 'url' (the URL portion of the
              markdown link)
        - May be None, which is treated as "no links".

    # Return

    A list of the input tuples whose URL target does not exist; an empty
    list when `links` is None or empty.
    """
    # Robustness fix: the old body crashed with TypeError when called with
    # the default links=None; treat that as "nothing to check".
    if links is None:
        return []
    problems = []
    for link in links:
        # We only want the file part of the URL, not any `#section` anchor.
        target, _, _ = link[1]["url"].partition("#")
        candidate = parent.joinpath(target).resolve()
        if not candidate.exists():
            problems.append(link)
    return problems
def classify_broken_urls(
    lookup=None,
    broken_urls=None,
):
    """
    Sort broken URLs into buckets describing how repairable they are.

    # Parameters

    lookup:dict
        - Keyed by file name, mapped to the list of MarkdownDocument
          objects sharing that name (same name, different paths).

    broken_urls:list
        - Tuples of (line number (0 based), dict) where the dict carries at
          least the 'url' key of the broken markdown link.

    # Return

    A dictionary with the keys:

    - exact_match    - exactly one file with the same name exists
    - exact_matches  - several files with the same name exist
    - suggestions    - no identical name, but some names are close
    - no_matches     - nothing matches and nothing is even similar

    Each key maps to a list of tuples (broken url item, list of matches).
    """
    classified = {
        "no_matches": [],
        "suggestions": [],
        "exact_match": [],
        "exact_matches": [],
    }
    for broken in broken_urls:
        _, link = broken
        # Only the file portion matters -- drop any `#section` anchor.
        target, _, _ = link["url"].partition("#")
        name = Path(target).name
        if name in lookup:
            candidates = list(lookup[name])
            bucket = "exact_match" if len(candidates) == 1 else "exact_matches"
            classified[bucket].append((broken, candidates))
            continue
        # No direct hit -- ask difflib whether any known name is close.
        # https://docs.python.org/3/library/difflib.html#difflib.get_close_matches
        close = get_close_matches(name, lookup.keys(), cutoff=0.8)
        if close:
            candidates = [doc for key in close for doc in lookup[key]]
            classified["suggestions"].append((broken, candidates))
        else:
            # A dead end: no file match and no suggestions either.
            classified["no_matches"].append((broken, []))
    return classified
def display_classified_url(results, root=None):
    """
    Print each broken link together with its candidate replacement files.

    # Parameters

    results:list
        - Tuples of (MarkdownDocument, problems) where problems is a list
          of ((line, url dict), matches) pairs; matches are
          MarkdownDocument objects.

    root:Path
        - The path to the root of the document folder; used to print
          paths relative to it.
    """
    for md, problems in results:
        relative_name = md.filename.relative_to(root)
        for defect, matches in problems:
            line_number, url = defect
            console.print(f"File: {relative_name}")
            console.print(f'Line: {line_number} -> `{url["full"]}`')
            for index, candidate in enumerate(matches, start=1):
                console.print(f"{index}. -> {candidate.filename.relative_to(root)}")
            console.print("")
def write_corrected_url(
    md=None,
    problems=None,
    root=None,
    dry_run=False,
):
    """
    Rewrite the broken relative links of one document and save it to disk.

    # Parameters

    md:MarkdownDocument
        - The document whose URLs need correcting.

    problems:list(tuple)
        - Tuples of ((line, url dict), matches) — the same items stored in
          the classified broken-urls lists; matches may hold
          MarkdownDocument objects or plain Path objects.

    root:Path
        - The path to the root of the document folder (for display only).

    dry_run:bool
        - If True, print the replacements but do not write the file.
        - NOTE(review): even in a dry run, `md.contents` is mutated in
          memory below — confirm callers discard the object afterwards.
    """
    console.print(f"File: {md.filename.relative_to(root)}")
    for defect, matches in problems:
        line, url = defect
        # Only the first candidate is used; it may be a MarkdownDocument
        # (take its filename) or already a Path.
        match = (
            matches[0].filename
            if isinstance(matches[0], MarkdownDocument)
            else matches[0]
        )  # assume pathlib.Path
        # Build a relative URL from this document's folder to the match.
        new_url = relative_path(
            md.filename.parent,
            match.parent,
        ).joinpath(match.name)
        # Replace only the file portion, keeping any `#section` anchor.
        left, _, _ = url["url"].partition("#")
        new_line = md.contents[line].replace(left, str(new_url))
        console.print(f"Line: {line} - Replacing `{left}` -> `{new_url}`")
        md.contents[line] = new_line
    if dry_run:
        console.print("------DRY-RUN------")
    else:
        with md.filename.open("w", encoding="utf-8") as fo:
            for line in md.contents:
                fo.write(line)
        console.print("Changes written...")
def display_and_fix_issues(results, root=None, dry_run=False):
    """
    Report all classified broken links and auto-repair the fixable ones.

    Unrepairable categories (no_matches, suggestions, exact_matches) are
    only displayed; the 'exact_match' category (single unambiguous match)
    is passed to `write_corrected_url` for repair.

    # Parameters

    results:dict
        - Keyed by category name ('no_matches', 'suggestions',
          'exact_matches', 'exact_match'), each mapping to a list of
          (MarkdownDocument, problems) items.

    root:Path
        - The path to the root of the document folder.

    dry_run:bool
        - Forwarded to `write_corrected_url`; if True nothing is written.
    """
    # Category banner text printed before each section of output.
    messages = {
        "no_matches": [
            "NO MATCHES",
            "The following files had no matches or any close matches within the system.",
        ],
        "suggestions": [
            "SUGGESTIONS",
            "The following files did not have any exact matches within the system but they had some close matches.",
        ],
        "exact_matches": [
            "EXACT MATCHES",
            "The following files have multiple exact matches within the system.",
        ],
        "exact_match": [
            "EXACT MATCHES",
            "The following files have a single, exact match within the system.",
        ],
    }
    # Display the files that had problems we can't repair automatically
    for key in (k for k in messages.keys() if k != "exact_match"):
        if results[key]:
            console.print("-" * 6)
            for msg in messages[key]:
                console.print(msg)
            console.print("")
            display_classified_url(results[key], root=root)
    # Display and repair the files we can fix
    key = "exact_match"
    if results[key]:
        console.print("-" * 6)
        for msg in messages[key]:
            console.print(msg)
        console.print("")
        for item in results[key]:
            md, problems = item
            write_corrected_url(
                md,
                problems,
                root=root,
                dry_run=dry_run,
            )
            console.print("")
        # NOTE(review): the summary line is printed only for dry runs,
        # and it says "corrected!" even though nothing was written —
        # confirm this wording/placement is intentional.
        if dry_run:
            console.print(f"Exact Matches - {len(results[key])} files corrected!")
        console.print("-" * 6)
def find_missing_header_attributes(
    files=None,
    root=None,
    display_problems=False,
):
    """
    Find ATX headers that are missing an attribute block.

    # Parameters

    files:list(MarkdownDocument)
        - The documents to scan for headers without attributes.

    root:Path
        - The path to the root of the document folder (used for display).

    display_problems:bool
        - If True, print each problem as it is found.
        - Default - False

    # Return

    A dictionary keyed by the MarkdownDocument objects that have missing
    attributes, mapped to the list of offending headers, each a tuple
    (line number, header text).
    """
    attribute_rule = MarkdownAttributeSyntax()
    defects = {}
    for doc in files:
        # doc.headers is keyed by header depth (1 to 6) and maps to a
        # list of (line number, header text) tuples.
        missing = []
        for headers in doc.headers.values():
            for header in headers:
                line_number, text = header
                if attribute_rule.match(text):
                    continue
                missing.append(header)
                if display_problems:
                    console.print(
                        f"MISSING ATTRIBUTE: `{doc.filename.relative_to(root)}` - Line: {line_number} - `{text}`"
                    )
        if missing:
            defects[doc] = missing
    return defects
def repair_header_issues(
issues,
root=None,
dry_run=False,
):
"""
# Parameters
issues:dict
- A dictionary keyed by the MarkdownDocument object with header
issues. It is mapped to a list of tuples (line number, header
text)
root:Path
- The path to the root of the document folder
dry_run:bool
- If true, it will not write changes
- Default - False
"""
for md, | |
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: ini
version_added: "2.4"
short_description: Uses an Ansible INI file as inventory source.
description:
- INI file based inventory, sections are groups or group related with special `:modifiers`.
- Entries in sections C([group_1]) are hosts, members of the group.
- Hosts can have variables defined inline as key/value pairs separated by C(=).
- The C(children) modifier indicates that the section contains groups.
- The C(vars) modifier indicates that the section contains variables assigned to members of the group.
- Anything found outside a section is considered an 'ungrouped' host.
- Values passed in the INI format using the ``key=value`` syntax are interpreted differently depending on where they are declared within your inventory.
- When declared inline with the host, INI values are processed by Python's ast.literal_eval function
(U(https://docs.python.org/2/library/ast.html#ast.literal_eval)) and interpreted as Python literal structures
(strings, numbers, tuples, lists, dicts, booleans, None). Host lines accept multiple C(key=value) parameters per line.
Therefore they need a way to indicate that a space is part of a value rather than a separator.
- When declared in a C(:vars) section, INI values are interpreted as strings. For example C(var=FALSE) would create a string equal to C(FALSE).
Unlike host lines, C(:vars) sections accept only a single entry per line, so everything after the C(=) must be the value for the entry.
- Do not rely on types set during definition, always make sure you specify type with a filter when needed when consuming the variable.
- See the Examples for proper quoting to prevent changes to variable type.
notes:
- Whitelisted in configuration by default.
- Consider switching to YAML format for inventory sources to avoid confusion on the actual type of a variable.
The YAML inventory plugin processes variable values consistently and correctly.
'''
EXAMPLES = '''# fmt: ini
# Example 1
[web]
host1
host2 ansible_port=222 # defined inline, interpreted as an integer
[web:vars]
http_port=8080 # all members of 'web' will inherit these
myvar=23 # defined in a :vars section, interpreted as a string
[web:children] # child groups will automatically add their hosts to parent group
apache
nginx
[apache]
tomcat1
tomcat2 myvar=34 # host specific vars override group vars
tomcat3 mysecret="'<PASSWORD>'" # proper quoting to prevent value changes
[nginx]
jenkins1
[nginx:vars]
has_java = True # vars in child groups override same in parent
[all:vars]
has_java = False # 'all' is 'top' parent
# Example 2
host1 # this is 'ungrouped'
# both hosts have same IP but diff ports, also 'ungrouped'
host2 ansible_host=127.0.0.1 ansible_port=44
host3 ansible_host=127.0.0.1 ansible_port=45
[g1]
host4
[g2]
host4 # same host as above, but member of 2 groups, will inherit vars from both
# inventory hostnames are unique
'''
import ast
import re
from ansible.inventory.group import to_safe_group_name
from ansible.plugins.inventory import BaseFileInventoryPlugin
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils._text import to_bytes, to_text
from ansible.utils.shlex import shlex_split
class InventoryModule(BaseFileInventoryPlugin):
"""
Takes an INI-format inventory file and builds a list of groups and subgroups
with their associated hosts and variable settings.
"""
NAME = 'ini'
_COMMENT_MARKERS = frozenset((u';', u'#'))
b_COMMENT_MARKERS = frozenset((b';', b'#'))
def __init__(self):
    """Initialize the ini inventory plugin with empty parse state."""
    super(InventoryModule, self).__init__()
    # Compiled regexes, filled lazily by self._compile_patterns() in _parse().
    self.patterns = {}
    # Path of the inventory file currently being parsed (used in errors).
    self._filename = None
def parse(self, inventory, loader, path, cache=True):
    """Read the inventory file at `path`, decode it and feed it to _parse().

    Any failure (I/O, decoding, parse errors) is re-raised as
    AnsibleParserError.
    """
    super(InventoryModule, self).parse(inventory, loader, path)
    self._filename = path
    try:
        # Read in the hosts, groups, and variables defined in the inventory file.
        if self.loader:
            # `private` (vault flag) is returned by the loader but unused here.
            (b_data, private) = self.loader._get_file_contents(path)
        else:
            b_path = to_bytes(path, errors='surrogate_or_strict')
            with open(b_path, 'rb') as fh:
                b_data = fh.read()
        try:
            # Faster to do to_text once on a long string than many
            # times on smaller strings
            data = to_text(b_data, errors='surrogate_or_strict').splitlines()
        except UnicodeError:
            # Handle non-utf8 in comment lines: https://github.com/ansible/ansible/issues/17593
            data = []
            for line in b_data.splitlines():
                if line and line[0] in self.b_COMMENT_MARKERS:
                    # Replace is okay for comment lines
                    # data.append(to_text(line, errors='surrogate_then_replace'))
                    # Currently we only need these lines for accurate lineno in errors
                    data.append(u'')
                else:
                    # Non-comment lines still have to be valid utf-8
                    data.append(to_text(line, errors='surrogate_or_strict'))
        self._parse(path, data)
    except Exception as e:
        raise AnsibleParserError(e)
def _raise_error(self, message):
    """Raise AnsibleError with `message` prefixed by file name and line number."""
    location = "%s:%d: " % (self._filename, self.lineno)
    raise AnsibleError(location + message)
def _parse(self, path, lines):
    '''
    Populates self.groups from the given array of lines. Raises an error on
    any parse failure.

    `lines` is the decoded inventory text, one string per line; `path` is
    used only to build error messages for unresolved declarations.
    '''
    self._compile_patterns()
    # We behave as though the first line of the inventory is '[ungrouped]',
    # and begin to look for host definitions. We make a single pass through
    # each line of the inventory, building up self.groups and adding hosts,
    # subgroups, and setting variables as we go.
    pending_declarations = {}
    groupname = 'ungrouped'
    state = 'hosts'
    self.lineno = 0
    for line in lines:
        self.lineno += 1
        line = line.strip()
        # Skip empty lines and comments
        if not line or line[0] in self._COMMENT_MARKERS:
            continue
        # Is this a [section] header? That tells us what group we're parsing
        # definitions for, and what kind of definitions to expect.
        m = self.patterns['section'].match(line)
        if m:
            (groupname, state) = m.groups()
            groupname = to_safe_group_name(groupname)
            state = state or 'hosts'
            if state not in ['hosts', 'children', 'vars']:
                title = ":".join(m.groups())
                self._raise_error("Section [%s] has unknown type: %s" % (title, state))
            # If we haven't seen this group before, we add a new Group.
            if groupname not in self.inventory.groups:
                # Either [groupname] or [groupname:children] is sufficient to declare a group,
                # but [groupname:vars] is allowed only if the group is declared elsewhere.
                # We add the group anyway, but make a note in pending_declarations to check at the end.
                #
                # It's possible that a group is previously pending due to being defined as a child
                # group, in that case we simply pass so that the logic below to process pending
                # declarations will take the appropriate action for a pending child group instead of
                # incorrectly handling it as a var state pending declaration
                if state == 'vars' and groupname not in pending_declarations:
                    pending_declarations[groupname] = dict(line=self.lineno, state=state, name=groupname)
                self.inventory.add_group(groupname)
            # When we see a declaration that we've been waiting for, we process and delete.
            if groupname in pending_declarations and state != 'vars':
                if pending_declarations[groupname]['state'] == 'children':
                    self._add_pending_children(groupname, pending_declarations)
                elif pending_declarations[groupname]['state'] == 'vars':
                    del pending_declarations[groupname]
            continue
        elif line.startswith('[') and line.endswith(']'):
            # Looks like a section header but didn't match the pattern.
            self._raise_error("Invalid section entry: '%s'. Please make sure that there are no spaces" % line + " " +
                              "in the section entry, and that there are no other invalid characters")
        # It's not a section, so the current state tells us what kind of
        # definition it must be. The individual parsers will raise an
        # error if we feed them something they can't digest.
        # [groupname] contains host definitions that must be added to
        # the current group.
        if state == 'hosts':
            hosts, port, variables = self._parse_host_definition(line)
            self._populate_host_vars(hosts, variables, groupname, port)
        # [groupname:vars] contains variable definitions that must be
        # applied to the current group.
        elif state == 'vars':
            (k, v) = self._parse_variable_definition(line)
            self.inventory.set_variable(groupname, k, v)
        # [groupname:children] contains subgroup names that must be
        # added as children of the current group. The subgroup names
        # must themselves be declared as groups, but as before, they
        # may only be declared later.
        elif state == 'children':
            child = self._parse_group_name(line)
            if child not in self.inventory.groups:
                if child not in pending_declarations:
                    pending_declarations[child] = dict(line=self.lineno, state=state, name=child, parents=[groupname])
                else:
                    pending_declarations[child]['parents'].append(groupname)
            else:
                self.inventory.add_child(groupname, child)
        else:
            # This can happen only if the state checker accepts a state that isn't handled above.
            self._raise_error("Entered unhandled state: %s" % (state))
    # Any entries in pending_declarations not removed by a group declaration above mean that there was an unresolved reference.
    # We report only the first such error here.
    for g in pending_declarations:
        decl = pending_declarations[g]
        if decl['state'] == 'vars':
            raise AnsibleError("%s:%d: Section [%s:vars] not valid for undefined group: %s" % (path, decl['line'], decl['name'], decl['name']))
        elif decl['state'] == 'children':
            raise AnsibleError("%s:%d: Section [%s:children] includes undefined group: %s" % (path, decl['line'], decl['parents'].pop(), decl['name']))
def _add_pending_children(self, group, pending):
    """Resolve a pending 'children' declaration for `group`.

    Attaches `group` to every parent recorded while it was pending; a
    parent that is itself a pending child declaration is resolved
    recursively. The entry for `group` is removed from `pending`
    (the dict is mutated in place).
    """
    for parent in pending[group]['parents']:
        self.inventory.add_child(parent, group)
        if parent in pending and pending[parent]['state'] == 'children':
            self._add_pending_children(parent, pending)
    del pending[group]
def _parse_group_name(self, line):
'''
Takes a single line and tries to parse | |
<gh_stars>1-10
# Copyright 2016, FBPIC contributors
# Authors: <NAME>, <NAME>
# License: 3-Clause-BSD-LBNL
"""
This test file is part of FB-PIC (Fourier-Bessel Particle-In-Cell).
It tests the global PIC loop by launching a linear periodic plasma wave,
and letting it evolve in time. Its fields are then compared with theory.
This tests is run both for linear and cubic shapes.
No moving window is involved, and periodic conditions are used.
Usage:
------
In order to show the images of the laser, and manually check the
agreement between the simulation and the theory:
(except when setting show to False in the parameters below)
$ python tests/test_periodic_plasma_wave.py # Single-proc simulation
$ mpirun -np 2 python tests/test_periodic_plasma_wave.py # Two-proc simulation
In order to let Python check the agreement between the curve without
having to look at the plots
$ py.test -q tests/test_periodic_plasma_wave.py
or
$ python setup.py test
Theory:
-------
The fields are given by the analytical formulas :
$$ \phi =
\epsilon \,\frac{m c^2}{e}
\exp\left(-\frac{r^2}{w_0^2}\right) \sin(k_0 z) \sin(\omega_p t)
+ \epsilon_1 \,\frac{m c^2}{e} \frac{2\,r\cos(\theta)}{w_0}
\exp\left(-\frac{r^2}{w_0^2}\right)\sin(k_0 z) \sin(\omega_p t)
+ \epsilon_2 \,\frac{m c^2}{e} \frac{4\,r^2\cos(2\theta)}{w_0^2}
\exp\left(-\frac{r^2}{w_0^2}\right)\sin(k_0 z) \sin(\omega_p t)$$
$$ E_r = -\partial_r \phi =
\epsilon \,\frac{mc^2}{e}\frac{2\,r}{w_0^2}
\exp\left(-\frac{r^2}{w_0^2}\right) \sin(k_0 z) \sin(\omega_p t)
- \epsilon_1 \,\frac{m c^2}{e} \frac{2\cos(\theta)}{w_0}
\exp\left(-\frac{r^2}{w_0^2}\right)\sin(k_0 z) \sin(\omega_p t)
+ \epsilon_1 \,\frac{m c^2}{e} \frac{4\,r^2\cos(\theta)}{w_0^3}
\exp\left(-\frac{r^2}{w_0^2}\right)\sin(k_0 z) \sin(\omega_p t)
- \epsilon_2 \,\frac{m c^2}{e} \frac{8\,r\cos(2\theta)}{w_0^2}
\exp\left(-\frac{r^2}{w_0^2}\right)\sin(k_0 z) \sin(\omega_p t)
+ \epsilon_2 \,\frac{m c^2}{e} \frac{8\,r^3\cos(2\theta)}{w_0^4}
\exp\left(-\frac{r^2}{w_0^2}\right)\sin(k_0 z) \sin(\omega_p t) $$
$$ E_\theta = - \frac{1}{r} \partial_\theta \phi =
\epsilon_1 \,\frac{m c^2}{e} \frac{2\,\sin(\theta)}{w_0}
\exp\left(-\frac{r^2}{w_0^2}\right)\sin(k_0 z) \sin(\omega_p t)
+ \epsilon_2 \,\frac{m c^2}{e} \frac{8\,r\sin(2\theta)}{w_0^2}
\exp\left(-\frac{r^2}{w_0^2}\right)\sin(k_0 z) \sin(\omega_p t) $$
$$ E_x = \cos(\theta)E_r - \sin(\theta)E_\theta =
\epsilon \,\frac{mc^2}{e}\frac{2\,x}{w_0^2}
\exp\left(-\frac{r^2}{w_0^2}\right) \sin(k_0 z) \sin(\omega_p t)
- \epsilon_1 \,\frac{m c^2}{e} \frac{2}{w_0}
\exp\left(-\frac{r^2}{w_0^2}\right)\sin(k_0 z) \sin(\omega_p t)
+ \epsilon_1 \,\frac{m c^2}{e} \frac{4\,x^2}{w_0^3}
\exp\left(-\frac{r^2}{w_0^2}\right)\sin(k_0 z) \sin(\omega_p t)
- \epsilon_2 \,\frac{m c^2}{e} \frac{8\,x}{w_0^2}
\exp\left(-\frac{r^2}{w_0^2}\right)\sin(k_0 z) \sin(\omega_p t)
+ \epsilon_2 \,\frac{m c^2}{e} \frac{8\,x(x^2-y^2)}{w_0^4}
\exp\left(-\frac{r^2}{w_0^2}\right)\sin(k_0 z) \sin(\omega_p t) $$
$$ E_y = \sin(\theta)E_r + \cos(\theta)E_\theta =
\epsilon \,\frac{mc^2}{e}\frac{2\,y}{w_0^2}
\exp\left(-\frac{r^2}{w_0^2}\right) \sin(k_0 z) \sin(\omega_p t)
+ \epsilon_1 \,\frac{m c^2}{e} \frac{4\,x y}{w_0^3}
\exp\left(-\frac{r^2}{w_0^2}\right)\sin(k_0 z) \sin(\omega_p t)
+ \epsilon_2 \,\frac{m c^2}{e} \frac{8\,y}{w_0^2}
\exp\left(-\frac{r^2}{w_0^2}\right)\sin(k_0 z) \sin(\omega_p t)
+ \epsilon_2 \,\frac{m c^2}{e} \frac{8\,y(x^2-y^2)}{w_0^4}
\exp\left(-\frac{r^2}{w_0^2}\right)\sin(k_0 z) \sin(\omega_p t) $$
$$ E_z = -\partial_z \phi =
- \epsilon \,\frac{mc^2}{e} k_0
\exp\left(-\frac{r^2}{w_0^2}\right) \cos(k_0 z) \sin(\omega_p t)
- \epsilon_1 \,\frac{m c^2}{e} \frac{2\,r\cos(\theta)}{w_0} k_0
\exp\left(-\frac{r^2}{w_0^2}\right)\cos(k_0 z) \sin(\omega_p t)
- \epsilon_2 \, \frac{m c^2}{e} \frac{4\,r^2\cos(\theta)}{w_0^2} k_0
\exp\left(-\frac{r^2}{w_0^2}\right)\cos(k_0 z) \sin(\omega_p t) $$
$$ v_x/c =
\epsilon \, \frac{c}{\omega_p} \, \frac{2\,x}{w_0^2}
\exp\left(-\frac{r^2}{w_0^2}\right) \sin(k_0 z) \cos(\omega_p t)
- \epsilon_1 \,\frac{c}{\omega_p} \frac{2}{w_0}
\exp\left(-\frac{r^2}{w_0^2}\right)\sin(k_0 z) \cos(\omega_p t)
+ \epsilon_1 \,\frac{c}{\omega_p} \frac{4\,x^2}{w_0^3}
\exp\left(-\frac{r^2}{w_0^2}\right)\sin(k_0 z) \cos(\omega_p t)
- \epsilon_2 \,\frac{c}{\omega_p} \frac{8\,x}{w_0^2}
\exp\left(-\frac{r^2}{w_0^2}\right)\sin(k_0 z) \cos(\omega_p t)
+ \epsilon_2 \,\frac{c}{\omega_p} \frac{8\,x(x^2-y^2)}{w_0^4}
\exp\left(-\frac{r^2}{w_0^2}\right)\sin(k_0 z) \cos(\omega_p t) $$
$$ v_y/c =
\epsilon \, \frac{c}{\omega_p} \, \frac{2\,y}{w_0^2}
\exp\left(-\frac{r^2}{w_0^2}\right) \sin(k_0 z) \cos(\omega_p t)
+ \epsilon_1 \,\frac{c}{\omega_p} \frac{4\,x y}{w_0^3}
\exp\left(-\frac{r^2}{w_0^2}\right)\sin(k_0 z) \cos(\omega_p t)
+ \epsilon_2 \,\frac{c}{\omega_p} \frac{8\,y}{w_0^2}
\exp\left(-\frac{r^2}{w_0^2}\right)\sin(k_0 z) \cos(\omega_p t)
+ \epsilon_2 \,\frac{c}{\omega_p} \frac{8\,y(x^2-y^2)}{w_0^4}
\exp\left(-\frac{r^2}{w_0^2}\right)\sin(k_0 z) \cos(\omega_p t) $$
$$ v_z/c =
- \epsilon \, \frac{c}{\omega_p} \, k_0
\exp\left(-\frac{r^2}{w_0^2}\right) \cos(k_0 z) \cos(\omega_p t)
- \epsilon_1 \,\frac{c}{\omega_p} \frac{2\,x}{w_0} k_0
\exp\left(-\frac{r^2}{w_0^2}\right) \cos(k_0 z) \cos(\omega_p t)
- \epsilon_2 \,\frac{c}{\omega_p} \frac{4\,(x^2-y^2)}{w_0^2} k_0
\exp\left(-\frac{r^2}{w_0^2}\right) \cos(k_0 z) \cos(\omega_p t)$$
where $\epsilon$ is the dimensionless amplitude of the mode 0 and
$\epsilon_1$, $\epsilon_2$ are the dimensionless amplitudes of modes 1 and 2.
"""
import numpy as np
from scipy.constants import c, e, m_e, epsilon_0
# Import the relevant structures in FBPIC
from fbpic.main import Simulation
from fbpic.fields import Fields
# Parameters
# ----------
# ----------------------------------------------------------------------
# Module-level simulation parameters, shared by the test entry points and
# the analytical field formulas below.
# ----------------------------------------------------------------------
show = True      # Whether to show the comparison between simulation
                 # and theory to the user, or to automatically determine
                 # whether they agree.
use_cuda = True  # Whether to run with cuda
# The simulation box
Nz = 200         # Number of gridpoints along z
zmax = 40.e-6    # Length of the box along z (meters)
Nr = 64          # Number of gridpoints along r
rmax = 20.e-6    # Length of the box along r (meters)
Nm = 3           # Number of modes used
n_order = 16     # Order of the finite stencil
# The simulation timestep
dt = zmax/Nz/c   # Timestep (seconds): light-crossing time of one z cell
# The particles
p_zmin = 0.e-6   # Position of the beginning of the plasma (meters)
p_zmax = 41.e-6  # Position of the end of the plasma (meters)
p_rmin = 0.      # Minimal radial position of the plasma (meters)
p_rmax = 18.e-6  # Maximal radial position of the plasma (meters)
n_e = 2.e24      # Density (electrons.meters^-3)
p_nz = 2         # Number of particles per cell along z
p_nr = 2         # Number of particles per cell along r
p_nt = 8         # Number of particles per cell along theta
# The plasma wave
epsilon = 0.001    # Dimensionless amplitude of the wave in mode 0
epsilon_1 = 0.001  # Dimensionless amplitude of the wave in mode 1
epsilon_2 = 0.001  # Dimensionless amplitude of the wave in mode 2
epsilons = [ epsilon, epsilon_1, epsilon_2 ]
w0 = 5.e-6       # The transverse size of the plasma wave
N_periods = 3    # Number of periods in the box
# Calculated quantities
k0 = 2*np.pi/zmax*N_periods               # Longitudinal wavenumber of the wave
wp = np.sqrt( n_e*e**2/(m_e*epsilon_0) )  # Electron plasma frequency (rad/s)
# Run the simulation for 0.75 plasma period
N_step = int( 2*np.pi/(wp*dt)*0.75 )
# -------------
# Test function
# -------------
def test_periodic_plasma_wave_linear_shape( show=False ):
    """Entry point run by py.test (`python setup.py test`): linear shape."""
    simulate_periodic_plasma_wave( 'linear', show=show )
def test_periodic_plasma_wave_cubic_shape( show=False ):
    """Entry point run by py.test (`python setup.py test`): cubic shape."""
    simulate_periodic_plasma_wave( 'cubic', show=show )
def simulate_periodic_plasma_wave( particle_shape, show=False ):
    """Simulate a periodic plasma wave and check its fields.

    Parameters
    ----------
    particle_shape: str
        Deposition shape passed to Simulation ('linear' or 'cubic').
    show: bool
        Forwarded to compare_fields for interactive inspection.
    """
    # Initialization of the simulation object
    sim = Simulation( Nz, zmax, Nr, rmax, Nm, dt,
                      p_zmin, p_zmax, p_rmin, p_rmax, p_nz, p_nr,
                      p_nt, n_e, n_order=n_order, use_cuda=use_cuda,
                      particle_shape=particle_shape )
    # Save the initial density in spectral space, and consider it
    # to be the density of the (uninitialized) ions
    sim.deposit('rho_prev', exchange=True)
    sim.fld.spect2interp('rho_prev')
    rho_ions = [ ]
    for m in range(len(sim.fld.interp)):
        # Ions carry the opposite charge density of the initial electrons.
        rho_ions.append( -sim.fld.interp[m].rho.copy() )
    # Impart velocities to the electrons
    # (The electrons are initially homogeneous, but have an
    # initial non-zero velocity that develops into a plasma wave)
    impart_momenta( sim.ptcl[0], epsilons, k0, w0, wp )
    # Run the simulation
    sim.step( N_step, correct_currents=True )
    # Plot the results and compare with analytical theory
    compare_fields( sim, show )
    # Check that div(E) - rho = 0 (directly in spectral space)
    check_charge_conservation( sim, rho_ions )
# -----------------------------------------
# Analytical solutions for the plasma wave
# -----------------------------------------
def Er( z, r, epsilons, k0, w0, wp, t) :
    """
    Analytical radial electric field of the plasma wave, evaluated in the
    half-plane theta=0.

    `epsilons` holds the dimensionless amplitudes of modes 0, 1 and 2;
    the result has the same length as `z` and `r`.
    """
    scale = m_e * c**2 / e
    # Common Gaussian/oscillatory envelope shared by every mode term.
    envelope = np.exp( -r**2/w0**2 ) * np.sin( k0*z ) * np.sin( wp*t )
    # Mode-dependent radial profile (derivative of the potential w.r.t. r).
    radial_terms = (
        epsilons[0] * 2*r/w0**2
        - epsilons[1] * 2/w0
        + epsilons[1] * 4*r**2/w0**3
        - epsilons[2] * 8*r/w0**2
        + epsilons[2] * 8*r**3/w0**4
    )
    return scale * radial_terms * envelope
def Ez( z, r, epsilons, k0, w0, wp, t) :
    """
    Analytical longitudinal electric field of the plasma wave, evaluated
    in the half-plane theta=0.

    `epsilons` holds the dimensionless amplitudes of modes 0, 1 and 2;
    the result has the same length as `z` and `r`.
    """
    scale = m_e * c**2 / e
    # Common envelope: Gaussian in r, cos in z (E_z = -d(phi)/dz), sin in t.
    envelope = np.exp( -r**2/w0**2 ) * np.cos( k0*z ) * np.sin( wp*t )
    # Mode-dependent transverse profile.
    mode_sum = (
        epsilons[0]
        + epsilons[1] * 2*r/w0
        + epsilons[2] * 4*r**2/w0**2
    )
    return -scale * k0 * mode_sum * envelope
def ux( z, r, x, y, epsilons, k0, w0, wp, t) :
"""
Return the radial normalized velocity as an array
of the same length as z, r, x, y
"""
ux_array = \
epsilons[0] * c/wp * 2*x/w0**2 * \
np.exp( -r**2/w0**2 ) * np.sin( k0*z ) * np.cos( wp*t ) \
- epsilons[1] * c/wp * 2/w0 * \
np.exp( -r**2/w0**2 ) * np.sin( k0*z ) * np.cos( wp*t ) \
+ epsilons[1] * c/wp * 4*x**2/w0**3 * \
np.exp( -r**2/w0**2 ) * np.sin( k0*z ) * np.cos( wp*t ) \
- epsilons[2] * c/wp * 8*x/w0**2 * \
np.exp( -r**2/w0**2 ) * np.sin( | |
import keras
from keras.models import Model
from keras.layers import Input,Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import multi_gpu_model
from keras.utils import plot_model
from keras import losses
import os
import tensorflow as tf
from keras import backend as K
import DataGenerator as dg
import get_modelv2_3
import get_model
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from keras.callbacks import ModelCheckpoint
from keras.callbacks import EarlyStopping
import pandas as pd
import numpy as np
from keras.models import load_model
import re
import seaborn as sns
from sklearn.linear_model import LinearRegression
import scipy
import warnings
import sys
from sklearn.metrics import roc_curve, auc
import time
warnings.filterwarnings('ignore')
os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1, 2'
class multi_task_training_respect:
def __init__(self):
    """Initialize models, learning rates and plotting defaults."""
    # Placeholder Keras models; replaced when the real architectures are built.
    self.model = keras.Model()
    self.model_class_task = keras.Model()
    self.model_reg_task = keras.Model()
    # Learning rates for the two task-specific optimizers.
    self.lr1 = 0.0001
    self.lr2 = 0.0001
    # Weighting between the two task losses.
    self.alpha = 0.5
    # Early-stopping patience (epochs) per task.
    self.patience_class = 6
    self.patience_reg = 6
    # Matplotlib font dictionaries used by the plotting helpers.
    self.font1 = {
        'weight': 'normal',
        'size': 16,
    }
    self.font2 = {
        'weight': 'normal',
        'size': 23,
    }
def get_batch_data(self, prot, comp, y, batch_count, batch_size, batch_count_per_epoch):
    """
    Slice one mini-batch out of the protein/compound/label sequences.

    `batch_count` wraps around every `batch_count_per_epoch` batches, so the
    same epoch-local batches are revisited across epochs; the final batch of
    an epoch may be shorter than `batch_size`.

    Returns the (prot, comp, y) slices for the requested batch.
    """
    index = batch_count % batch_count_per_epoch
    start = batch_size * index
    stop = min(batch_size * (index + 1), len(prot))
    return prot[start:stop], comp[start:stop], y[start:stop]
def draw_loss_and_accuracy_curve(self, history_class, history_class_vali, model_name, save_dir):
    """
    Plot and save the classification loss and accuracy curves.

    Parameters
    ----------
    history_class, history_class_vali : list of (loss, accuracy) pairs
        One entry per epoch, for training and validation respectively.
    model_name : str
        Used in the output file names.
    save_dir : str
        Directory the PNG files are written into.

    Fix: the figures are now closed after saving — the old code leaked one
    matplotlib figure per call, which accumulates across training runs.
    """
    train_loss = [entry[0] for entry in history_class]
    train_accuracy = [entry[1] for entry in history_class]
    vali_loss = [entry[0] for entry in history_class_vali]
    vali_accuracy = [entry[1] for entry in history_class_vali]
    epochs = range(1, len(history_class) + 1)
    ##---------------draw loss curve------------------##
    fig = plt.figure(figsize=(10, 10))
    plt.plot(epochs, train_loss, 'b', label='Classification training loss')
    plt.plot(epochs, vali_loss, 'r', label='Classification validation loss')
    plt.title('Classification Training and Validation Loss', self.font2)
    plt.xlabel('Epochs', self.font2)
    plt.ylabel('Loss', self.font2)
    plt.legend(prop=self.font1)
    plt.savefig(save_dir + '/%s_class_training_validation_loss.png' % model_name)
    plt.close(fig)  # release the figure to avoid matplotlib's open-figure leak
    ##---------------draw accuracy curve------------------##
    fig = plt.figure(figsize=(10, 10))
    plt.plot(epochs, train_accuracy, 'b', label='Classification training accuracy')
    plt.plot(epochs, vali_accuracy, 'r', label='Classification validation accuracy')
    plt.title('Training and Validation Accuracy', self.font2)
    plt.xlabel('Epochs', self.font2)
    plt.ylabel('Accuracy', self.font2)
    plt.legend(prop=self.font1)
    plt.savefig(save_dir + '/%s_class_training_validation_accuracy.png' % model_name)
    plt.close(fig)
def draw_loss_and_mse_curve(self,history_reg, history_reg_vali, model_name, save_dir):
train_loss = []
vali_loss = []
train_mse = []
vali_mse = []
for tmp in history_reg:
train_loss.append(tmp[0])
train_mse.append(tmp[1])
for tmp in history_reg_vali:
vali_loss.append(tmp[0])
vali_mse.append(tmp[1])
epochs = range(1, len(history_reg) + 1)
##---------------draw loss curve------------------##
plt.figure(figsize=(10.3, 10))
plt.plot(epochs, train_loss, 'b', label='Regression training loss')
plt.plot(epochs, vali_loss, 'r', label='Regression validation loss')
plt.title('Regression Training and Validation Loss', self.font2)
plt.xlabel('Epochs', self.font2)
plt.ylabel('Loss', self.font2)
plt.legend(prop=self.font1)
plt.savefig(save_dir + '/%s_reg_training_validation_loss.png' % model_name)
##---------------draw accuracy curve------------------##
plt.figure(figsize=(10, 10))
plt.plot(epochs, train_mse, 'b', label='Regression training mse')
plt.plot(epochs, vali_mse, 'r', label='Regression validation mse')
plt.title('Regression Training and Validation MSE', self.font2)
plt.xlabel('Epochs', self.font2)
plt.ylabel('MSE', self.font2)
plt.legend(prop=self.font1)
plt.savefig(save_dir + '/%s_reg_training_validation_mse.png' % model_name)
def mean_squared_error_l2(self,y_true, y_pred, lmbda=0.01):
cost = K.mean(K.square(y_pred - y_true))
# weights = self.model.get_weights()
weights = []
for layer in self.model_reg_task.layers:
# print(layer)
weights = weights + layer.get_weights()
# print (weights)
result = tf.reduce_sum([tf.reduce_sum(tf.pow(wi, 2)) for wi in weights])
l2 = lmbda * result # K.sum([K.square(wi) for wi in weights])
return cost + l2
def train_model(self,class_training_file,class_validation_file,reg_training_file,reg_validation_file,model_name,
reg_batch_size=128,class_batch_size=128,class_epoch = 50,reg_epoch = 100,
pro_branch_switch1 = 'inception_block', pro_branch_switch2 = 'inception_block',
pro_branch_switch3='inception_block_b', pro_add_attention = False,
comp_branch_switch1 = 'inception_block', comp_branch_switch2 = 'inception_block',
comp_branch_switch3 = 'inception_block_b', comp_add_attention = False):#reg_size=256
##2.get_model
save_dir = os.path.join(os.getcwd(), 'models',model_name)
if not os.path.exists(save_dir):
os.mkdir(save_dir)
self.model_class_task, self.model_reg_task = get_model.get_multi_model(save_dir, self.alpha,
pro_branch_switch1=pro_branch_switch1,pro_branch_switch2=pro_branch_switch2,
pro_branch_switch3=pro_branch_switch3,pro_add_attention=pro_add_attention,
comp_branch_switch1=comp_branch_switch1,comp_branch_switch2=comp_branch_switch2,
comp_branch_switch3=comp_branch_switch3,comp_add_attention=comp_add_attention)
optimizer1 = keras.optimizers.Adam(lr=self.lr1)
self.model_reg_task.compile(optimizer=optimizer1,
loss=self.mean_squared_error_l2,#'mean_squared_error'
metrics=['mse','mae'])
optimizer2 = keras.optimizers.Adam(lr=self.lr2)
self.model_class_task.compile(optimizer=optimizer2,loss='binary_crossentropy',metrics=['accuracy'])
##1.read data
print("Starting read reg training data:")
reg_train_generator = dg.read_reg_generator(reg_training_file, reg_batch_size)
reg_vali_prot, reg_vali_comp, reg_vali_value = dg.read_reg(reg_validation_file)
print('regression validation data shape:', len(reg_vali_prot))
class_train_generator = dg.read_class_generator(class_training_file, class_batch_size)
class_vali_prot, class_vali_comp, class_vali_label = dg.read_class(class_validation_file)
print('classification validation data shape:', len(class_vali_prot))
##3.training model
#before train prepare
batch_count_of_class=0
batch_count_per_epoch_class=189109//class_batch_size
batch_count_of_reg = 0
batch_count_per_epoch_reg = 18071 // reg_batch_size
epoch_class = 0
epoch_reg=0
history_class=[]
history_class_vali=[]
history_reg=[]
history_reg_vali=[]
class_erally_stop_flag=1
reg_erally_stop_flag = 1
class_batch_count = class_epoch * batch_count_per_epoch_class
reg_batch_count = reg_epoch * batch_count_per_epoch_reg
K = reg_batch_count/class_batch_count
total_batch_count=class_batch_count+reg_batch_count
#start train
reg_min_loss = float('inf')
reg_min_loss_index = 0
class_min_loss=float('inf')
class_min_loss_index=0
best_reg_model = None
best_class_model = None
best_reg_file = save_dir + "/%s_best_reg_model.hdf5" % model_name
best_class_file = save_dir + "/%s_best_class_model.hdf5" % model_name
reg_loss=[]
class_loss=[]
for i in range(total_batch_count):
#regression
if np.random.rand() * (1+K) >= 1 and reg_erally_stop_flag and epoch_reg<reg_epoch:
print('batch %d(reg):'%i)
reg_batch_prot, reg_batch_comp, reg_batch_value = next(reg_train_generator)
tmp_loss=self.model_reg_task.train_on_batch([reg_batch_prot, reg_batch_comp], reg_batch_value)
reg_loss.append(tmp_loss)
batch_count_of_reg+=1
if batch_count_of_reg % batch_count_per_epoch_reg==0 and batch_count_of_reg>0:
epoch_reg += 1
print("regression epoch %d:"%epoch_reg)
#train performance:loss, mse, mae
print(' regression training loss=',np.mean(reg_loss,axis=0))
history_reg.append(np.mean(reg_loss,axis=0))
reg_loss=[]
#validation performance
score=self.model_reg_task.evaluate([reg_vali_prot,reg_vali_comp],reg_vali_value)
print(' regression evaluation loss=',score)
history_reg_vali.append(score)
#checkpoint and earlly stop
if epoch_reg-reg_min_loss_index>=self.patience_reg:
reg_erally_stop_flag=0
if score[0]<reg_min_loss:
reg_min_loss_index=epoch_reg
reg_min_loss=score[0]
#checkpoint
best_reg_model = self.model_reg_task
# classification
else:
if class_erally_stop_flag and epoch_class<class_epoch:
print('batch %d(class):' % i)
class_batch_prot, class_batch_comp, class_batch_label = next(class_train_generator)
tmp_loss=self.model_class_task.train_on_batch([class_batch_prot, class_batch_comp], class_batch_label)
class_loss.append(tmp_loss)
batch_count_of_class += 1
if batch_count_of_class % batch_count_per_epoch_class == 0 and batch_count_of_class>0:
epoch_class += 1
print("classification epoch %d:"%epoch_class)
# train performance:loss, mse, mae
print(' classification training loss=',np.mean(class_loss,axis=0))
history_class.append(np.mean(class_loss,axis=0))
class_loss=[]#
accuracy = self.model_class_task.evaluate([class_vali_prot, class_vali_comp], class_vali_label)
# validation performance
print(' classification evaluation loss=',accuracy)
history_class_vali.append(accuracy)
# checkpoint and earlly stop
if epoch_class - class_min_loss_index >= self.patience_class:
class_erally_stop_flag = 0
if accuracy[0] < class_min_loss:
class_min_loss_index = epoch_class
class_min_loss = accuracy[0]
# checkpoint
best_class_model = self.model_class_task
##5.save model
#(1).class model
model_path = os.path.join(save_dir,model_name+'_class.h5')
best_class_model.save(model_path)
#(2).reg model
model_path = os.path.join(save_dir,model_name+'_reg.h5')
best_reg_model.save(model_path)
print("save model!")
def save_predict_result(self,predict_result,real_label_or_value,model_name,class_or_reg,type):
if predict_result.shape[1] == 1:
if class_or_reg=='class':
df = predict_result
df.columns = ['predict_label']
else:
df = predict_result
df.columns = ['predict_value']
else:
df = predict_result
df.columns = ['predict_label','predict_value']
if class_or_reg=='class':
df['real_lable'] = real_label_or_value
else:
df['real_value'] = real_label_or_value
df['set']=type
if not os.path.exists('predict_value'):
os.mkdir('predict_value')
df.to_csv('predict_value/multi-task_model_%s_%s_%s_predict_result.csv' % (model_name,class_or_reg,type),
index=False)
print('predict_value/multi-task_model_%s_%s_%s_predict_result.csv has been saved!' % (model_name,class_or_reg,type))
return df
    def computer_parameter_draw_scatter_plot(self,predictions, model_name):
        """Compute regression metrics (RMSE, MAE, Pearson R, SD) per data set
        and save a real-vs-predicted joint scatter plot for each set.

        ``predictions`` must contain 'predict_value', 'real_value' and 'set'
        columns (as produced by save_predict_result).
        """
        sns.set(context='paper', style='white')
        sns.set_color_codes()
        set_colors = {'train': 'b', 'validation': 'green', 'test': 'purple'}
        for set_name, table in predictions.groupby('set'):
            rmse = ((table['predict_value'] - table['real_value']) ** 2).mean() ** 0.5
            mae = (np.abs(table['predict_value'] - table['real_value'])).mean()
            # corr is a (r, p-value) pair from Pearson correlation.
            corr = scipy.stats.pearsonr(table['predict_value'], table['real_value'])
            # SD: residual standard deviation of the linear fit predicted -> real.
            lr = LinearRegression()
            lr.fit(table[['predict_value']], table['real_value'])
            y_ = lr.predict(table[['predict_value']])
            sd = (((table["real_value"] - y_) ** 2).sum() / (len(table) - 1)) ** 0.5
            print("%10s set: RMSE=%.3f, MAE=%.3f, R=%.2f (p=%.2e), SD=%.3f" %
                  (set_name, rmse, mae, *corr, sd))
            grid = sns.jointplot('real_value', 'predict_value', data=table, stat_func=None, color=set_colors[set_name],
                                 space=0, size=4, ratio=4, s=20, edgecolor='w', ylim=(0, 16), xlim=(0, 16)) # (0.16)
            grid.set_axis_labels('real', 'predicted')#, fontsize=16
            grid.ax_joint.set_xticks(range(0, 16, 5))
            grid.ax_joint.set_yticks(range(0, 16, 5))
            # Map the short set tag to the display name used in the plot title.
            a = {'train': 'training', 'validation': 'validation', 'test': 'test'}
            set_name=a[set_name]
            grid.ax_joint.text(1, 14, set_name + ' set', fontsize=14) # adjust the title size
            grid.ax_joint.text(16, 19.5, 'RMSE: %.3f' % (rmse), fontsize=9)
            grid.ax_joint.text(16, 18.5, 'MAE: %.3f ' % mae, fontsize=9)
            grid.ax_joint.text(16, 17.5, 'R: %.2f ' % corr[0], fontsize=9)
            grid.ax_joint.text(16, 16.5, 'SD: %.3f ' % sd, fontsize=9)
            grid.fig.savefig('%s_%s_scatter_plot.jpg' %(model_name,set_name), dpi=400)
def draw_ROC_curve(self,predictions, model_name):
set_colors = {'train': 'b', 'validation': 'green', 'test': 'purple','independent test':'r'}
for set_name, table in predictions.groupby('set'):
fpr, tpr, threshold = roc_curve(table['real_lable'],table['predict_label'])
roc_auc = auc(fpr, tpr)
plt.figure(figsize=(10, 10))
lw = 2
plt.plot(fpr, tpr, color=set_colors[set_name],
lw=lw, label='ROC curve (auc = %0.3f)' % roc_auc)
plt.plot([0, 1], [0, 1], 'b--', lw=lw,
label='Random guess (auc = 0.5)')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.tick_params(labelsize=20)
plt.xlabel('False Positive Rate', self.font2)
plt.ylabel('True Positive Rate', self.font2)
# plt.title('ROC curv')
plt.legend(loc="lower right", prop=self.font1)
plt.savefig("%s_%s_ROC_curve.png" %(model_name,set_name))
    def test_model(self,model_file,class_test_file,class_train_file,class_vali_file,
                   reg_test_file,reg_train_file,reg_vali_file):
        """Evaluate a saved joint model on the train/validation/test splits of
        both tasks: predict, persist the predictions to CSV, then draw scatter
        plots (regression) and ROC curves (classification).
        """
        ##read data
        print('starting read data!')
        #1.train data
        class_train_prot, class_train_comp, class_train_label=dg.multi_process_read_pro_com_file(class_train_file)
        reg_train_prot, reg_train_comp,_, reg_train_value=dg.multi_process_read_pro_com_file_regression(reg_train_file)
        #2.validation data
        class_vali_prot, class_vali_comp, class_vali_label = dg.multi_process_read_pro_com_file(class_vali_file)
        reg_vali_prot, reg_vali_comp, _,reg_vali_value = dg.multi_process_read_pro_com_file_regression(reg_vali_file)
        #3.test data
        class_test_prot, class_test_comp, class_test_label = dg.multi_process_read_pro_com_file(class_test_file)
        reg_test_prot, reg_test_comp,_, reg_test_value = dg.multi_process_read_pro_com_file_regression(reg_test_file)
        print('classification data size:', len(class_train_prot), len(class_vali_prot), len(class_test_prot))
        print('regression data size:', len(reg_train_prot),len(reg_vali_prot),len(reg_test_prot))
        ##load_model
        print('loading modle!')
        model = load_model(model_file)
        # Model name = file name without the trailing ".h5".
        tmp = model_file.split('/')[-1]
        model_name = re.findall(r"(.+?).h5", tmp)[0]
        ## saving predict value
        #predict value
        # The joint model returns [label_predictions, value_predictions].
        #1.train
        class_train_predict_value = model.predict([class_train_prot, class_train_comp])
        class_train_predict_value_df=pd.DataFrame(class_train_predict_value[0],columns=['label'])
        class_train_predict_value_df['value']=class_train_predict_value[1]
        reg_train_predict_value = model.predict([reg_train_prot, reg_train_comp])
        reg_train_predict_value_df=pd.DataFrame(reg_train_predict_value[0],columns=['label'])
        reg_train_predict_value_df['value']=reg_train_predict_value[1]
        #2.vali
        class_vali_predict_value = model.predict([class_vali_prot, class_vali_comp])
        class_vali_predict_value_df = pd.DataFrame(class_vali_predict_value[0])
        class_vali_predict_value_df['value']=class_vali_predict_value[1]
        reg_vali_predict_value = model.predict([reg_vali_prot, reg_vali_comp])
        reg_vali_predict_value_df = pd.DataFrame(reg_vali_predict_value[0])
        reg_vali_predict_value_df['value']=reg_vali_predict_value[1]
        #3.test
        class_test_predict_value = model.predict([class_test_prot, class_test_comp])
        class_test_predict_value_df = pd.DataFrame(class_test_predict_value[0])
        class_test_predict_value_df['value']=class_test_predict_value[1]
        reg_test_predict_value=model.predict([reg_test_prot, reg_test_comp])
        reg_test_predict_value_df = pd.DataFrame(reg_test_predict_value[0])
        reg_test_predict_value_df['value']=reg_test_predict_value[1]
        # save predicted value (also renames columns and appends ground truth)
        #1
        class_train_df = self.save_predict_result(class_train_predict_value_df, class_train_label, model_name, 'class', 'train')
        reg_train_df = self.save_predict_result(reg_train_predict_value_df, reg_train_value, model_name, 'reg', 'train')
        #2
        class_vali_df = self.save_predict_result(class_vali_predict_value_df, class_vali_label, model_name, 'class', 'validation')
        reg_vali_df = self.save_predict_result(reg_vali_predict_value_df, reg_vali_value, model_name, 'reg', 'validation')
        #3
        class_test_df = self.save_predict_result(class_test_predict_value_df, class_test_label, model_name, 'class', 'test')
        reg_test_df = self.save_predict_result(reg_test_predict_value_df, reg_test_value, model_name, 'reg', 'test')
        ## computing parameters and drawing scatter plot
        self.computer_parameter_draw_scatter_plot(reg_train_df, model_name)
        self.computer_parameter_draw_scatter_plot(reg_vali_df, model_name)
        self.computer_parameter_draw_scatter_plot(reg_test_df, model_name)
        self.draw_ROC_curve(class_train_df, model_name)
        self.draw_ROC_curve(class_vali_df, model_name)
        self.draw_ROC_curve(class_test_df, model_name)
def reg_test_model(self,model_file,reg_test_file,reg_train_file=None,reg_vali_file=None):
##load_model
print('loading modle!')
self.model_reg_task = load_model(model_file,
custom_objects={'mean_squared_error_l2': self.mean_squared_error_l2})
tmp = model_file.split('/')[-1]
if tmp.find('.h5')!=-1:
model_name = re.findall(r"(.+?).h5", tmp)[0]
else:
model_name = re.findall(r"(.+?).hdf5", tmp)[0]
##1.read data
print('starting read data!')
reg_test_prot, reg_test_comp,_, reg_test_value = dg.read_pro_com_file_regression(reg_test_file)#multi_process_read_pro_com_file_regression(reg_test_file)
print('test data size:',len(reg_test_prot))
reg_test_predict_value=self.model_reg_task.predict([reg_test_prot, reg_test_comp])
if model_name[-3:]=='reg':#reg_model
reg_test_predict_value_df = pd.DataFrame(reg_test_predict_value,columns=['value'])
else:#total model
reg_test_predict_value_df = pd.DataFrame(reg_test_predict_value[0], columns=['label'])
reg_test_predict_value_df['value']=reg_test_predict_value[1]
reg_test_df = self.save_predict_result(reg_test_predict_value_df, reg_test_value, model_name, 'reg', | |
"""
Testing numba implementation of the numba dictionary.
The tests here only check that the numba typing and codegen are working
correctly. Detailed testing of the underlying dictionary operations is done
in test_dictimpl.py.
"""
from __future__ import print_function, absolute_import, division
import sys
import numpy as np
from numba import njit, utils
from numba import int32, int64, float32, float64, types
from numba import dictobject
from numba.typed import Dict
from numba.utils import IS_PY3
from numba.errors import TypingError
from .support import TestCase, MemoryLeakMixin, unittest
skip_py2 = unittest.skipUnless(IS_PY3, reason='not supported in py2')
class TestDictObject(MemoryLeakMixin, TestCase):
    def test_dict_create(self):
        """
        Exercise dictionary creation, insertion and len
        """
        @njit
        def foo(n):
            # Typed dict with int32 keys and float32 values.
            d = dictobject.new_dict(int32, float32)
            for i in range(n):
                d[i] = i + 1
            return len(d)
        # Insert nothing
        self.assertEqual(foo(n=0), 0)
        # Insert 1 entry
        self.assertEqual(foo(n=1), 1)
        # Insert 2 entries
        self.assertEqual(foo(n=2), 2)
        # Insert 100 entries
        self.assertEqual(foo(n=100), 100)
    def test_dict_get(self):
        """
        Exercise dictionary creation, insertion and get
        """
        @njit
        def foo(n, targets):
            d = dictobject.new_dict(int32, float64)
            # insertion loop
            for i in range(n):
                d[i] = i
            # retrieval loop
            output = []
            for t in targets:
                output.append(d.get(t))
            return output
        # Keys absent from the dict yield get()'s implicit default, None.
        self.assertEqual(foo(5, [0, 1, 9]), [0, 1, None])
        self.assertEqual(foo(10, [0, 1, 9]), [0, 1, 9])
        self.assertEqual(foo(10, [-1, 9, 1]), [None, 9, 1])
    def test_dict_get_with_default(self):
        """
        Exercise dict.get(k, d) where d is set
        """
        @njit
        def foo(n, target, default):
            d = dictobject.new_dict(int32, float64)
            # insertion loop
            for i in range(n):
                d[i] = i
            # retrieval loop
            return d.get(target, default)
        # Key present: the stored value wins over the default.
        self.assertEqual(foo(5, 3, -1), 3)
        # Key absent: the explicit default is returned.
        self.assertEqual(foo(5, 5, -1), -1)
    def test_dict_getitem(self):
        """
        Exercise dictionary __getitem__
        """
        @njit
        def foo(keys, vals, target):
            d = dictobject.new_dict(int32, float64)
            # insertion
            for k, v in zip(keys, vals):
                d[k] = v
            # lookup
            return d[target]
        keys = [1, 2, 3]
        vals = [0.1, 0.2, 0.3]
        self.assertEqual(foo(keys, vals, 1), 0.1)
        self.assertEqual(foo(keys, vals, 2), 0.2)
        self.assertEqual(foo(keys, vals, 3), 0.3)
        # check no leak so far
        self.assert_no_memory_leak()
        # disable leak check for exception test
        self.disable_leak_check()
        # Missing keys must raise KeyError, just like a Python dict.
        with self.assertRaises(KeyError):
            foo(keys, vals, 0)
        with self.assertRaises(KeyError):
            foo(keys, vals, 4)
    def test_dict_popitem(self):
        """
        Exercise dictionary .popitem
        """
        @njit
        def foo(keys, vals):
            d = dictobject.new_dict(int32, float64)
            # insertion
            for k, v in zip(keys, vals):
                d[k] = v
            # popitem
            return d.popitem()
        keys = [1, 2, 3]
        vals = [0.1, 0.2, 0.3]
        # popitem() must return the most recently inserted pair (LIFO order).
        for i in range(1, len(keys)):
            self.assertEqual(
                foo(keys[:i], vals[:i]),
                (keys[i - 1], vals[i - 1]),
            )
    def test_dict_popitem_many(self):
        """
        Exercise dictionary .popitem
        """
        @njit
        def core(d, npop):
            # popitem: fold popped keys/values into two running sums so the
            # result can be compared against the pure-Python execution.
            keysum, valsum = 0, 0
            for _ in range(npop):
                k, v = d.popitem()
                keysum += k
                valsum -= v
            return keysum, valsum
        @njit
        def foo(keys, vals, npop):
            d = dictobject.new_dict(int32, int32)
            # insertion
            for k, v in zip(keys, vals):
                d[k] = v
            return core(d, npop)
        keys = [1, 2, 3]
        vals = [10, 20, 30]
        # NOTE(review): loop variable i is unused — the same npop=3 call is
        # repeated; possibly npop=i+1 was intended. Verify against upstream.
        for i in range(len(keys)):
            self.assertEqual(
                foo(keys, vals, npop=3),
                core.py_func(dict(zip(keys, vals)), npop=3),
            )
        # check no leak so far
        self.assert_no_memory_leak()
        # disable leak check for exception test
        self.disable_leak_check()
        # Popping more items than the dict holds raises KeyError.
        with self.assertRaises(KeyError):
            foo(keys, vals, npop=4)
    def test_dict_pop(self):
        """
        Exercise dictionary .pop
        """
        @njit
        def foo(keys, vals, target):
            d = dictobject.new_dict(int32, float64)
            # insertion
            for k, v in zip(keys, vals):
                d[k] = v
            # popitem
            return d.pop(target, None), len(d)
        keys = [1, 2, 3]
        vals = [0.1, 0.2, 0.3]
        # Existing keys: value returned, dict shrinks by one.
        self.assertEqual(foo(keys, vals, 1), (0.1, 2))
        self.assertEqual(foo(keys, vals, 2), (0.2, 2))
        self.assertEqual(foo(keys, vals, 3), (0.3, 2))
        # Missing key with default None: dict unchanged.
        self.assertEqual(foo(keys, vals, 0), (None, 3))
        # check no leak so far
        self.assert_no_memory_leak()
        # disable leak check for exception test
        self.disable_leak_check()
        @njit
        def foo():
            d = dictobject.new_dict(int32, float64)
            # popitem
            return d.pop(0)
        # pop() without a default on an empty dict raises KeyError.
        with self.assertRaises(KeyError):
            foo()
    def test_dict_pop_many(self):
        """
        Exercise dictionary .pop
        """
        @njit
        def core(d, pops):
            # Mix popped values, defaults and the shrinking length into one
            # accumulator; compared against the pure-Python execution below.
            total = 0
            for k in pops:
                total += k + d.pop(k, 0.123) + len(d)
                total *= 2
            return total
        @njit
        def foo(keys, vals, pops):
            d = dictobject.new_dict(int32, float64)
            # insertion
            for k, v in zip(keys, vals):
                d[k] = v
            # popitem
            return core(d, pops)
        keys = [1, 2, 3]
        vals = [0.1, 0.2, 0.3]
        # Includes repeated and missing keys to exercise the default path.
        pops = [2, 3, 3, 1, 0, 2, 1, 0, -1]
        self.assertEqual(
            foo(keys, vals, pops),
            core.py_func(dict(zip(keys, vals)), pops),
        )
    def test_dict_delitem(self):
        """
        Exercise dictionary __delitem__ (del d[k])
        """
        @njit
        def foo(keys, vals, target):
            d = dictobject.new_dict(int32, float64)
            # insertion
            for k, v in zip(keys, vals):
                d[k] = v
            del d[target]
            return len(d), d.get(target)
        keys = [1, 2, 3]
        vals = [0.1, 0.2, 0.3]
        # Deleting an existing key shrinks the dict and removes the entry.
        self.assertEqual(foo(keys, vals, 1), (2, None))
        self.assertEqual(foo(keys, vals, 2), (2, None))
        self.assertEqual(foo(keys, vals, 3), (2, None))
        # check no leak so far
        self.assert_no_memory_leak()
        # disable leak check for exception test
        self.disable_leak_check()
        # Deleting a missing key raises KeyError.
        with self.assertRaises(KeyError):
            foo(keys, vals, 0)
    def test_dict_clear(self):
        """
        Exercise dict.clear
        """
        @njit
        def foo(keys, vals):
            d = dictobject.new_dict(int32, float64)
            # insertion
            for k, v in zip(keys, vals):
                d[k] = v
            b4 = len(d)
            # clear
            d.clear()
            return b4, len(d)
        keys = [1, 2, 3]
        vals = [0.1, 0.2, 0.3]
        # Length before clear is 3; after clear the dict is empty.
        self.assertEqual(foo(keys, vals), (3, 0))
    def test_dict_items(self):
        """
        Exercise dict.items
        """
        @njit
        def foo(keys, vals):
            d = dictobject.new_dict(int32, float64)
            # insertion
            for k, v in zip(keys, vals):
                d[k] = v
            out = []
            for kv in d.items():
                out.append(kv)
            return out
        keys = [1, 2, 3]
        vals = [0.1, 0.2, 0.3]
        # Iteration follows insertion order, like CPython dicts.
        self.assertEqual(
            foo(keys, vals),
            list(zip(keys, vals)),
        )
        # Test .items() on empty dict
        @njit
        def foo():
            d = dictobject.new_dict(int32, float64)
            out = []
            for kv in d.items():
                out.append(kv)
            return out
        self.assertEqual(foo(), [])
    def test_dict_keys(self):
        """
        Exercise dict.keys
        """
        @njit
        def foo(keys, vals):
            d = dictobject.new_dict(int32, float64)
            # insertion
            for k, v in zip(keys, vals):
                d[k] = v
            out = []
            for k in d.keys():
                out.append(k)
            return out
        keys = [1, 2, 3]
        vals = [0.1, 0.2, 0.3]
        # Keys come back in insertion order.
        self.assertEqual(
            foo(keys, vals),
            keys,
        )
    def test_dict_values(self):
        """
        Exercise dict.values
        """
        @njit
        def foo(keys, vals):
            d = dictobject.new_dict(int32, float64)
            # insertion
            for k, v in zip(keys, vals):
                d[k] = v
            out = []
            for v in d.values():
                out.append(v)
            return out
        keys = [1, 2, 3]
        vals = [0.1, 0.2, 0.3]
        # Values come back in insertion order.
        self.assertEqual(
            foo(keys, vals),
            vals,
        )
    def test_dict_iter(self):
        """
        Exercise iter(dict)
        """
        @njit
        def foo(keys, vals):
            d = dictobject.new_dict(int32, float64)
            # insertion
            for k, v in zip(keys, vals):
                d[k] = v
            out = []
            # Iterating the dict itself yields its keys.
            for k in d:
                out.append(k)
            return out
        keys = [1, 2, 3]
        vals = [0.1, 0.2, 0.3]
        self.assertEqual(
            foo(keys, vals),
            [1, 2, 3]
        )
    def test_dict_contains(self):
        """
        Exercise operator.contains
        """
        @njit
        def foo(keys, vals, checklist):
            d = dictobject.new_dict(int32, float64)
            # insertion
            for k, v in zip(keys, vals):
                d[k] = v
            out = []
            for k in checklist:
                out.append(k in d)
            return out
        keys = [1, 2, 3]
        vals = [0.1, 0.2, 0.3]
        # Membership per checklist entry: 2, 3, 1 present; 4, 0 absent.
        self.assertEqual(
            foo(keys, vals, [2, 3, 4, 1, 0]),
            [True, True, False, True, False],
        )
    def test_dict_copy(self):
        """
        Exercise dict.copy
        """
        @njit
        def foo(keys, vals):
            d = dictobject.new_dict(int32, float64)
            # insertion
            for k, v in zip(keys, vals):
                d[k] = v
            # The copy must contain the same items in the same order.
            return list(d.copy().items())
        keys = list(range(20))
        # Values distinct from keys so a key/value mix-up would be caught.
        vals = [x + i / 100 for i, x in enumerate(keys)]
        out = foo(keys, vals)
        self.assertEqual(out, list(zip(keys, vals)))
    def test_dict_setdefault(self):
        """
        Exercise dict.setdefault
        """
        @njit
        def foo():
            d = dictobject.new_dict(int32, float64)
            d.setdefault(1, 1.2) # used because key is not in
            a = d.get(1)
            d[1] = 2.3
            b = d.get(1)
            d[2] = 3.4
            d.setdefault(2, 4.5) # not used because key is in
            c = d.get(2)
            return a, b, c
        # a: setdefault inserted 1.2; b: assignment overwrote it with 2.3;
        # c: setdefault on an existing key leaves 3.4 untouched.
        self.assertEqual(foo(), (1.2, 2.3, 3.4))
    def test_dict_equality(self):
        """
        Exercise dict.__eq__ and .__ne__
        """
        @njit
        def foo(na, nb, fa, fb):
            # Build two dicts of na/nb entries whose values are scaled by fa/fb.
            da = dictobject.new_dict(int32, float64)
            db = dictobject.new_dict(int32, float64)
            for i in range(na):
                da[i] = i * fa
            for i in range(nb):
                db[i] = i * fb
            return da == db, da != db
        # Same keys and values
        self.assertEqual(foo(10, 10, 3, 3), (True, False))
        # Same keys and diff values
        self.assertEqual(foo(10, 10, 3, 3.1), (False, True))
        # LHS has more keys
        self.assertEqual(foo(11, 10, 3, 3), (False, True))
        # RHS has more keys
        self.assertEqual(foo(10, 11, 3, 3), (False, True))
def test_dict_equality_more(self):
"""
Exercise dict.__eq__
"""
@njit
def foo(ak, av, bk, bv):
# The key-value types are different in the two dictionaries
da = dictobject.new_dict(int32, float64)
db = dictobject.new_dict(int64, float32)
for i in range(len(ak)):
| |
that part of the surface is now merged
centerlines = vmtk_resample_centerline(centerlines, length=0.1)
inlet = centerlines.GetPoint(0)
outlets = []
lines_to_compare = []
for i in range(centerlines.GetNumberOfLines()):
lines_to_compare.append(extract_single_line(centerlines, i))
outlets += lines_to_compare[-1].GetPoint(lines_to_compare[-1].GetNumberOfPoints() - 1)
lines_to_check, _, _ = compute_centerlines(inlet, outlets, None, surface,
resampling=0.1, recompute=True)
for i in range(centerlines.GetNumberOfLines()):
line_to_compare = vmtk_resample_centerline(lines_to_compare[i], length=0.1)
line_to_check = vmtk_resample_centerline(extract_single_line(lines_to_check, i), length=0.1)
# Compare distance between points along both centerliens
n = min([line_to_check.GetNumberOfPoints(), line_to_compare.GetNumberOfPoints()])
tolerance = get_centerline_tolerance(line_to_compare) * 500
for j in range(n):
p1 = np.asarray(line_to_check.GetPoint(j))
p2 = np.asarray(line_to_compare.GetPoint(j))
dist = get_distance(p1, p2)
if dist > tolerance:
tmp_path = output_filepath.replace(".vtp", "_ERROR_MERGED.vtp")
write_polydata(surface, tmp_path)
raise RuntimeError(("\nERROR: Model has most likely overlapping regions." +
" Please check the surface model {} and provide other" +
" parameters for the manipulation or" +
" poly_ball_size.").format(tmp_path))
def prepare_output_surface(surface, original_surface, new_centerline, output_filepath,
                           test_merge=False, changed=False, old_centerline=None,
                           removed=None):
    """After manipulation preparing the surface for output. This method clips the
    outlets, slightly smooths the surface, and (potentially) tests if the surface is
    merged.

    Args:
        surface (vtkPolyData): The new surface after manipulation.
        original_surface (vtkPolyData): The original surface inputed for manipulation.
        new_centerline (vtkPolyData): The centerline after manipulation.
        output_filepath (str): The user-defined path to the output.
        test_merge (bool): Turn on/off testing if the surface is merged.
        changed (bool): If the manipulated surface has changed the location of the
            inlet/outlet.
        old_centerline (vtkPolyData): The old centerline for the original centerline.
        removed (list): Points of removed branches; boundary regions closer than 0.5
            to any of them are skipped. Defaults to one far-away dummy point.

    Returns:
        surface (vtkPolyData): The surface ready for output.
    """
    # Avoid a shared mutable default argument; the dummy point is far away so
    # nothing is skipped by default.
    if removed is None:
        removed = [[1e9, 1e9, 1e9]]
    # Check if the folder for the output exits
    if not path.exists(path.dirname(output_filepath)):
        if path.dirname(output_filepath) != "":
            makedirs(path.dirname(output_filepath))
    # Get planes if outlets of the original surface
    boundary_edges = vtk_extract_feature_edges(original_surface)
    boundary_connectivity = vtk_compute_connectivity(boundary_edges)
    vtk_array = boundary_connectivity.GetPointData().GetArray("RegionId")
    vtk_points = boundary_connectivity.GetPoints().GetData()
    region_id = numpy_support.vtk_to_numpy(vtk_array)
    points = numpy_support.vtk_to_numpy(vtk_points)
    # Use the old centerline to locate the original in/outlets when available.
    centerline = new_centerline if old_centerline is None else old_centerline
    outlets = []
    lines = []
    for i in range(centerline.GetNumberOfLines()):
        lines.append(extract_single_line(centerline, i))
        outlets.append(lines[-1].GetPoint(lines[-1].GetNumberOfPoints() - 1))
    inlet_point = lines[-1].GetPoint(0)
    if changed and old_centerline is None:
        print("WARNING: The changed flag is true, but the old centerline is not provided," +
              " and the outlet location can therefore not be changed.")
    # Get information from the original geometry
    inlet = False
    for i in range(region_id.max() + 1):
        # Get relevant points
        tmp_points = points[region_id == i]
        # Get normal
        tmp_normal = np.cross(tmp_points[0] - tmp_points[-1],
                              tmp_points[0] - tmp_points[tmp_points.shape[0] // 2])
        normal = tmp_normal / np.sqrt(np.sum(tmp_normal ** 2))
        # Get Center
        center = np.mean(tmp_points, axis=0)
        # Check if branch has been removed.
        # BUGFIX: compute the Euclidean distance to each removed point (axis=1)
        # and skip when the closest one is within tolerance; the previous
        # expression summed all coordinate differences before squaring.
        if np.min(np.sqrt(np.sum((np.array(removed) - center) ** 2, axis=1))) < 0.5:
            continue
        # Get corresponding centerline to in/outlet
        if np.sqrt(np.sum((np.array(inlet_point) - center) ** 2)) < 0.5:
            line = lines[0]
            line_id = 0
            inlet = True
        else:
            line_id = np.argmin(np.sqrt(np.sum((np.array(outlets) - center) ** 2, axis=1)))
            line = lines[line_id]
        # Set correct direction of normal
        if inlet:
            in_dir = np.array(line.GetPoint(5)) - \
                     np.array(line.GetPoint(0))
        else:
            in_dir = np.array(line.GetPoint(line.GetNumberOfPoints() - 5)) - \
                     np.array(line.GetPoint(line.GetNumberOfPoints() - 1))
        in_dir = in_dir / np.sqrt(np.sum(in_dir ** 2))
        angle = np.arccos(np.dot(in_dir, normal)) * 180 / np.pi
        normal = -normal if 90 < angle < 270 else normal
        # Map the old center and normals to the altered model
        if changed and old_centerline is not None:
            new_line = extract_single_line(new_centerline, line_id)
            # Set correct direction of normal
            if inlet:
                new_outlet = np.array(new_line.GetPoint(0))
                in_dir_new = np.array(new_line.GetPoint(5)) - new_outlet
                translation = new_outlet - np.array(inlet_point)
            else:
                new_outlet = np.array(new_line.GetPoint(new_line.GetNumberOfPoints() - 1))
                in_dir_new = np.array(new_line.GetPoint(new_line.GetNumberOfPoints() - 5)) - new_outlet
                translation = new_outlet - np.array(outlets[line_id])
            center += translation
            in_dir_new = in_dir_new / np.sqrt(np.sum(in_dir_new ** 2))
            in_dir_normal = np.cross(in_dir_new, in_dir)
            dir_angle = np.arccos(np.dot(in_dir, in_dir_new)) * 180 / np.pi
            # Rotate the old boundary normal into the new outlet orientation.
            translation = vtk.vtkTransform()
            translation.RotateWXYZ(-dir_angle, in_dir_normal)
            tmp_normal = normal
            normal = [0, 0, 0]
            translation.TransformNormal(tmp_normal, normal)
        # Set plane
        plane = vtk_plane(center, normal)
        # Clip data (naivly)
        surface, clipped = vtk_clip_polydata(surface, plane)
        # Reattach data which should not have been clipped
        surface = attach_clipped_regions_to_surface(surface, clipped, center)
        inlet = False
    # Perform a 'light' smoothing to obtain a nicer surface
    surface = vmtk_smooth_surface(surface, method="laplace", iterations=100)
    # Clean surface
    surface = vtk_clean_polydata(surface)
    surface = vtk_triangulate_surface(surface)
    # Capped surface
    capped_surface = vmtk_cap_polydata(surface)
    if test_merge:
        check_if_surface_is_merged(capped_surface, new_centerline, output_filepath)
    return surface
def attach_clipped_regions_to_surface(surface, clipped, center):
    """Check the connectivity of a clipped surface and reattach every clipped
    segment except the one closest to the center of the clipping plane.

    Args:
        surface (vtkPolyData):
        clipped (vtkPolyData): The clipped segments of the surface.
        center (list): The center of the clipping point

    Returns:
        surface (vtkPolyData): The surface where only one segment has been removed.
    """
    labeled = vtk_compute_connectivity(clipped, mode="All")
    if labeled.GetNumberOfPoints() == 0:
        # Nothing was clipped off; return the surface untouched.
        return surface
    ids = get_point_data_array("RegionId", labeled)
    segments = []
    segment_distances = []
    for region in range(int(ids.max() + 1)):
        segment = vtk_compute_threshold(labeled, "RegionId", lower=region - 0.1,
                                        upper=region + 0.1, source=0)
        locator = get_vtk_point_locator(segment)
        closest_point = segment.GetPoint(locator.FindClosestPoint(center))
        segments.append(segment)
        segment_distances.append(get_distance(closest_point, center))
    # Drop only the segment nearest to the clipping center ...
    del segments[segment_distances.index(min(segment_distances))]
    # ... and merge the remaining ones back into the surface.
    merged = vtk_merge_polydata(segments + [surface])
    merged = vtk_clean_polydata(merged)
    return vtk_triangulate_surface(merged)
def prepare_voronoi_diagram(capped_surface, centerlines, base_path, smooth, smooth_factor, no_smooth, no_smooth_point,
                            voronoi, pole_ids, resampling_length, absolute=False, upper=None):
    """
    Compute and smooth voronoi diagram of surface model.

    Args:
        capped_surface (polydata): Capped surface model to create a Voronoi diagram of.
        centerlines (vtkPolyData): Centerlines throughout geometry.
        base_path (str): Absolute path to surface model path.
        smooth (bool): Voronoi is smoothed if True.
        smooth_factor (float): Smoothing factor for voronoi smoothing.
        no_smooth (bool): Part of Voronoi is not smoothed.
        no_smooth_point (vtkPolyData): Point which defines unsmoothed area.
        voronoi (vtkPolyData): Voronoi diagram.
        pole_ids (vtkIDList): Pole ids of Voronoi diagram.
        resampling_length (float): Length of resampling the centerline.
        absolute (bool): Turn on/off absolute values for the smoothing. Default is off.
            NOTE(review): currently unused in this function — verify intent.
        upper (int): Set an upper limit for the smoothing factor. Default is None.
            NOTE(review): currently unused in this function — verify intent.

    Returns:
        voronoi (vtkPolyData): Voronoi diagram of surface.
    """
    # Centerlines to be excluded from smoothing, if any were requested.
    no_smooth_cl = None
    if smooth and no_smooth:
        no_smooth_cl = get_no_smooth_cl(capped_surface, centerlines, base_path, smooth, no_smooth, voronoi,
                                        no_smooth_point, pole_ids, resampling_length)
    if voronoi is None:
        voronoi = vmtk_compute_voronoi_diagram(capped_surface, base_path + "_voronoi.vtp")
    voronoi_smoothed_path = base_path + "_voronoi_smoothed.vtp"
    surface_smoothed_path = base_path + "_smoothed.vtp"
    if smooth:
        if path.exists(voronoi_smoothed_path):
            # Reuse a previously smoothed diagram from disk.
            voronoi = read_polydata(voronoi_smoothed_path)
        else:
            voronoi = smooth_voronoi_diagram(voronoi, centerlines, smooth_factor, no_smooth_cl)
            write_polydata(voronoi, voronoi_smoothed_path)
            # Create new surface from the smoothed Voronoi
            surface_smoothed = create_new_surface(voronoi)
            write_polydata(surface_smoothed, surface_smoothed_path)
    return voronoi
def compute_centerlines(inlet, outlet, filepath, surface, resampling=1.0, smooth=False,
                        num_iter=100, smooth_factor=0.1, end_point=1, method="pointlist",
                        recompute=False, voronoi=None, pole_ids=None, base_path=None):
    """Wrapper for vmtkcenterlines and vmtkcenterlinesmoothing.

    Args:
        inlet (list): point of the inlet
        outlet (list): flat list of the outlet points
        filepath (str): path to where to store the centerline
        surface (vtkPolyData): surface to get the centerline from.
        resampling (float): resampling step length.
        smooth (bool): smooth centerline or not.
        num_iter (int): number of iterations in smooth.
        smooth_factor (float): smoothing factor.
        end_point (int): 0 or 1, include end point in centerline.
        method (str): method for setting the inlet and outlet location
        recompute (bool): if filepath exists, but the centerline should be computed again
            anyway.
        voronoi (vtkPolyData): Optional argument for setting the Voronoi diagram.
        pole_ids (vtkIdList): A vtkIdList coupling the surface with the voronoi diagram
        base_path (str): path to the case

    Returns:
        centerline (vtkPolyData): centerline of the surface.
        voronoi (vtkPolyData): Voronoi data.
        pole_ids (vtkIdList): vtkIdList coupling the surface and the voronoi diagram.
    """
    # Re-use a previously computed centerline unless recomputation was requested.
    # str() guards against filepath being None.
    if not recompute and path.isfile(str(filepath)):
        if base_path is not None and path.isfile(base_path + "_voronoi.vtp"):
            voronoi = read_polydata(base_path + "_voronoi.vtp")
            pole_ids = read_polydata(base_path + "_pole_ids.np", datatype="vtkIdList")
        else:
            voronoi, pole_ids = None, None

        return read_polydata(filepath), voronoi, pole_ids

    centerlines, centerlines_output = vmtk_compute_centerlines(end_point, inlet, method, outlet, pole_ids,
                                                               resampling, surface, voronoi)

    if smooth:
        centerlines_output = vmtk_smooth_centerline(centerlines_output, num_iter, smooth_factor)

    # Persist the computed centerline for later re-use.
    if filepath is not None:
        write_polydata(centerlines_output, filepath)

    # Extract the Voronoi diagram and pole ids produced by the centerline filter.
    voronoi = centerlines.VoronoiDiagram
    pole_ids = centerlines.PoleIds
    if base_path is not None:
        write_polydata(voronoi, base_path + "_voronoi.vtp")
        write_polydata(pole_ids, base_path + "_pole_ids.np", datatype="vtkIdList")

    return centerlines_output, voronoi, pole_ids
def prepare_surface(base_path, surface_path):
"""
Clean and check connectivity of surface.
Capps or uncapps surface at inlet and outlets.
Args:
base_path (str): Absolute path to base folder.
surface_path (str): Path to surface.
Returns:
open_surface (vtkPolyData): Open surface.
Returns:
capped_surface | |
self.get_plot_compare_behaviour_correlation(astroA_l)
saving_utils.save_plotly_fig(fig_behaviour_corr, behaviour_corr_path)
behaviour_corr_path = os.path.join(output_experiment_path_comparison, 'plots', 'correlations', 'behaviour_corr_dff')
fig_behaviour_corr = self.get_plot_compare_behaviour_correlation(astroA_l, dff_mode=True)
saving_utils.save_plotly_fig(fig_behaviour_corr, behaviour_corr_path)
'''
def plot_comparisons_all(self, astroA_l, astroA_l_pairs=None, astroA_l_good_pairs=None, astroA_l_good=None, astroA_long_l=None):
output_experiment_path_all_comparison, _, _, astroA_l_s = self.setup_comparison_all_vars(astroA_l, self.output_folder)
print('Plotting sizes histogram dataset comparison for each behaviour')
self.setup_plot_folders_all_comparison(output_experiment_path_all_comparison)
bh_l = ['rest', 'stick_rest', 'running', 'stick_run_ind_15']
astroA_l_filt = []
bh_l_test = ['rest', 'running', 'stick_run_ind_15', 'stick_rest']
for astroA in astroA_l:
include = True
for bh in bh_l_test:
if bh not in astroA.indices_d.keys() or bh not in astroA.activity_ratios.keys():
include = False
print(':(', astroA.print_id, bh)
if include:
astroA_l_filt.append(astroA)
day_0_1_pairs = []
if astroA_l_pairs is not None:
for astroA_l_pair in astroA_l_pairs:
if astroA_l_pair[1].day == 1:
day_0_1_pairs.append(astroA_l_pair)
'''
print('Saving results of ratios running, rest, stick-running, stick-rest of each astrocyte in csv...')
c = ['running', 'rest', 'stick_run_ind_15', 'stick_rest', 'total_time_s', 'total_time_m', 'avg_running_speed', 'avg_speed_global']
c_n = ['running', 'rest', 'stick_run', 'stick_rest', 'total_time(s)', 'total_time(m)', 'avg_speed(cm/s)', 'avg_speed_global(cm/s)']
astro_ratios_np = np.zeros([len(astroA_l), len(c)])
r = [astroA.id for astroA in astroA_l]
for i, astroA in enumerate(astroA_l):
num_frames = len(astroA.indices_d['default'])
num_seconds = num_frames / astroA.fr
num_minutes = general_utils.truncate(num_seconds / 60.0, 2)
num_seconds = general_utils.truncate(num_seconds, 2)
for j, k in enumerate(c):
if j == 4:
astro_ratios_np[i, j] = num_seconds
continue
if j == 5:
astro_ratios_np[i, j] = num_minutes
continue
if k not in astroA.indices_d:
if 'speed' in k:
if k == 'avg_running_speed':
astro_ratios_np[i, j] = np.mean(astroA.speed_values[astroA.speed_values!=0])
elif k == 'avg_speed_global':
astro_ratios_np[i, j] = np.mean(astroA.speed_values)
else:
print('Not exist', k, astroA.id)
astro_ratios_np[i, j] = 0
continue
else:
astro_ratios_np[i, j] = general_utils.truncate(len(astroA.indices_d[k]) / num_frames, 3)
behaviour_ratios_csv_path = os.path.join(output_experiment_path_all_comparison, 'data', 'behaviour_ratios', 'ratios.csv')
DataFrame(astro_ratios_np, columns=c, index=r).to_csv(behaviour_ratios_csv_path)
'''
'''
print('Saving results of average maximum characteristic values (e.g. Average maximum duration over all astrocyte recordings)')
measure_l = ['area', 'dffMax2', 'duration']
measure_names_l = ['area', 'amplitude', 'duration']
bh_l = ['rest', 'stick_rest', 'running', 'stick_run_ind_15']
settings = ['max', 'meantop10', 'mediantop10', 'meantop5', 'mediantop5']
settings_d_i = {setting: i for i, setting in enumerate(settings)}
np_d = [np.zeros([len(astroA_l), len(bh_l)]) for i in range(len(settings))]
max_np = np.zeros([len(astroA_l), len(bh_l)])
r = [astroA.id for astroA in astroA_l]
#Dictionary of events for each behaviour for each astrocyte.
#events_d_d['astro_id']['behaviour'] = event ids of astro id
events_d_d = {}
for astroA in astroA_l:
d = {'default': astroA.indices_d['default']}
for bh in bh_l:
if bh in astroA.indices_d:
d[bh] = astroA.indices_d[bh]
events_d_d[astroA.print_id] = aqua_utils.get_event_subsets(d, astroA.res_d)
base_path = os.path.join(output_experiment_path_all_comparison, 'data', 'top_average_values')
for m_i, measure in enumerate(measure_l):
for i, astroA in enumerate(astroA_l):
measure_vals_all = astroA.res_d[measure]
bh_events_d = events_d_d[astroA.print_id]
for j, bh in enumerate(bh_l):
if bh in bh_events_d:
#Measure values corresponding to given astrocyte & measure & behaviour
bh_measure_vals = measure_vals_all[bh_events_d[bh]]
bh_measure_vals_s = np.sort(bh_measure_vals)[::-1]
top10 = bh_measure_vals_s[:len(bh_measure_vals_s)//10]
top5 = bh_measure_vals_s[:len(bh_measure_vals_s)//20]
print(astroA.print_id)
if astroA.print_id == 'm181129_d190222_c005_day_0' and bh == 'stick_rest':
print('A')
print(top5)
if astroA.print_id == 'm181129_d190222_c005_day_3' and bh == 'stick_rest':
print('B')
print(top5)
np_d[settings_d_i['max']][i, j] = bh_measure_vals_s[0]
np_d[settings_d_i['meantop10']][i, j] = np.mean(top10)
np_d[settings_d_i['meantop5']][i, j] = np.mean(top5)
np_d[settings_d_i['mediantop10']][i, j] = np.median(top10)
np_d[settings_d_i['mediantop5']][i, j] = np.median(top5)
for setting in settings_d_i.keys():
DataFrame(np_d[settings_d_i[setting]], columns=bh_l, index=r).to_csv(os.path.join(base_path, 'measure={}-type={}.csv'.format(measure_names_l[m_i], setting)))
'''
'''
measure_l = ['time_s', 'dffMax2', 'area']
measure_names = ['Duration(s)', 'Amplitude', 'Area']
print('Calcium signal behaviour change over time')
#How does calcium signals change over recording time?
#1 sort events by time
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'behaviour_over_recording')
for astroA in astroA_l:
for i, measure in enumerate(measure_l):
sorted_ev_i = np.argsort(astroA.res_d['tBegin'])
x = []
y = []
for ev_i in sorted_ev_i:
x.append(ev_i)
y.append(astroA.res_d[measure][ev_i])
fig = plotly_utils.plot_scatter(np.array(x), np.array(y) , mode='markers', title='scatter', x_title='', y_title='')
plotly_utils.apply_fun_axis_fig(fig, lambda x : x / astroA.fr, axis='x')
saving_utils.save_plotly_fig(fig, os.path.join(path, '{}-{}'.format(astroA.print_id, measure_names[i])))
'''
'''
print('Speed over time...')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'behaviour_over_recording')
for astroA in astroA_l:
fig = plotly_utils.plot_scatter(np.arange(len(astroA.speed_values)), astroA.speed_values, mode='lines')
plotly_utils.apply_fun_axis_fig(fig, lambda x : x / astroA.fr, axis='x')
saving_utils.save_plotly_fig(fig, os.path.join(path, '{}-speed'.format(astroA.print_id)))
'''
'''
print('Individual behaviour distribution plots...')
for n_bins in [10, 20, 40, 80]:
#Size, amplitude, signal duration distribution plots over all datasets on different behaviours
for bh in bh_l:
plt_l = []
pth_l = []
for measure, min_measure, max_measure in [
['area', None, 6],
['area', None, None],
['dffMax2', None, 5],
['dffMax2', None, None],
['duration', None, None],
['duration', None, 50]
]:
try:
for with_max in [True, False]:
measure_name = aqua_utils.get_measure_names(measure)
fig_path = os.path.join(output_experiment_path_all_comparison, 'plots', '{}_histogram_comparison'.format(measure_name), '{}-nbins={}-min={}-max={}'.format(bh, n_bins, min_measure, max_measure))
plot, _, _ = self.measure_distribution_plot(astroA_l, bh, measure=measure, num_bins=n_bins, max_measure=max_measure, min_measure=min_measure, measure_name=measure_name)
if measure == 'duration':
plotly_utils.apply_fun_axis_fig(plot, lambda x : x / astroA_l[0].fr, axis='x')
saving_utils.save_pth_plt_l_log([plot], [fig_path], axis='x')
except KeyError as e:
print('Got key error: some behaviour its fine {}'.format(e))
'''
'''
#Area: None, 60, num_bins = 10
#Duration: None, 30, num_bins = 10
#dff : 0.6, 5, num_bins = 20
print('Comparing behaviour distribution plots...')
for n_bins in [10, 20]:
print('NUM BINS:', n_bins)
for behaviour_l in [bh_l]: #, ['rest', 'running'], ['running', 'stick'], ['rest', 'stick_rest'], ['running', 'stick_run_ind_15']]:
for measure, min_measure, max_measure in [
['area', None, 60],
['dffMax2', 0.6, 5],
['duration', None, 30],
]:
for confidence in [True]:
for mode in ['MOA', 'MOE']:
measure_name = aqua_utils.get_measure_names(measure)
path = os.path.join(output_experiment_path_all_comparison, 'plots', '{}_histogram_bh_comparison'.format(measure_name), 'behaviours-{}-nbins={}-min={}-max={}-conf={}-mode={}'.format('_'.join(behaviour_l), n_bins, min_measure, max_measure, confidence, mode))
plot, stats_d = self.measure_distribution_bh_compare_plot(astroA_l, behaviour_l, measure=measure, num_bins=n_bins, min_measure=min_measure, max_measure=max_measure, measure_name=measure_name, confidence=confidence, with_stats=True, mode=mode)
if measure == 'duration':
plotly_utils.apply_fun_axis_fig(plot, lambda x : x / astroA_l[0].fr, axis='x')
saving_utils.save_pth_plt_l_log([plot], [path], axis='x')
saving_utils.save_pth_plt_l_log([plot], [path], axis='y')
#Save results in text file
for i, name in enumerate(stats_d['names']):
#Create folder
data_folder_path = path
try:
os.makedirs(path)
except:
pass
temp_d = {k : stats_d[k][i] for k in stats_d.keys()}
saving_utils.save_csv_dict(temp_d, os.path.join(data_folder_path, '{}.csv'.format(name)), key_order=['names', 'x', 'mean', 'conf_95', 'std'])
np.savetxt(os.path.join(data_folder_path, '{}-data.csv'.format(name)), np.array(temp_d['data']).transpose(), delimiter=",")
for confidence in [True]:
for with_log in [False, True]:
measure_name = aqua_utils.get_measure_names(measure)
plot, stats_d = self.measure_distribution_bh_compare_plot_exponential_fit(astroA_l, behaviour_l, measure=measure, num_bins=n_bins, min_measure=min_measure, max_measure=max_measure, measure_name=measure_name, confidence=False, with_stats=True, with_log=with_log)
path = os.path.join(output_experiment_path_all_comparison, 'plots', '{}_histogram_bh_comparison'.format(measure_name), 'behaviours-{}-nbins={}-min={}-max={}-conf={}_EXPFIT-withlog={}'.format('_'.join(behaviour_l), n_bins, min_measure, max_measure, confidence, with_log))
if measure == 'duration':
plotly_utils.apply_fun_axis_fig(plot, lambda x : x / astroA_l[0].fr, axis='x')
#Save results in text file
for i, name in enumerate(stats_d['names']):
#Create folder
data_folder_path = path
try:
os.makedirs(path)
except:
pass
temp_d = {k : stats_d[k][i] for k in stats_d.keys()}
if len(name.split('__')) == 2:
tx_name = name.split('__')[0] + '_expfit'
else:
tx_name = name
print('TX NAME', name)
saving_utils.save_csv_dict(temp_d, os.path.join(data_folder_path, '{}.csv'.format(tx_name)), key_order=['names', 'x', 'mean', 'conf_95', 'std'])
np.savetxt(os.path.join(data_folder_path, '{}-data.csv'.format(tx_name)), np.array(temp_d['data']).transpose(), delimiter=",")
saving_utils.save_plotly_fig(plot, path)
saving_utils.save_pth_plt_l_log([plot], [path], axis='y')
print('THE STAT HERE?', stats_d)
'''
'''
print('Violin plots...')
plt_l = []
pth_l = []
for max_dff in [2, 5, 10, None]:
#VIOLIN PLOTS comparing TWO behaviour distribution plots (but in violin form)
fig_amp_violin_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'amplitude_histogram_comparison', 'violin_rest_run_dff={}'.format(max_dff))
fig = self.amplitude_distribution_plot_violin_duo(astroA_l_filt, 'rest', 'running', max_dff=max_dff)
#saving_utils.save_plotly_fig(fig, fig_amp_violin_path)
plt_l.append(fig)
pth_l.append(fig_amp_violin_path)
fig_amp_violin_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'amplitude_histogram_comparison', 'violin_run_stick_dff={}'.format(max_dff))
fig = self.amplitude_distribution_plot_violin_duo(astroA_l_filt, 'running', 'stick_run_ind_15', max_dff=max_dff)
#saving_utils.save_plotly_fig(fig, fig_amp_violin_path2)
plt_l.append(fig)
pth_l.append(fig_amp_violin_path)
fig_amp_violin_path3 = os.path.join(output_experiment_path_all_comparison, 'plots', 'amplitude_histogram_comparison', 'violin_rest_stick_dff={}'.format(max_dff))
fig = self.amplitude_distribution_plot_violin_duo(astroA_l_filt, 'rest', 'stick_rest', max_dff=max_dff)
#saving_utils.save_plotly_fig(fig, fig_amp_violin_path)
plt_l.append(fig)
pth_l.append(fig_amp_violin_path3)
for max_area in [9, 20, 40, None]:
sizes_violin_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'sizes_histogram_comparison', 'violin_rest_run_area={}'.format(max_area))
fig = self.sizes_distribution_plot_violin_duo(astroA_l_filt, 'rest', 'running', max_area=max_area)
plt_l.append(fig)
pth_l.append(sizes_violin_path)
sizes_violin_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'sizes_histogram_comparison', 'violin_run_stick_area={}'.format(max_area))
fig = self.sizes_distribution_plot_violin_duo(astroA_l_filt, 'running', 'stick_run_ind_15', max_area=max_area)
plt_l.append(fig)
pth_l.append(sizes_violin_path)
sizes_violin_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'sizes_histogram_comparison', 'violin_rest_stick_area={}'.format(max_area))
fig = self.sizes_distribution_plot_violin_duo(astroA_l_filt, 'rest', 'stick_rest', max_area=max_area)
plt_l.append(fig)
pth_l.append(sizes_violin_path)
for max_duration in [10, 20, 30, 40, None]:
duration_violin_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'duration_histogram_comparison', 'violin_rest_run_duration={}'.format(max_duration))
fig = self.signal_duration_distribution_plot_violin_duo(astroA_l_filt, 'rest', 'running', max_duration=max_duration)
plt_l.append(fig)
pth_l.append(duration_violin_path)
duration_violin_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'duration_histogram_comparison', 'violin_run_stick_duration={}'.format(max_duration))
fig = self.signal_duration_distribution_plot_violin_duo(astroA_l_filt, 'running', 'stick_run_ind_15', max_duration=max_duration)
plt_l.append(fig)
pth_l.append(duration_violin_path)
duration_violin_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'duration_histogram_comparison', 'violin_rest_stick_duration={}'.format(max_duration))
fig = self.signal_duration_distribution_plot_violin_duo(astroA_l_filt, 'rest', 'stick_rest', max_duration=max_duration)
plt_l.append(fig)
pth_l.append(duration_violin_path)
save_pth_plt_l_log(plt_l, pth_l, axis='y')
'''
'''
print('Splits SELF ALL')
#STEP 1
#Take only long duration astrocytes
#Set maximum length of astrocyte duration to be 70min
#Then apply splits with xcorr
data_save_path = os.path.join(output_experiment_path_all_comparison, 'data', 'splits_self_all')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'splits_self_all')
y_l_l = []
x_l = []
minute_frame_splits_l = [35, 30, 25, 20, 15, 10, 5, 2]
cut_duration = 70
param_str = 'cut_{}-'.format(cut_duration) + 'splits_{}-'.format('_'.join([str(m) for m in minute_frame_splits_l]))
name_l = []
for i, astroA in enumerate(astroA_long_l):
curr_save_path = os.path.join(data_save_path, 'id_{}-{}.pkl'.format(astroA.print_id, param_str))
res_d = self.get_compare_full_self_results_alt(astroA, cut_duration_min=cut_duration, minute_frame_splits_l=minute_frame_splits_l, save_pkl_path=curr_save_path)
y_l_l.append(res_d['y'])
x_l.append(res_d['x'])
name_l.append(astroA.print_id)
fig, stats_d = plotly_utils.plot_scatter_mult_with_avg(x_l[0], y_l_l, None, name_l, mode='lines', title='Splits self', x_title='Splits (minutes)', y_title='Correlation',
xrange=None, yrange=None, confidence=True, with_stats=True, point_box=True)
df_data_m = DataFrame(stats_d['mean_l_l'], columns=stats_d['x'], index=stats_d['names'])
df_ci = DataFrame(stats_d['conf_95'], columns=stats_d['x'], index=stats_d['names'])
df_mean = DataFrame([stats_d['mean'], stats_d['mean_conf']], columns=stats_d['x'], index=['mean', 'conf_95'])
df_data_m.to_csv(path + '-data_means.csv')
df_ci.to_csv(path + '-data_ci.csv')
df_mean.to_csv(path + '-mean_and_CI.csv')
saving_utils.save_plotly_fig(fig, path)
'''
'''
print('HEATMAPS V2... (astro days scaled the same (to minimum maximum scale of the 2))')
for astroA_pair | |
<gh_stars>0
"""Contains functions to perform detection, deblending and measurement
on images.
"""
from functools import partial
import astropy.table
import numpy as np
import skimage.feature
from btk import plot_utils
from btk.compute_metrics import Metrics_params
from btk.measure import Measurement_params
class SEP_params(Measurement_params):
    """Class to perform detection and deblending with SEP."""

    def __init__(self):
        # Both attributes are populated by get_centers() on the first run.
        self.catalog = None
        self.segmentation = None

    def get_centers(self, image):
        """Return centers detected when object detection and photometry
        is done on input image with SEP.

        It also initializes the self.catalog and self.segmentation attributes
        of the class object.

        Args:
            image: Image (single band) of galaxy to perform measurement on.

        Returns:
            centers: x and y coordinates of detected centroids
        """
        sep = __import__("sep")
        background = sep.Background(image)
        self.catalog, self.segmentation = sep.extract(
            image, 1.5, err=background.globalrms, segmentation_map=True
        )
        return np.stack((self.catalog["x"], self.catalog["y"]), axis=1)

    def get_deblended_images(self, data, index):
        """Performs SEP detection on the band-coadd image and returns the
        detected peaks.

        Args:
            data (dict): Output generated by btk.draw_blends containing blended
                images, isolated images, observing conditions and blend
                catalog, for a given batch.
            index (int): Index number of blend scene in the batch to preform
                measurement on.

        Returns:
            dict with the centers of sources detected by SEP detection
            algorithm.
        """
        # Mean over bands gives a single coadd image for detection.
        coadd = np.mean(data["blend_images"][index], axis=2)
        return {"deblend_image": None, "peaks": self.get_centers(coadd)}
class Stack_params(Measurement_params):
    """Class with functions that describe how LSST science pipeline can
    perform measurements on the input data."""

    min_pix = 1  # Minimum size in pixels to be considered a source
    bkg_bin_size = 32  # Binning size of the local background
    thr_value = 5  # SNR threshold for the detection
    psf_stamp_size = 41  # size of psf stamp to draw PSF on

    def make_measurement(self, data, index):
        """Perform detection, deblending and measurement on the i band image of
        the blend for input index entry in the batch.

        Args:
            data: Dictionary with blend images, isolated object images, blend
                catalog, and observing conditions.
            index: Position of the blend to measure in the batch.

        Returns:
            astropy.Table of the measurement results.
        """
        band = 3  # i band
        image = data["blend_images"][index, :, :, band].astype(np.float32)
        obs_cond = data["obs_condition"][band]
        psf_image, mean_sky_level = obs_cond.get_psf_sky(self.psf_stamp_size)
        # Poisson-limited variance estimate: signal plus sky background.
        variance = image + mean_sky_level
        catalog = run_stack(
            image,
            variance,
            psf_image.astype(np.float64),
            min_pix=self.min_pix,
            bkg_bin_size=self.bkg_bin_size,
            thr_value=self.thr_value,
        )
        # Keep only deblended children (rows without further children).
        children = catalog[catalog["deblend_nChild"] == 0]
        children = children.copy(deep=True)
        return children.asAstropy()

    def get_deblended_images(self, data, index):
        # Deblending happens inside the stack run; nothing extra to return.
        return None
def run_stack(
    image_array, variance_array, psf_array, min_pix=1, bkg_bin_size=32, thr_value=5
):
    """
    Set up the LSST DM stack and perform detection, deblending and
    measurement.

    Args:
        image_array: Numpy array of image to run stack on
        variance_array: per pixel variance of the input image_array (must
            have same dimensions as image_array)
        psf_array: Image of the PSF for image_array.
        min_pix: Minimum size in pixels of a source to be considered by the
            stack (default=1).
        bkg_bin_size: Binning of the local background in pixels (default=32).
        thr_value: SNR threshold for the detected sources to be included in the
            final catalog (default=5).

    Returns:
        catalog: source catalog of detected sources (lsst.afw.table).
    """
    # Imported lazily so the LSST stack is only required when this function
    # is actually used. The duplicate `import lsst.afw.table` and the
    # redundant bare `import lsst` from the original have been removed.
    import lsst.afw.image
    import lsst.afw.math
    import lsst.afw.table
    import lsst.meas.algorithms
    import lsst.meas.base
    import lsst.meas.deblender

    # Convert the numpy arrays to stack Image objects
    image = lsst.afw.image.ImageF(image_array)
    variance = lsst.afw.image.ImageF(variance_array)
    # Generate a masked image, i.e., an image+mask+variance image (mask=None)
    masked_image = lsst.afw.image.MaskedImageF(image, None, variance)
    # Create the PSF kernel in the stack's format
    psf_im = lsst.afw.image.ImageD(psf_array)
    fkernel = lsst.afw.math.FixedKernel(psf_im)
    psf = lsst.meas.algorithms.KernelPsf(fkernel)
    # Passing the image to the stack
    exposure = lsst.afw.image.ExposureF(masked_image)
    # Assign the exposure the PSF that we created
    exposure.setPsf(psf)

    schema = lsst.afw.table.SourceTable.makeMinimalSchema()
    # Separate config objects for detection and measurement: the original
    # reused one name (`config1`) for both, which was easy to misread.
    detect_config = lsst.meas.algorithms.SourceDetectionConfig()
    # Tweaks in the configuration that can improve detection
    # Change carefully!
    #####
    detect_config.tempLocalBackground.binSize = bkg_bin_size
    detect_config.minPixels = min_pix
    detect_config.thresholdValue = thr_value
    #####
    detect = lsst.meas.algorithms.SourceDetectionTask(schema=schema, config=detect_config)
    deblend = lsst.meas.deblender.SourceDeblendTask(schema=schema)

    measure_config = lsst.meas.base.SingleFrameMeasurementConfig()
    # Optional HSM shape-measurement plugins (disabled by default):
    # measure_config.plugins.names.add('ext_shapeHSM_HsmShapeRegauss')
    # measure_config.plugins.names.add('ext_shapeHSM_HsmSourceMoments')
    # measure_config.plugins.names.add('ext_shapeHSM_HsmPsfMoments')
    measure = lsst.meas.base.SingleFrameMeasurementTask(schema=schema, config=measure_config)

    table = lsst.afw.table.SourceTable.make(schema)
    detect_result = detect.run(table, exposure)  # run detection task
    catalog = detect_result.sources
    deblend.run(exposure, catalog)  # run the deblending task
    measure.run(catalog, exposure)  # run the measuring task
    # Deep-copy so the returned catalog owns contiguous records independent
    # of the task-internal tables.
    catalog = catalog.copy(deep=True)
    return catalog
class Scarlet_params(Measurement_params):
    """Class with functions that describe how scarlet should deblend
    images in the input data."""

    iters = 200  # Maximum number of iterations for scarlet to run
    e_rel = 1e-5  # Relative error for convergence
    # If True, source centers are detected with SEP; otherwise the positions
    # from the blend catalog are used (see get_deblended_images).
    detect_centers = True

    def __init__(self, show_scene=False):
        """Class with functions that describe how scarlet should deblend
        images in the input data

        Args:
            show_scene: If True plot the scarlet deblended model and residual
                image (default is False).
        """
        self.show_scene = show_scene

    @staticmethod
    def get_centers(image):
        """Returns centers from SEP detection on the band averaged mean of the
        input image.

        Args:
            image: Numpy array of multi-band image to run scarlet on
                [Number of bands, height, width].

        Returns:
            Array of x and y coordinate of centroids of objects in the image.
        """
        sep = __import__("sep")
        detect = image.mean(axis=0)  # simple average for detection
        bkg = sep.Background(detect)
        catalog = sep.extract(detect, 1.5, err=bkg.globalrms)
        return np.stack((catalog["x"], catalog["y"]), axis=1)

    def scarlet_initialize(self, images, peaks, psfs, variances, bands):
        """Initializes scarlet ExtendedSource at locations specified as
        peaks in the (multi-band) input images.

        Args:
            images: Numpy array of multi-band image to run scarlet on
                [Number of bands, height, width].
            peaks: Array of x and y coordinate of centroids of objects in
                the image [number of sources, 2].
            psfs: Numpy array of psf image in all bands [Number of bands,
                height, width].
            variances: Variance image of the blend scene[Number of bands,
                height, width].
            bands: List of filter names in which to simulate images.

        Returns:
            blend: scarlet.Blend object for the initialized sources
            observation: scarlet.Observation object with information to render
                the scarlet model.
        """
        scarlet = __import__("scarlet")
        model_psf = scarlet.GaussianPSF(sigma=(0.8,) * len(bands))
        model_frame = scarlet.Frame(images.shape, psfs=model_psf, channels=bands)
        # Inverse-variance weights; observation is matched to the model frame.
        observation = scarlet.Observation(
            images, psfs=scarlet.ImagePSF(psfs), weights=1.0 / variances, channels=bands
        ).match(model_frame)
        sources = []
        for n, peak in enumerate(peaks):
            # Note the (y, x) ordering of the center passed to scarlet.
            result = scarlet.ExtendedSource(
                model_frame,
                (peak[1], peak[0]),
                observation,
                thresh=1,
                shifting=True,
            )
            sources.append(result)
        blend = scarlet.Blend(sources, observation)
        blend.fit(self.iters, e_rel=self.e_rel)
        if self.show_scene:
            plot_utils.show_scarlet_residual(
                blend, observation=observation, limits=(30, 90)
            )
        return blend, observation

    def get_deblended_images(self, data, index):
        """Deblend input images with scarlet.

        Args:
            data (dict): Output generated by btk.draw_blends containing blended
                images, isolated images, observing conditions and blend
                catalog, for a given batch.
            index (int): Index number of blend scene in the batch to preform
                measurement on.

        Returns:
            a dict with the scarlet deblended images and peaks of the sources.
        """
        # Reorder [height, width, bands] -> [bands, height, width] as
        # expected by scarlet.
        images = np.transpose(data["blend_images"][index], axes=(2, 0, 1))
        bands = []
        psf_stamp_size = 41  # size in pixels of the stamp the PSF is drawn on
        psfs = np.zeros((len(images), psf_stamp_size, psf_stamp_size), dtype=np.float32)
        variances = np.zeros_like(images)
        n_bands = images.shape[0]
        for i in range(n_bands):
            bands.append(data["obs_condition"][i].filter_band)
            obs_conds = data["obs_condition"][i]
            psf, mean_sky_level = obs_conds.get_psf_sky(psf_stamp_size)
            psfs[i] = psf
            # Poisson-limited variance estimate: signal plus sky background.
            variances[i] = images[i] + mean_sky_level
        blend_cat = data["blend_list"][index]
        if self.detect_centers:
            peaks = self.get_centers(images)
        else:
            peaks = np.stack((blend_cat["dx"], blend_cat["dy"]), axis=1)
        blend, observation = self.scarlet_initialize(
            images, peaks, psfs, variances, np.array(bands, dtype=str)
        )
        im, selected_peaks = [], []
        for k, component in enumerate(blend):
            y, x = component.center
            selected_peaks.append([x, y])
            model = component.get_model()
            # Render the model through the observation (PSF convolution).
            model_ = observation.render(model)
            # Back to [height, width, bands] ordering for the caller.
            im.append(np.transpose(model_, axes=(1, 2, 0)))
        return {"deblend_image": np.array(im), "peaks": selected_peaks}
def make_true_seg_map(image, threshold):
    """Returns a boolean segmentation map corresponding to pixels in
    image above a certain threshold value.

    Args:
        image: Image to estimate segmentation map of
        threshold: Pixels with values >= this threshold are marked as
            belonging to the segmentation map

    Returns:
        Boolean segmentation map of the image
    """
    # `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # `bool` produces the same dtype. A direct comparison replaces the old
    # zeros/assign/astype dance; NaN pixels compare False and stay unmarked,
    # matching the original behavior.
    return (image >= threshold).astype(bool)
def basic_selection_function(catalog, max_size=4, max_mag=27):
    """Apply selection cuts to the input catalog.

    Only galaxies that satisfy the below criteria are returned:
    1) reference-band magnitude less than or equal to `max_mag`.
    2) Second moment size less than or equal to `max_size` arcsec.

    Second moments size (r_sec) computed as described in A1 of Chang et.al 2012.

    (The original docstring hardcoded the default cut values 27 and 4 as if
    they were fixed, although both are tunable parameters.)

    Args:
        catalog: CatSim-like catalog from which to sample galaxies; must
            provide "btk_size" and "ref_mag" columns.
        max_size: Maximum allowed second-moment size in arcsec (default=4).
        max_mag: Maximum allowed reference magnitude (default=27).

    Returns:
        CatSim-like catalog after applying selection cuts.
    """
    (q,) = np.where((catalog["btk_size"] <= max_size) & (catalog["ref_mag"] <= max_mag))
    return catalog[q]
class Basic_measure_params(Measurement_params):
"""Class to perform detection by identifying peaks with skimage"""
@staticmethod
def get_centers(image):
"""Return centers detected when object detection is performed on the
input image with skimage.feature.peak_local_max.
Args:
image (np.ndarray): Image (single band) of galaxy to perform measurement
Returns:
centers: x and y coordinates of detected centroids
"""
# set detection threshold to 5 times std of image
threshold = 5 * np.std(image)
coordinates = skimage.feature.peak_local_max(
image, min_distance=2, threshold_abs=threshold
)
return np.stack((coordinates[:, | |
[]
prev_cfg_data = []
for _cfg, _cfg_sec, _cfg_repeat, _exclude_common_secs in cfg_file_list:
if not _cfg:
continue
if _cfg_repeat:
assert prev_cfg_data, "repeat cfg found without previous cfg data: {}".format(_cfg)
txt = 'Processing repeat parameters from {:s}'.format(_cfg)
if _exclude_common_secs:
txt = '{} without common sections'.format(txt)
print(txt)
else:
prev_cfg_data = read_cfg(_cfg, enable_cache=cfg_cache)
# _sections = [(k, i) for k, i in _sections]
nodes, nodes_by_fullname, _sections, _file_args, file_args_offset, root_sec_name = prev_cfg_data
n_file_args = len(_file_args)
"""remove excluded sections"""
excluded_cfg_sec = [_sec.lstrip('!') for _sec in _cfg_sec if _sec.startswith('!')]
section_names, section_line_ids, section_end_ids, section_seq_ids, section_template_ids = zip(
*[(_sec[0], _sec[1], _sec[2], i, _sec[4]) for i, _sec in enumerate(_sections)
if _sec not in excluded_cfg_sec])
if excluded_cfg_sec:
print('Excluding section(s):\n{}'.format(pformat(excluded_cfg_sec)))
_cfg_sec = [_sec for _sec in _cfg_sec if _sec not in excluded_cfg_sec]
assert _cfg_sec, 'No included sections found for {}'.format(_cfg)
if not _exclude_common_secs:
"""add common sections"""
common_section_names = [s for __i, s in enumerate(section_names) if
nodes[section_seq_ids[__i]].is_common]
_cfg_sec += common_section_names
"""unique section names"""
_cfg_sec = list(set(_cfg_sec))
"""specific sections from full names"""
invalid_sec = [(_id, _sec) for _id, _sec in enumerate(_cfg_sec) if _sec not in section_names]
specific_sec = []
specific_sec_ids = {}
# _node_matches = {( _id, _sec) : nodes[k] for _id, _sec in invalid_sec for k in nodes
# if nodes[k].full_name == _sec}
for _id, _sec in invalid_sec:
try:
_node_matches = nodes_by_fullname[_sec] # type: list
except KeyError:
raise AssertionError('Section {} not found in {}'.format(
_sec, _cfg))
# curr_specific_sec = []
for _node in _node_matches: # type:Node
parent = _node.parent
specific_sec.append((_node.seq_id, _node.name))
specific_sec_ids[_node.seq_id] = 0
specific_sec.append((parent.seq_id, parent.name))
specific_sec_ids[parent.seq_id] = 1
# if _node.parent.template_id:
# shared_parents = template_nodes[_node.parent.template_id]
# else:
# shared_parents = [_node.parent, ]
#
# for parent in shared_parents:
# specific_sec.append((parent.seq_id, parent.name))
# specific_sec_ids.append(parent.seq_id)
# _sec_matches = []
# _curr_sec_node = _node
# while _curr_sec_node.parent is not None:
# _sec_matches.append((_curr_sec_node.seq_id, _curr_sec_node.name))
# _curr_sec_node = _curr_sec_node.parent
# specific_sec += _sec_matches[::-1]
# specific_sec[_sec] = curr_specific_sec
_cfg_sec[_id] = ''
# valid_check = [_sec in sections for _sec in _cfg_sec]
# assert all(valid_check), \
# 'One or more sections: {} from:\n{}\nnot found in cfg file {} with sections:\n{}'.format(
# [_sec for _sec in _cfg_sec if _sec not in sections],
# pformat(_cfg_sec), _cfg, pformat(sections))
"""all occurrences of each section
"""
_cfg_sec_ids = [[i for i, x in enumerate(section_names) if _sec and x == _sec] for _sec in _cfg_sec]
# _cfg_sec_ids = [item for sublist in _cfg_sec_ids for item in sublist]
"""flatten
"""
__cfg_sec_ids = []
__cfg_sec = []
is_included = defaultdict(bool)
for _sec, _sec_ids in zip(_cfg_sec, _cfg_sec_ids):
for _sec_id in _sec_ids:
__cfg_sec.append(_sec)
__cfg_sec_ids.append(_sec_id)
is_included[section_seq_ids[_sec_id]] = True
_cfg_sec_disp = []
valid_cfg_sec = []
skipped_cfg_sec = []
skipped_parent_seq_ids = []
_sec_args = []
valid_parent_names = [root_sec_name, ]
valid_parent_seq_ids = [None, ]
_common_str = ''
for _sec_id, x in specific_sec:
__cfg_sec_ids.append(_sec_id)
is_included[section_seq_ids[_sec_id]] = True
__cfg_sec.append(x)
# n_sections = len(sections)
"""sort by line and process each cfg section
"""
__cfg_sec_sorted = sorted(zip(__cfg_sec_ids, __cfg_sec))
# __cfg_seq_ids = [section_seq_ids[k[0]] for k in __cfg_sec_sorted]
for _sec_id, x in __cfg_sec_sorted:
_curr_sec_seq_id = section_seq_ids[_sec_id]
_curr_sec_node = nodes[_curr_sec_seq_id] # type: Node
_curr_sec_parent_name = _curr_sec_node.parent.name
_curr_sec_parent_seq_id = _curr_sec_node.parent.seq_id
_curr_sec_seq_id = _curr_sec_node.seq_id
_curr_sec_name = section_names[_sec_id]
if _curr_sec_parent_seq_id not in valid_parent_seq_ids:
# if _sec_id in specific_sec_ids and specific_sec_ids[_sec_id] == 0:
# raise AssertionError('Specific section {} not found'.format(_curr_sec_ancestral_path))
# print('skipping section {}'.format(_curr_sec_ancestral_path))
if _curr_sec_parent_seq_id in skipped_parent_seq_ids:
skipped_cfg_sec.append(x)
skipped_parent_seq_ids.append(_curr_sec_seq_id)
continue
if _curr_sec_name == '__exc__':
"""exclusive sibling section"""
included_siblings = [(_node.seq_id, _node.name) for _node in _curr_sec_node.parent.children
if is_included[_node.seq_id] and _node.seq_id != _curr_sec_seq_id]
if included_siblings:
assert len(included_siblings) == 1, \
"multiple included siblings for " \
"exclusive section with parent {},{} :: {}".format(
_curr_sec_parent_seq_id, _curr_sec_parent_name, included_siblings)
print('skipping exclusive section {} with parent {},{} due to included sibling: {}'.format(
_curr_sec_seq_id, _curr_sec_parent_seq_id, _curr_sec_parent_name, included_siblings[0]
))
skipped_cfg_sec.append(x)
skipped_parent_seq_ids.append(_curr_sec_seq_id)
continue
_curr_sec_full_name = _curr_sec_node.full_name
_curr_sec_parent_full_name = _curr_sec_node.parent.full_name
ancestors = _curr_sec_node.get_ancestors()
_curr_sec_ancestral_path = ':'.join([ancestor.name for ancestor in ancestors[::-1]
if ancestor.name not in common_section_names] +
[_curr_sec_name, ])
_curr_sec_root_name = ancestors[-1].name if ancestors else _curr_sec_name
assert x == _curr_sec_name, "mismatch between x: {} and _curr_sec_name: {}".format(x, _curr_sec_name)
valid_parent_seq_ids.append(_curr_sec_seq_id)
valid_parent_names.append(x)
valid_cfg_sec.append(x)
_start_id = section_line_ids[_sec_id] + 1
_template_id = section_template_ids[_sec_id]
# if _template_id:
# """template sections with the same ID all have same line IDs so look for the
# first subsequent section with different ID that is not an ancestor;
# """
# _end_id = n_file_args
# _next_sec_id = None
# # ancestors = _curr_sec_node.get_ancestors()
# # ancestor_template_ids = [ancestor.template_id for ancestor in ancestors]
# for i in range(_sec_id + 1, n_sections):
# if section_template_ids[i] != _template_id and section_line_ids[i] > _start_id:
# _next_node = nodes[section_seq_ids[i]]
# _next_template_id = section_template_ids[i]
# _end_id = section_line_ids[i]
# _next_sec_id = i
# break
# else:
# _end_id = section_line_ids[_sec_id + 1] if _sec_id < n_sections - 1 else n_file_args
_end_id = section_end_ids[_sec_id]
# discard empty and comment lines from start of section
orig_start_id = _start_id
while _start_id < n_file_args:
if _file_args[_start_id] and not _file_args[_start_id].startswith('#'):
# if _file_args[_start_id - 1]:
break
_start_id += 1
# discard empty and comment lines from end of section
orig_end_id = _end_id
while _end_id >= 0:
if _file_args[_end_id - 1] and not _file_args[_end_id - 1].startswith('#'):
# if _file_args[_end_id - 1]:
break
_end_id -= 1
if _start_id >= _end_id:
if x not in common_section_names:
# print('skipping empty section {} ({}, {})'.format(x, orig_start_id, orig_end_id))
assert orig_start_id == orig_end_id, "invalid empty section {} ({}, {})".format(
x, orig_start_id, orig_end_id)
continue
_curr_sec_args = _file_args[_start_id:_end_id]
for i, _curr_sec_arg in enumerate(_curr_sec_args):
_curr_sec_args[i] = _curr_sec_args[i].replace('__name__', _curr_sec_name)
_curr_sec_sub_names = _curr_sec_name.split('_')
if len(_curr_sec_sub_names) > 1:
for sub_name_id, sub_name in enumerate(_curr_sec_sub_names):
_curr_sec_args[i] = _curr_sec_args[i].replace('__name{}__'.format(sub_name_id), sub_name)
_curr_sec_args[i] = _curr_sec_args[i].replace(
'__parent__', _curr_sec_parent_name)
if '__g_parent__' in _curr_sec_args[i]:
_curr_sec_args[i] = _curr_sec_args[i].replace(
'__g_parent__', _curr_sec_node.parent.parent.name)
if '__gg_parent__' in _curr_sec_args[i]:
_curr_sec_args[i] = _curr_sec_args[i].replace(
'__gg_parent__',
_curr_sec_node.parent.parent.parent.name)
_curr_sec_args[i] = _curr_sec_args[i].replace(
'__root__', _curr_sec_root_name)
_curr_sec_args[i] = _curr_sec_args[i].replace(
'__full__', _curr_sec_full_name)
_curr_sec_args[i] = _curr_sec_args[i].replace(
'__parent_full__',
_curr_sec_parent_full_name)
if '__g_full__' in _curr_sec_args[i]:
_curr_sec_args[i] = _curr_sec_args[i].replace(
'__g_full__',
'{}_{}_{}'.format(
_curr_sec_node.name,
_curr_sec_node.parent.name,
_curr_sec_node.parent.parent.name,
)
)
if '__gg_full__' in _curr_sec_args[i]:
_curr_sec_args[i] = _curr_sec_args[i].replace(
'__gg_full__',
'{}_{}_{}_{}'.format(
_curr_sec_node.name,
_curr_sec_node.parent.name,
_curr_sec_node.parent.parent.name,
_curr_sec_node.parent.parent.parent.name
)
)
if '__ratio__' in _curr_sec_args[i]:
_curr_sec_args[i] = _curr_sec_args[i].replace(
'__ratio__',
str(float(_curr_sec_name) / 100.0))
if '__parent_ratio__' in _curr_sec_args[i]:
_curr_sec_args[i] = _curr_sec_args[i].replace(
'__parent_ratio__',
str(float(_curr_sec_parent_name) / 100.0))
if '__g_parent_ratio__' in _curr_sec_args[i]:
_curr_sec_args[i] = _curr_sec_args[i].replace(
'__g_parent_ratio__',
str(float(_curr_sec_node.parent.parent.name) / 100.0))
if '__gg_parent_ratio__' in _curr_sec_args[i]:
_curr_sec_args[i] = _curr_sec_args[i].replace(
'__gg_parent_ratio__',
str(float(_curr_sec_node.parent.parent.parent.name) / 100.0))
if '__list__' in _curr_sec_args[i]:
_curr_sec_args[i] = _curr_sec_args[i].replace(
'__list__',
','.join(_curr_sec_name.split('_')))
if '__parent_list__' in _curr_sec_args[i]:
_curr_sec_args[i] = _curr_sec_args[i].replace(
'__parent_list__',
','.join(_curr_sec_parent_name.split('_')))
if '__g_parent_list__' in _curr_sec_args[i]:
_curr_sec_args[i] = _curr_sec_args[i].replace(
'__g_parent_list__',
','.join(_curr_sec_node.parent.parent.name.split('_')))
if '__gg_parent_list__' in _curr_sec_args[i]:
_curr_sec_args[i] = _curr_sec_args[i].replace(
'__gg_parent_list__',
','.join(_curr_sec_node.parent.parent.parent.name.split('_')))
def name_to_list_ratio(_name):
temp = _name.split('_')
for k_id, k in enumerate(temp):
if k.startswith('n'):
k = k.replace('n', '-')
k = str(float(k) / 100.0)
temp[k_id] = k
return ','.join(temp)
if '__list_ratio__' in _curr_sec_args[i]:
_curr_sec_args[i] = _curr_sec_args[i].replace(
'__list_ratio__',
name_to_list_ratio(_curr_sec_name))
if '__parent_list_ratio__' in _curr_sec_args[i]:
_curr_sec_args[i] = _curr_sec_args[i].replace(
'__parent_list_ratio__',
name_to_list_ratio(_curr_sec_parent_name))
if '__g_parent_list_ratio__' in _curr_sec_args[i]:
_curr_sec_args[i] = _curr_sec_args[i].replace(
'__g_parent_list_ratio__',
name_to_list_ratio(_curr_sec_node.parent.parent.name))
if '__gg_parent_list_ratio__' in _curr_sec_args[i]:
_curr_sec_args[i] = _curr_sec_args[i].replace(
'__gg_parent_list_ratio__',
name_to_list_ratio(_curr_sec_node.parent.parent.parent.name))
if '__range__' in _curr_sec_args[i]:
_curr_sec_args[i] = _curr_sec_args[i].replace(
'__range__',
':'.join(_curr_sec_name.split('_')))
if '__parent_range__' in _curr_sec_args[i]:
_curr_sec_args[i] = _curr_sec_args[i].replace(
'__parent_range__',
':'.join(_curr_sec_parent_name.split('_')))
if '__g_parent_range__' in _curr_sec_args[i]:
_curr_sec_args[i] = _curr_sec_args[i].replace(
'__g_parent_range__',
':'.join(_curr_sec_node.parent.parent.name.split('_')))
if '__gg_parent_range__' in _curr_sec_args[i]:
_curr_sec_args[i] = _curr_sec_args[i].replace(
'__gg_parent_range__',
':'.join(_curr_sec_node.parent.parent.parent.name.split('_')))
_sec_args += _curr_sec_args
start_line_num = _start_id + 1 - file_args_offset
end_line_num = _end_id - file_args_offset
if x not in common_section_names:
# if _sec_id in specific_sec_ids and not _curr_sec_node.parent.is_root:
# _sec_disp_name = _curr_sec_full_name
# # elif not _curr_sec_node.parent.is_root:
# # _sec_disp_name = '{}:{}'.format(_curr_sec_parent_name, _curr_sec_name)
# else:
# _sec_disp_name = _curr_sec_ancestral_path
_sec_disp_name = _curr_sec_ancestral_path
_str = '{}: {}'.format(_sec_disp_name, start_line_num)
if end_line_num > start_line_num:
_str = '{} -> {}'.format(_str, end_line_num)
_cfg_sec_disp.append(_str)
else:
_str = '{}'.format(start_line_num)
if end_line_num > start_line_num:
_str = '{} -> {}'.format(_str, end_line_num)
_common_str = '{}, {}'.format(_common_str, _str) if _common_str else _str
# print(_str)
# pass
invalid_cfg_sec = [k for k in __cfg_sec if k and k not in valid_cfg_sec + skipped_cfg_sec]
if invalid_cfg_sec:
raise AssertionError('Invalid cfg sections provided for {}:\n {}'.format(_cfg, invalid_cfg_sec))
if _common_str:
_common_str = 'common: {}'.format(_common_str)
_cfg_sec_disp.append(_common_str)
print('\t{}'.format(
'\n\t'.join(_cfg_sec_disp)
# pformat(_cfg_sec_disp)
))
file_args = [arg.strip() for arg in _sec_args if arg.strip()]
# lines starting with # in the cfg file are comments or section | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ## ###############################################
#
# interfaceTools.py
# Contains all the tools for creating an interface
#
# Autor: <NAME>
# License: MIT
#
# ## ###############################################
from locale import normalize
from tkinter import * # Carga módulo tk (widgets estándar)
from tkinter import filedialog as fd
from tkinter import ttk # Carga ttk (para widgets nuevos 8.5+)
from tkinter import messagebox
from tkinter.font import Font
from PIL import ImageTk, Image
import cv2
import os
import src.oibread as oib
import src.tiff as tif
import src.lsmread as lsm
from .imageFunctions import istiffRGB
# Defines the application's main window (created at import time)
mainWindow = Tk()
# PSF model/mode combinations offered in the deconvolution combobox (see NewWindow.createCombobox)
psftypes = ['psf.ISOTROPIC | psf.EXCITATION','psf.ISOTROPIC | psf.EMISSION','psf.ISOTROPIC | psf.WIDEFIELD','psf.ISOTROPIC | psf.CONFOCAL',
'psf.ISOTROPIC | psf.TWOPHOTON','psf.GAUSSIAN | psf.EXCITATION','psf.GAUSSIAN | psf.EMISSION','psf.GAUSSIAN | psf.WIDEFIELD','psf.GAUSSIAN | psf.CONFOCAL',
'psf.GAUSSIAN | psf.TWOPHOTON','psf.GAUSSIAN | psf.EXCITATION | psf.PARAXIAL','psf.GAUSSIAN | psf.EMISSION | psf.PARAXIAL','psf.GAUSSIAN | psf.WIDEFIELD | psf.PARAXIAL',
'psf.GAUSSIAN | psf.CONFOCAL | psf.PARAXIAL','psf.GAUSSIAN | psf.TWOPHOTON | psf.PARAXIAL']
# Module-level UI state shared by the helper functions below.
file = ''  # last opened file path — declared global in openFile but not reassigned in this chunk
filesName = []  # names of open image windows (maintained by NewWindow.on_closing)
filesPath = []  # presumably paths of opened files — not written in this chunk
statusBar = None  # status-bar Label used by printMessage; NOTE(review): createStatusBar assigns
                  # a lowercase `statusbar` global instead, so this appears to stay None — confirm
tensor_img = None  # last loaded image tensor (global slot used by openFile)
panelImg = None  # Label panel holding the displayed image (global slot used by openFile)
cmbxFile, opcSF = (None, None)  # save-dialog combobox and its window (see saveFile/saveFileEvent)
windows_img = []  # NewWindow instances currently displaying images
infoFile = {}  # presumably per-file metadata cache — not used in this chunk
currentDir = os.getcwd()  # directory used to seed the file dialogs
def _openStackFile(filepath, nameFile, metadata):
    """Display a (non-RGB) .tif/.lsm stack in a new window; shared branch body of openFile."""
    if istiffRGB(metadata['tensor'].shape):
        # RGB tiff stacks were silently ignored by the original code; preserve that.
        return
    print('File: ', nameFile)
    print('Shape: ', metadata['tensor'].shape)
    venImg = NewWindow(filepath, metadata = metadata, image = True)
    if metadata['tensor'].ndim == 4 or metadata['tensor'].ndim == 3:
        # multi-axis stack: full viewer with axis scrollbars
        venImg.desplay_image(metadata['tensor'])
    else:
        # plain 2-D plane: static panel
        venImg.placeImage(metadata['tensor'])
        venImg.tensor_img = metadata['tensor']
    windows_img.append(venImg)

def openFile():
    """This function open files of type .oib .tif and .bmp

    Asks for a path with a file dialog, loads the image/stack with the matching
    reader (tif/lsm/oib/plain image) and opens a NewWindow to display it.
    Shows a message box when the reader raises IndexError (unsupported format).
    """
    global file, tensor_img, panelImg, currentDir
    filepath = fd.askopenfilename(initialdir = currentDir, title = 'Select a file', defaultextension = '*.*', filetypes = (('oib files','*.oib'),('lsm files','*.lsm'),('tif files','*.tif'),('bmp files','*.bmp'),('png files','*.png'),('jpg files','*.jpg')))
    currentDir = filepath
    if len(filepath) > 0:
        try:
            import src.imageFunctions as imf
            nameFile = filepath.split('/')[-1]
            ext = os.path.splitext(nameFile)[1]
            if ext == '.tif':
                _openStackFile(filepath, nameFile, tif.getMetadata(filepath))
            elif ext == '.lsm':
                _openStackFile(filepath, nameFile, lsm.getMetadata(filepath))
            elif ext == '.oib':
                metadata = oib.getMetadata(filepath)
                print('File: ', nameFile)
                print('Shape: ', metadata['tensor'].shape)
                venImg = NewWindow(filepath, metadata = metadata, image = True)
                venImg.desplay_image(metadata['tensor'])
                windows_img.append(venImg)
            else:
                # plain raster formats (.bmp/.png/.jpg); cv2 is already imported at module level
                print('File: ', nameFile)
                metadata = imf.getMetadataImg(filepath)
                venImg = NewWindow(filepath, metadata=metadata, image = True)
                venImg.placeImage(metadata['tensor'])
                venImg.tensor_img = metadata['tensor']
                windows_img.append(venImg)
        except IndexError:
            messagebox.showinfo(message='Format not supported')
def saveFile():
    """This function save files of type .oib .tif and .bmp

    Opens a small dialog asking which open image window should be saved;
    the actual writing happens in saveFileEvent.
    """
    global cmbxFile, opcSF
    if not windows_img:
        messagebox.showinfo(message='No file has been opened')
        return
    opcSF = NewWindow('Save File', '300x100')
    opcSF.createLabel('What image do you want to save?', 20, 20)
    cmbxFile = opcSF.createCombobox2(getNamesWindows(), 20, 50)
    opcSF.createButton('Save', saveFileEvent, 'bottom')
def saveFileEvent():
    """Write the image selected in the 'Save File' dialog to disk.

    Stacks (ndim 4, or non-RGB ndim 3) are rescaled to uint16 and written with
    tifffile; RGB and 2-D images are written with cv2. Any failure pops an
    error dialog instead of propagating.
    """
    global cmbxFile, opcSF, currentDir
    import sys  # was missing: the original except handler referenced sys without importing it
    import tifffile
    import numpy as np
    from src.imageFunctions import istiffRGB
    selected_file = cmbxFile.current()
    image = windows_img[selected_file].tensor_img
    namewin = windows_img[selected_file].nameWindow
    try:
        if image.ndim == 4:
            savepath = fd.asksaveasfilename(initialdir = currentDir,title = 'Select a file', defaultextension = '.tif', initialfile = namewin, filetypes = (('tif files','*.tif'),))
            # was `currentDir = filepath`: NameError (no `filepath` here) made every save fail
            currentDir = savepath
            if savepath != '':
                # rescale to the full uint16 range before writing the stack
                tifffile.imsave(savepath, np.uint16(image*(65535/image.max())), imagej=True)
                printMessage('Saved file: '+savepath)
        if image.ndim == 3:
            savepath = fd.asksaveasfilename(initialdir = currentDir,title = 'Select a file', defaultextension = '.tif', initialfile = namewin, filetypes = (('tif files','*.tif'),('png files','*.png'),('jpg files','*.jpg'),('bmp files','*.bmp')))
            currentDir = savepath
            if not istiffRGB(image.shape):
                tifffile.imsave(savepath, np.uint16(image*(65535/image.max())), imagej=True)
                printMessage('Saved file: '+savepath)
            else:
                cv2.imwrite(savepath, image)
                printMessage('Saved file: '+savepath)
        if image.ndim == 2:
            savepath = fd.asksaveasfilename(initialdir = currentDir,title = 'Select a file', defaultextension = '.png', initialfile = namewin, filetypes = (('png files','*.png'),('jpg files','*.jpg'),('bmp files','*.bmp')))
            currentDir = savepath
            cv2.imwrite(savepath, image)
            printMessage('Saved file: '+savepath)
        opcSF.destroy()
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit still propagate
        messagebox.showinfo(message='Error when trying to save the file, try again')
        print("Error: ", sys.exc_info()[0])
def printMessage(message):
    """Echo `message` to stdout and mirror it in the main-window status bar."""
    print(message)
    # NOTE(review): `statusBar` is initialised to None at module level and
    # createStatusBar assigns a lowercase `statusbar` instead — confirm this
    # is ever a real Label before relying on it.
    statusBar.configure(text = message)
def getNamesWindows():
    """Return the window title of every currently open image window."""
    return [window_object.nameWindow for window_object in windows_img]
def getFormatTime(time):
    """Split a duration in seconds into a (minutes, seconds) tuple."""
    print(time)  # debug trace kept from the original implementation
    return (int(time / 60), int(time % 60))
def createWindowMain():
    """Definition of the main window"""
    # Configures the module-level main window (already created at import time)
    #mainWindow = Tk()
    mainWindow.geometry('500x50') # width x height
    # Set the window background colour.
    mainWindow.configure(bg = 'beige')
    # Set the window title
    mainWindow.title('IFC Microspy')
    #mainWindow.iconbitmap('icon/ifc.ico')
    mainWindow.tk.call('wm', 'iconphoto', mainWindow._w, PhotoImage(file='src/icon/ifc.png'))
    mainWindow.resizable(width=False,height=False)
    #return mainWindow
#def createMenu(mainWindow):
def createMenu():
    """Create the top menu bar and attach it to the main window."""
    menu_bar = Menu(mainWindow)
    mainWindow.config(menu=menu_bar)
    return menu_bar
def createOption(menu):
    """Return a new drop-down menu (tear-off disabled) attached to `menu`."""
    return Menu(menu, tearoff=0)
def createCommand(opc, labelName, commandName):
    """This function creates a command"""
    # Append an entry labelled `labelName` that calls `commandName` when clicked.
    opc.add_command(label=labelName, command = commandName)
def createCascade(menu, labelName, option):
    """This function creates a tab main"""
    # Attach the drop-down `option` to `menu` under the label `labelName`.
    menu.add_cascade(label=labelName, menu=option)
def createButton(text, command, side):
    """This function creates a button"""
    # Packed directly into the main window on the given side ('left', 'bottom', ...).
    ttk.Button(mainWindow, text=text, command=command).pack(side=side)
def createEntry(stringVar,x,y):
    """Create a ttk Entry bound to `stringVar`, place it at (x, y) and return it."""
    widget = ttk.Entry(mainWindow, textvariable=stringVar)
    widget.place(x=x, y=y)
    return widget
def createLabel(text,x,y, family = "Helvetica", size = 11, weight = "normal", slant = "roman", underline=0):
    """Place a text Label on the main window.

    `slant` and `underline` are accepted for call compatibility but are not
    applied to the font (matching the original behaviour).
    """
    Label(mainWindow, text=text, font=Font(family=family, size=size, weight=weight)).place(x=x, y=y)
def createStringVar():
    """Return a fresh Tk StringVar."""
    return StringVar()
def createStatusBar():
    """Create the bottom status bar and bind it to the module-level `statusBar`.

    The bar shows the application name plus the first `git tag` (empty when the
    working directory is not a tagged git repository). It is returned and also
    stored in the global that printMessage() updates.
    """
    # was `global statusbar` (lowercase): printMessage() configures `statusBar`,
    # so the module-level slot stayed None and status updates crashed
    global statusBar
    v = os.popen('git tag').read().split('\n')
    statusBar = Label(mainWindow, text='IFC Microspy '+v[0], bd=1, relief=SUNKEN, anchor=W)
    statusBar.pack(side=BOTTOM, fill=X)
    return statusBar
class NewWindow:
"""This class contains the functions to define a window"""
def __init__(self,nameWindow,size = None, metadata = None, image = False):
if image:
self.metadata = metadata
self.nameFile = nameWindow.split('/')[-1]
self.nameWindow = self.nameFile.split('.')[0]
self.path = nameWindow
self.text = '\t'
self.img, self.tensor_img = (None, None)
self.inx0, self.inx1, self.inx2, self.inx3 = (1, 1, 1, 1)
self.inx0p, self.inx1p, self.inx2p, self.inx3p = (None, None, None, None) #previous index
self.posz, self.posc, self.post, self.percent = (0, 0, 0, 100)
self.normalize = False
else:
self.nameWindow = nameWindow
self.nameFile, self.path = (None, None)
self.window = Toplevel(mainWindow)
self.window.protocol("WM_DELETE_WINDOW", self.on_closing)
self.window.geometry(size) # (width, height)
#self.window.configure(bg = 'beige')
self.window.tk.call('wm', 'iconphoto', self.window._w, PhotoImage(file='src/icon/ifc.png'))
self.window.resizable(width=False,height=False)
self.window.title(self.nameWindow)
def on_closing(self):
print('Closed: ', self.nameWindow)
if (self.nameWindow in filesName):
filesName.remove(self.nameWindow)
if (self in windows_img):
windows_img.remove(self)
self.window.destroy()
    def destroy(self):
        # Close this window's Toplevel without the bookkeeping done by on_closing.
        self.window.destroy()
    def placeImage(self,img):
        """Show a plain (2-D/RGB) image in this window with zoom buttons and a status bar."""
        self.tensor_img = img
        self.validateSize()
        self.createStatusBar()
        self.createCheakButton('normalize',275,0)
        # scale for display at the current zoom percentage
        resized = self.resize_image_percent(img, self.percent)
        imageTk = ImageTk.PhotoImage(image=Image.fromarray(resized))
        self.panelImg = Label(self.window, image = imageTk)
        # keep a reference so Tk does not garbage-collect the photo image
        self.panelImg.image = imageTk
        self.panelImg.pack()
        ttk.Button(self.window, text='+', command=self.increase_size).place(x=0,y=0,width=20,height=20)
        ttk.Button(self.window, text='-', command=self.decrease_size).place(x=25,y=0,width=20,height=20)
def placeImageTensor(self,img):
# resize image
resized = self.resize_image_percent(img, self.percent)
imageTk = ImageTk.PhotoImage(image=Image.fromarray(resized))
self.panel = Label(self.window, image = imageTk)
self.panel.image = imageTk
self.panel.pack()
return self.panel
def placeImageAbout(self,img):
imageTk = ImageTk.PhotoImage(image=Image.fromarray(img))
self.panel = Label(self.window, image = imageTk)
self.panel.image = imageTk
self.panel.pack()
return self.panel
    def createButton(self,text, command, side):
        # Pack a ttk button into this window on the given side ('top', 'bottom', ...).
        ttk.Button(self.window, text=text, command=command).pack(side=side)
    def createButtonXY(self,text, command, x, y):
        # Same as createButton but absolutely positioned at (x, y) inside this window.
        ttk.Button(self.window, text=text, command=command).place(x=x,y=y)
def createLabel(self,text,x,y, family = "Helvetica", size = 11, weight = "normal", slant = "roman", underline=0):
font = Font(family = family,size = size,weight = weight)
label = Label(self.window, text=text, font=font).place(x=x, y=y)
    def createEntry(self,stringVar,x,y, width=10,disabled=False):
        """Create an Entry pre-filled with `stringVar`; optionally read-only (DISABLED)."""
        if disabled:
            entry = ttk.Entry(self.window, width=width)
            entry.insert(0,stringVar)
            entry.configure(state=DISABLED)
        else:
            entry = ttk.Entry(self.window, width=width)
            entry.insert(0, stringVar)
        entry.place(x=x, y=y)
        return entry
    def createCombobox(self,x,y):
        """Read-only combobox listing the module-level PSF type strings."""
        # NOTE(review): `files` is never defined at module level — this global
        # declaration looks like dead code; confirm before removing.
        global files
        dropdown = ttk.Combobox(self.window, state="readonly",values = psftypes, width=40)
        dropdown.place(x=x, y=y)
        if (len(filesName)>0):
            dropdown.current(0)
        # NOTE(review): unconditionally overrides the current(0) above with entry 13
        # ('psf.GAUSSIAN | psf.CONFOCAL | psf.PARAXIAL') — presumably the intended default.
        dropdown.current(13)
        return dropdown
def createCombobox2(self,values,x,y,width=40):
dropdown = ttk.Combobox(self.window, state="readonly",values = values, width=width)
dropdown.place(x=x, y=y)
if (len(values)>0):
dropdown.current(0)
return dropdown
    def createCheakButton(self, text, x, y):
        """Place the normalize check button; its state is read via self.checkvar."""
        self.checkvar = IntVar()
        Checkbutton(self.window, text=text, command=self.normalizeImgPanel, variable=self.checkvar, onvalue=1, offvalue=0, height=1, width=6).place(x=x, y=y)
def normalizeImgPanel(self):
if(self.checkvar.get()):
print('Normalizeing ',self.nameFile)
self.normalize = True
self.updatePanel()
else:
self.normalize = False
self.updatePanel()
    def scrollbarz(self, maxpos):
        """Build the z-axis scrollbar (plus its backing listbox) for `maxpos`+1 slices."""
        # NOTE(review): this assignment replaces the bound method with the widget on
        # the instance, so scrollbarz() can only be invoked once per window — confirm intended.
        self.scrollbarz = Scrollbar(self.window, orient=HORIZONTAL, command=self.scrollImagez)
        self.scrollbarz.pack(side=BOTTOM,fill=X)
        self.listboxz = Listbox(self.window, yscrollcommand=self.scrollbarz.set)
        for i in range(10+maxpos):
            self.listboxz.insert("end", '')
        self.listboxz.place(x=50,y=50)
        return self.scrollbarz
    def scrollbart(self, maxpos):
        """Build the t-axis (frames) scrollbar plus its backing listbox."""
        # NOTE(review): replaces the bound method with the widget (same pattern as scrollbarz).
        self.scrollbart = Scrollbar(self.window, orient=HORIZONTAL, command=self.scrollImaget)
        self.scrollbart.pack(side=BOTTOM,fill=X)
        self.listboxt = Listbox(self.window, yscrollcommand=self.scrollbart.set)
        for i in range(10+maxpos):
            self.listboxt.insert("end", '')
        self.listboxt.place(x=50,y=50)
        return self.scrollbart
    def scrollbarc(self, maxpos):
        """Build the c-axis (channels) scrollbar plus its backing listbox."""
        # NOTE(review): replaces the bound method with the widget (same pattern as scrollbarz).
        self.scrollbarc = Scrollbar(self.window, orient=HORIZONTAL, command=self.scrollImagec)
        self.scrollbarc.pack(side=BOTTOM,fill=X)
        self.listboxc = Listbox(self.window, yscrollcommand=self.scrollbarc.set)
        for i in range(10+maxpos):
            self.listboxc.insert("end", '')
        self.listboxc.place(x=50,y=50)
        return self.scrollbarc
def createStatusBar(self):
self.text = self.text + ' (' +str(self.metadata['X'])+ 'x' +str(self.metadata['Y'])+ ')' +' '+str(self.percent)+'%' + ' type: '+ str(self.metadata['type'])
self.statusbar = Label(self.window, text=self.text, bd=1, relief=SUNKEN, anchor=W)
self.statusbar.pack(side=TOP, fill=X)
return self.statusbar
    def update_axes(self):
        """Create one scrollbar per navigable axis present in the metadata and
        append the corresponding position indicator to the caption text."""
        if ('channels' in self.metadata):
            self.scrollbarc(self.metadata['channels']['value']-1)
            self.text = self.text + ' c:'+str(self.posc+1)+'/'+str(self.metadata['channels']['value'])
        if ('frames' in self.metadata):
            self.scrollbart(self.metadata['frames']['value']-1)
            self.text = self.text + ' t:'+str(self.post+1)+'/'+str(self.metadata['frames']['value'])
        if ('slices' in self.metadata):
            self.scrollbarz(self.metadata['slices']['value']-1)
            self.text = self.text + ' z:'+str(self.posz+1)+'/'+str(self.metadata['slices']['value'])
def update_text(self):
self.text = '\t'
if ('channels' in self.metadata):
self.text = self.text + ' c:'+str(self.posc+1)+'/'+str(self.metadata['channels']['value'])
if ('frames' in self.metadata):
self.text = self.text + ' t:'+str(self.post+1)+'/'+str(self.metadata['frames']['value'])
if ('slices' in self.metadata):
self.text = self.text + ' z:'+str(self.posz+1)+'/'+str(self.metadata['slices']['value'])
self.text = self.text + ' (' +str(self.metadata['X'])+ 'x' +str(self.metadata['Y'])+ ')' +' '+str(self.percent)+'%' + ' type: '+ str(self.metadata['type'])
    def resize_image_percent(self, img, percent):
        """Return `img` resized to `percent`% of its size (optionally normalized first)."""
        import cv2
        import numpy as np
        import src.imageFunctions as imf
        width = int(img.shape[1] * percent / 100)
        height = int(img.shape[0] * percent / 100)
        dim = (width, height)
        if self.normalize:
            img = imf.normalizeImg(img, self.metadata)
        # resize image
        resized = cv2.resize(img, dim, interpolation = cv2.INTER_LINEAR)
        if (resized.dtype=='uint16'):
            # Scale 16-bit data down for 8-bit display; the 4095 divisor suggests a
            # 12-bit acquisition range — NOTE(review): confirm, otherwise display is dim.
            resized = resized*(255/4095)
        return resized
    def scrollImagez(self, *args):
        """Scrollbar callback for the z axis: step the slice index and redraw the panel.

        `args` is the Tk scroll command tuple; '-1'/'1' encode the step direction.
        """
        if ('-1' in args and self.posz > 0):
            self.posz = self.posz - 1
        if ('1' in args and self.posz < self.metadata['slices']['value']-1):
            self.posz = self.posz + 1
        self.update_text()
        self.statusbar.configure(text = self.text)
        # refresh the per-axis slice bounds before re-slicing the tensor
        self.updatePositionS(self.posz)
        print('New Posaxis-z: ',self.posz+1)
        if (self.tensor_img.ndim==4):
            resized = self.resize_image_percent(self.tensor_img[self.inx0p:self.inx0,self.inx1p:self.inx1,self.inx2p:self.inx2,self.inx3p:self.inx3].reshape((self.metadata['X'],self.metadata['Y'])), self.percent)
        if (self.tensor_img.ndim==3):
            resized = self.resize_image_percent(self.tensor_img[self.inx0p:self.inx0,self.inx1p:self.inx1,self.inx2p:self.inx2].reshape((self.metadata['X'],self.metadata['Y'])), self.percent)
        imageTk = ImageTk.PhotoImage(image=Image.fromarray(resized))
        self.panelImg['image'] = imageTk
        # keep a reference so Tk does not garbage-collect the photo image
        self.panelImg.image = imageTk
        self.scrollbarz.config(command=self.listboxz.yview(self.posz))
    def scrollImaget(self, *args):
        """Scrollbar callback for the t (frames) axis: step the index and redraw the panel."""
        if ('-1' in args and self.post > 0):
            self.post = self.post - 1
        if ('1' in args and self.post < self.metadata['frames']['value']-1):
            self.post = self.post + 1
        self.update_text()
        self.statusbar.configure(text = self.text)
        # refresh the per-axis slice bounds before re-slicing the tensor
        self.updatePositionF(self.post)
        print('New Posaxis-t: ',self.post+1)
        if (self.tensor_img.ndim==4):
            resized = self.resize_image_percent(self.tensor_img[self.inx0p:self.inx0,self.inx1p:self.inx1,self.inx2p:self.inx2,self.inx3p:self.inx3].reshape((self.metadata['X'],self.metadata['Y'])), self.percent)
        if (self.tensor_img.ndim==3):
            resized = self.resize_image_percent(self.tensor_img[self.inx0p:self.inx0,self.inx1p:self.inx1,self.inx2p:self.inx2].reshape((self.metadata['X'],self.metadata['Y'])), self.percent)
        imageTk = ImageTk.PhotoImage(image=Image.fromarray(resized))
        self.panelImg['image'] = imageTk
        # keep a reference so Tk does not garbage-collect the photo image
        self.panelImg.image = imageTk
        self.scrollbart.config(command=self.listboxt.yview(self.post))
    def scrollImagec(self, *args):
        """Scrollbar callback for the c (channels) axis: step the index and redraw the panel."""
        if ('-1' in args and self.posc > 0):
            self.posc = self.posc - 1
        if ('1' in args and self.posc < self.metadata['channels']['value']-1):
            self.posc = self.posc + 1
        self.update_text()
        self.statusbar.configure(text = self.text)
        # refresh the per-axis slice bounds before re-slicing the tensor
        self.updatePositionC(self.posc)
        print('New Posaxis-c: ',self.posc+1)
        if (self.tensor_img.ndim==4):
            resized = self.resize_image_percent(self.tensor_img[self.inx0p:self.inx0,self.inx1p:self.inx1,self.inx2p:self.inx2,self.inx3p:self.inx3].reshape((self.metadata['X'],self.metadata['Y'])), self.percent)
        if (self.tensor_img.ndim==3):
            resized = self.resize_image_percent(self.tensor_img[self.inx0p:self.inx0,self.inx1p:self.inx1,self.inx2p:self.inx2].reshape((self.metadata['X'],self.metadata['Y'])), self.percent)
        imageTk = ImageTk.PhotoImage(image=Image.fromarray(resized))
        self.panelImg['image'] = imageTk
        # keep a reference so Tk does not garbage-collect the photo image
        self.panelImg.image = imageTk
        self.scrollbarc.config(command=self.listboxc.yview(self.posc))
def desplay_image(self, tensor_img):
self.tensor_img = tensor_img
if tensor_img.ndim == 4:
self.validateSize()
self.update_axes()
self.createStatusBar()
self.updateIndex()
self.createCheakButton('normalize',275,0)
self.panelImg = self.placeImageTensor( tensor_img[None:self.inx0,None:self.inx1,None:self.inx2,None:self.inx3].reshape((self.metadata['X'],self.metadata['Y'])) )
ttk.Button(self.window, text='+', command=self.increase_size).place(x=0,y=0,width=20,height=20)
ttk.Button(self.window, text='-', command=self.decrease_size).place(x=25,y=0,width=20,height=20)
if tensor_img.ndim == 3:
self.validateSize()
self.update_axes()
self.createStatusBar()
self.updateIndex()
self.createCheakButton('normalize',275,0)
self.panelImg = self.placeImageTensor( tensor_img[None:self.inx0,None:self.inx1,None:self.inx2].reshape((self.metadata['X'],self.metadata['Y'])) )
ttk.Button(self.window, text='+', command=self.increase_size).place(x=0,y=0,width=20,height=20)
ttk.Button(self.window, text='-', command=self.decrease_size).place(x=25,y=0,width=20,height=20)
def updatePositionC(self, position):
    """Set the slice bounds along the channel axis to select *position*.

    Writes ``inx<k> = position + 1`` (exclusive stop) and
    ``inx<k>p = position`` (inclusive start) where ``k`` is the tensor axis
    recorded in ``metadata['channels']['index']``.  Axes outside 0-3 are
    ignored, matching the original if-chain.
    """
    axis = self.metadata['channels']['index']
    if axis in (0, 1, 2, 3):
        setattr(self, 'inx%d' % axis, position + 1)
        setattr(self, 'inx%dp' % axis, position)
def updatePositionF(self, position):
    """Set the slice bounds along the frames axis to select *position*.

    Mirrors updatePositionC but keys off ``metadata['frames']['index']``:
    ``inx<k>`` gets the exclusive stop (position + 1) and ``inx<k>p`` the
    inclusive start (position).  Axes outside 0-3 are ignored.
    """
    axis = self.metadata['frames']['index']
    if axis in (0, 1, 2, 3):
        setattr(self, 'inx%d' % axis, position + 1)
        setattr(self, 'inx%dp' % axis, position)
def updatePositionS(self, position):
    """Set the slice bounds along the slices axis to select *position*.

    Mirrors updatePositionC but keys off ``metadata['slices']['index']``:
    ``inx<k>`` gets the exclusive stop (position + 1) and ``inx<k>p`` the
    inclusive start (position).  Axes outside 0-3 are ignored.
    """
    axis = self.metadata['slices']['index']
    if axis in (0, 1, 2, 3):
        setattr(self, 'inx%d' % axis, position + 1)
        setattr(self, 'inx%dp' % axis, position)
def updateIndex(self):
    """Mark the two spatial (X, Y) axes as unbounded slice stops.

    Finds which tensor axes carry the X and Y extents (Y is searched after
    X so equal extents resolve to distinct axes) and sets the matching
    ``inx<k>`` attribute to None, i.e. "take the whole axis" when slicing.
    """
    shape = self.metadata['tensor'].shape
    axis_x = shape.index(self.metadata['X'])
    axis_y = shape.index(self.metadata['Y'], axis_x + 1)
    for axis in (axis_x, axis_y):
        if axis in (0, 1, 2, 3):
            setattr(self, 'inx%d' % axis, None)
def resize(self):
    """Return the currently selected 2-D view scaled to ``self.percent``.

    2-D tensors and RGB TIFF stacks are resized whole; otherwise the plane
    selected by the inx*/inx*p bounds is extracted, reshaped to (X, Y) and
    resized.  For an unsupported rank this raises UnboundLocalError, just
    like the original code.
    """
    self.update_text()
    self.statusbar.configure(text=self.text)
    img = self.tensor_img
    if img.ndim == 2 or (img.ndim == 3 and istiffRGB(img.shape)):
        resized = self.resize_image_percent(img, self.percent)
    elif img.ndim == 3:
        plane = img[self.inx0p:self.inx0, self.inx1p:self.inx1, self.inx2p:self.inx2]
        resized = self.resize_image_percent(plane.reshape((self.metadata['X'], self.metadata['Y'])), self.percent)
    elif img.ndim == 4:
        plane = img[self.inx0p:self.inx0, self.inx1p:self.inx1, self.inx2p:self.inx2, self.inx3p:self.inx3]
        resized = self.resize_image_percent(plane.reshape((self.metadata['X'], self.metadata['Y'])), self.percent)
    return resized
def increase_size(self):
    """Zoom in by ten percentage points and repaint the image panel."""
    self.percent += 10
    print('Percent', self.percent)
    # rebuild the PhotoImage and keep a reference so Tk does not drop it
    photo = ImageTk.PhotoImage(image=Image.fromarray(self.resize()))
    self.panelImg['image'] = photo
    self.panelImg.image = photo
def decrease_size(self):
    """Zoom out by ten percentage points and repaint the image panel."""
    self.percent -= 10
    print('Percent', self.percent)
    # rebuild the PhotoImage and keep a reference so Tk does not drop it
    photo = ImageTk.PhotoImage(image=Image.fromarray(self.resize()))
    self.panelImg['image'] = photo
    self.panelImg.image = photo
def updatePanel(self, oldSize=0, new_percent=False):
import src.imageFunctions as imf
if new_percent:
update_percent | |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import re
import datetime
import json
import logging
import traceback
from copy import deepcopy
from django.db import models, transaction
from django.db.models import Count, Avg, Sum
from django.utils.translation import ugettext_lazy as _
from blueapps.utils import managermixins
from pipeline.core.constants import PE
from pipeline.component_framework import library
from pipeline.component_framework.constant import ConstantPool
from pipeline.models import PipelineInstance
from pipeline.engine import exceptions
from pipeline.engine import api as pipeline_api
from pipeline.engine.models import Data
from pipeline.utils.context import get_pipeline_context
from pipeline.engine import states
from pipeline.log.models import LogEntry
from pipeline.component_framework.models import ComponentModel
from pipeline.contrib.statistics.models import (
ComponentExecuteData,
InstanceInPipeline
)
from pipeline.exceptions import (
ConvergeMatchError,
ConnectionValidateError,
IsolateNodeError,
StreamValidateError
)
from pipeline.validators.gateway import validate_gateways
from pipeline_web.parser import WebPipelineAdapter
from pipeline_web.wrapper import PipelineTemplateWebWrapper
from pipeline_web.parser.format import format_node_io_to_list
from gcloud.conf import settings
from gcloud.contrib.appmaker.models import AppMaker
from gcloud.core.constant import TASK_FLOW_TYPE, TASK_CATEGORY, AE
from gcloud.core.models import Business
from gcloud.tasktmpl3.models import TaskTemplate
from gcloud.commons.template.models import replace_template_id, CommonTemplate, CommonTmplPerm
from gcloud.taskflow3.constants import (
TASK_CREATE_METHOD,
TEMPLATE_SOURCE,
)
from gcloud.core.utils import (
convert_readable_username,
strftime_with_timezone,
timestamp_to_datetime,
format_datetime,
camel_case_to_underscore_naming,
gen_day_dates,
get_month_dates
)
from gcloud.taskflow3.signals import taskflow_started
logger = logging.getLogger("root")

# PipelineInstance fields that list filters may address directly (they get a
# 'pipeline_instance__' prefix when querying).  The original pattern listed
# 'create_time' twice; the redundant alternative is removed — matching
# behaviour is unchanged.
# NOTE(review): '^' binds only to the first alternative ('name'); the other
# alternatives match anywhere in the string — confirm that is intended before
# anchoring the whole group.
PIPELINE_REGEX = re.compile(r'^name|create_time|creator|executor|'
                            r'start_time|finish_time|is_started|is_finished')

# Task-level operation name -> pipeline engine API call.
# 'start' is dispatched through other code paths, hence None.
INSTANCE_ACTIONS = {
    'start': None,
    'pause': pipeline_api.pause_pipeline,
    'resume': pipeline_api.resume_pipeline,
    'revoke': pipeline_api.revoke_pipeline
}
# Node-level operation name -> pipeline engine API call.
NODE_ACTIONS = {
    'revoke': pipeline_api.resume_node_appointment,
    'retry': pipeline_api.retry_node,
    'skip': pipeline_api.skip_node,
    'callback': pipeline_api.activity_callback,
    'skip_exg': pipeline_api.skip_exclusive_gateway,
    'pause': pipeline_api.pause_node_appointment,
    'resume': pipeline_api.resume_node_appointment,
    'pause_subproc': pipeline_api.pause_pipeline,
    'resume_subproc': pipeline_api.resume_node_appointment,
}
GROUP_BY_DICT = {
    'instance_details': 'instance_details'
}
class TaskFlowInstanceManager(models.Manager, managermixins.ClassificationCountMixin):
@staticmethod
def create_pipeline_instance(template, **kwargs):
    """Create and return a PipelineInstance built from *template*.

    ``kwargs`` must contain 'name', 'creator' and 'pipeline_tree'; an
    optional 'description' defaults to ''.  Subprocess template ids are
    resolved and subprocesses are unfolded before the instance is created.
    """
    tree = kwargs['pipeline_tree']
    # resolve referenced template ids inside the tree
    replace_template_id(template.__class__, tree)
    instance_meta = dict(
        name=kwargs['name'],
        creator=kwargs['creator'],
        description=kwargs.get('description', ''),
    )
    # expand nested subprocesses in place
    PipelineTemplateWebWrapper.unfold_subprocess(tree)
    return PipelineInstance.objects.create_instance(
        template.pipeline_template,
        tree,
        spread=True,
        **instance_meta
    )
@staticmethod
def create_pipeline_instance_exclude_task_nodes(template, task_info, constants=None, exclude_task_nodes_id=None):
    """Create a pipeline instance from *template* with optional nodes removed.

    @param template: the task template object
    @param task_info: {
        'name': '',
        'creator': '',
        'description': '',
    }
    @param constants: constant overrides, e.g. {'${a}': '1', '${b}': 2}
    @param exclude_task_nodes_id: ids of optional task nodes to drop
    @return: (True, PipelineInstance) on success, (False, error message) on failure
    """
    if constants is None:
        constants = {}
    pipeline_tree = template.pipeline_tree
    try:
        TaskFlowInstanceManager.preview_pipeline_tree_exclude_task_nodes(pipeline_tree, exclude_task_nodes_id)
    except Exception as e:
        # str(e) instead of e.message: BaseException.message does not exist
        # on Python 3 (and str(e) is equivalent on Python 2).
        return False, str(e)
    # override default constant values with the caller-supplied ones
    for key, value in constants.items():
        if key in pipeline_tree[PE.constants]:
            pipeline_tree[PE.constants][key]['value'] = value
    task_info['pipeline_tree'] = pipeline_tree
    pipeline_inst = TaskFlowInstanceManager.create_pipeline_instance(template, **task_info)
    return True, pipeline_inst
@staticmethod
def _replace_node_incoming(next_node, replaced_incoming, new_incoming):
    """Swap one incoming-flow id of *next_node* for *new_incoming*.

    When the node stores a list of incoming ids, *replaced_incoming* is
    removed and all of *new_incoming* appended.  When it stores a single
    id, it is replaced wholesale — unwrapped if *new_incoming* is a
    one-element list.
    """
    current = next_node[PE.incoming]
    if isinstance(current, list):
        # remove() raises ValueError if the id is absent, same as the
        # original pop(index(...)) form
        current.remove(replaced_incoming)
        current.extend(new_incoming)
    else:
        unwrap = isinstance(new_incoming, list) and len(new_incoming) == 1
        next_node[PE.incoming] = new_incoming[0] if unwrap else new_incoming
@staticmethod
def _ignore_act(act, locations, lines, pipeline_tree):
    """Remove activity *act* from *pipeline_tree*, rewiring flows around it.

    The activity's incoming flow(s) are re-targeted at whatever node its
    outgoing flow pointed to, the outgoing flow is deleted, and the web
    drawing data (*locations* / *lines*) is adjusted to match.
    """
    # change next_node's incoming: task nodes and control nodes differ
    # change incoming_flow's target to next node
    # delete outgoing_flow
    incoming_id_list, outgoing_id = act[PE.incoming], act[PE.outgoing]
    # normalise to a list: nodes with a single incoming store a bare id
    incoming_id_list = incoming_id_list if isinstance(incoming_id_list, list) else [incoming_id_list]
    outgoing_flow = pipeline_tree[PE.flows][outgoing_id]
    target_id = outgoing_flow[PE.target]
    # the successor may be an activity, a gateway, or the end event
    next_node = \
        pipeline_tree[PE.activities].get(target_id) or \
        pipeline_tree[PE.gateways].get(target_id) or \
        pipeline_tree[PE.end_event]
    TaskFlowInstanceManager._replace_node_incoming(next_node=next_node,
                                                   replaced_incoming=outgoing_id,
                                                   new_incoming=incoming_id_list)
    # point every former incoming flow at the successor
    for incoming_id in incoming_id_list:
        incoming_flow = pipeline_tree[PE.flows][incoming_id]
        incoming_flow[PE.target] = next_node['id']
    pipeline_tree[PE.flows].pop(outgoing_id)
    # web location data — best-effort: drawing data may be absent/stale
    try:
        locations.pop(act['id'])
        lines.pop(outgoing_id)
        for incoming_id in incoming_id_list:
            lines[incoming_id][PE.target]['id'] = next_node['id']
    except Exception as e:
        logger.exception('create_pipeline_instance_exclude_task_nodes adjust web data error:%s' % e)
@staticmethod
def _remove_useless_constants(exclude_task_nodes_id, pipeline_tree):
    """Drop constants no longer referenced after node removal and re-index the rest.

    Collects every constant reference reachable from the remaining
    activities and exclusive-gateway conditions (transitively), keeps
    component-output constants whose source node was not excluded, prunes
    everything else, and rebuilds the constants' 'index' fields.
    Mutates *pipeline_tree* in place.
    """
    # pop unreferenced constant
    data = {}
    for act_id, act in pipeline_tree[PE.activities].items():
        if act['type'] == PE.ServiceActivity:
            node_data = {('%s_%s' % (act_id, key)): value
                         for key, value in act['component']['data'].items()}
        # PE.SubProcess
        else:
            node_data = {('%s_%s' % (act_id, key)): value
                         for key, value in act['constants'].items() if value['show_type'] == 'show'}
        data.update(node_data)
    for gw_id, gw in pipeline_tree[PE.gateways].items():
        if gw['type'] == PE.ExclusiveGateway:
            gw_data = {('%s_%s' % (gw_id, key)): {'value': value['evaluate']}
                       for key, value in gw['conditions'].items()}
            data.update(gw_data)
    # get all referenced constants in flow: iterate until no new key is found,
    # since a referenced constant's own value may reference further constants
    constants = pipeline_tree[PE.constants]
    referenced_keys = []
    while True:
        last_count = len(referenced_keys)
        cons_pool = ConstantPool(data, lazy=True)
        refs = cons_pool.get_reference_info(strict=False)
        for keys in refs.values():
            for key in keys:
                # add outputs keys later
                if key in constants and key not in referenced_keys:
                    referenced_keys.append(key)
                    data.update({key: constants[key]})
        if len(referenced_keys) == last_count:
            break

    # keep outputs constants whose source node survives
    def is_outputs(value):
        check_type = value['source_type'] == 'component_outputs'
        if not check_type:
            return False
        # list(...) so this also works on Python 3, where dict.keys() is a
        # view and does not support indexing (identical result on Python 2)
        return list(value['source_info'])[0] not in exclude_task_nodes_id

    outputs_keys = [key for key, value in constants.items() if is_outputs(value)]
    referenced_keys = list(set(referenced_keys + outputs_keys))
    pipeline_tree[PE.outputs] = [key for key in pipeline_tree[PE.outputs] if key in referenced_keys]
    # rebuild constants index, preserving the original relative order
    referenced_keys.sort(key=lambda x: constants[x]['index'])
    new_constants = {}
    for index, key in enumerate(referenced_keys):
        value = constants[key]
        value['index'] = index
        # delete constant reference info to excluded task nodes
        for act_id in exclude_task_nodes_id:
            if act_id in value['source_info']:
                value['source_info'].pop(act_id)
        new_constants[key] = value
    pipeline_tree[PE.constants] = new_constants
@staticmethod
def _try_to_ignore_parallel(parallel, converge_id, lines, locations, pipeline_tree):
    """Collapse a parallel gateway whose branches have become empty.

    Branch flows that go straight from *parallel* to its converge gateway
    are deleted.  If every branch was such a direct link, the whole
    parallel/converge pair is removed and the surrounding flows are
    re-targeted at the converge gateway's successor.  Mutates
    *pipeline_tree*, *lines* and *locations* in place.
    """
    ignore_whole_parallel = True
    converge = pipeline_tree[PE.gateways][converge_id]
    # iterate over a copy: parallel[PE.outgoing] is mutated inside the loop
    parallel_outgoing = deepcopy(parallel[PE.outgoing])
    for outgoing_id in parallel_outgoing:
        # meet not converge node: this branch still has real content
        if pipeline_tree[PE.flows][outgoing_id][PE.target] != converge_id:
            ignore_whole_parallel = False
            continue
        # remove boring sequence (direct parallel -> converge link)
        converge[PE.incoming].remove(outgoing_id)
        parallel[PE.outgoing].remove(outgoing_id)
        pipeline_tree[PE.flows].pop(outgoing_id)
        lines.pop(outgoing_id)
    if not ignore_whole_parallel:
        return
    target_of_converge = pipeline_tree[PE.flows][converge[PE.outgoing]][PE.target]
    # the successor may be an activity, a gateway, or the end event
    next_node_of_converge = \
        pipeline_tree[PE.activities].get(target_of_converge) or \
        pipeline_tree[PE.gateways].get(target_of_converge) or \
        pipeline_tree[PE.end_event]
    # remove converge outgoing
    lines.pop(converge[PE.outgoing])
    pipeline_tree[PE.flows].pop(converge[PE.outgoing])
    # sequences not come from parallel to be removed
    new_incoming_list = []
    # redirect converge rerun incoming
    for incoming in converge[PE.incoming]:
        pipeline_tree[PE.flows][incoming][PE.target] = target_of_converge
        lines[incoming][PE.target]['id'] = target_of_converge
        new_incoming_list.append(incoming)
    # redirect parallel rerun incoming
    gateway_incoming = parallel[PE.incoming]
    gateway_incoming = gateway_incoming if isinstance(gateway_incoming, list) \
        else [gateway_incoming]
    for incoming in gateway_incoming:
        pipeline_tree[PE.flows][incoming][PE.target] = target_of_converge
        lines[incoming][PE.target]['id'] = target_of_converge
        new_incoming_list.append(incoming)
    # process next node's incoming
    TaskFlowInstanceManager._replace_node_incoming(next_node=next_node_of_converge,
                                                   replaced_incoming=converge[PE.outgoing],
                                                   new_incoming=new_incoming_list)
    # remove parallel and converge
    pipeline_tree[PE.gateways].pop(parallel['id'])
    pipeline_tree[PE.gateways].pop(converge['id'])
    locations.pop(parallel['id'])
    locations.pop(converge['id'])
@staticmethod
def _remove_useless_parallel(pipeline_tree, lines, locations):
    """Repeatedly collapse empty parallel gateways until a pass removes none.

    Runs gateway validation on a formatted deep copy (so the original tree
    is untouched by the io-list formatting), then applies
    :meth:`_try_to_ignore_parallel` to every (conditional) parallel gateway
    it reports, looping because one removal can make another possible.
    """
    copy_tree = deepcopy(pipeline_tree)
    # validate_gateways expects incoming/outgoing as lists
    for act in copy_tree['activities'].values():
        format_node_io_to_list(act, o=False)
    for gateway in copy_tree['gateways'].values():
        format_node_io_to_list(gateway, o=False)
    format_node_io_to_list(copy_tree['end_event'], o=False)
    converges = validate_gateways(copy_tree)
    while True:
        gateway_count = len(pipeline_tree[PE.gateways])
        for converge_id, converged_list in converges.items():
            for converged in converged_list:
                gateway = pipeline_tree[PE.gateways].get(converged)
                if not gateway:  # had been removed
                    continue
                is_parallel = gateway[PE.type] in {PE.ParallelGateway, PE.ConditionalParallelGateway}
                # only process parallel gateway
                if not is_parallel:
                    continue
                TaskFlowInstanceManager._try_to_ignore_parallel(parallel=gateway,
                                                                converge_id=converge_id,
                                                                lines=lines,
                                                                locations=locations,
                                                                pipeline_tree=pipeline_tree)
        # fixed point reached: a full pass removed no gateway
        if gateway_count == len(pipeline_tree[PE.gateways]):
            break
@staticmethod
def preview_pipeline_tree_exclude_task_nodes(pipeline_tree, exclude_task_nodes_id=None):
    """Strip the given optional task nodes out of *pipeline_tree* in place.

    @param pipeline_tree: web-format pipeline tree (mutated in place)
    @param exclude_task_nodes_id: ids of optional task nodes to remove
    @return: True on success
    @raise Exception: if a node id is unknown or the node is not optional
    """
    if exclude_task_nodes_id is None:
        exclude_task_nodes_id = []
    # index the drawing data by node/flow id for fast adjustment
    locations = {item['id']: item for item in pipeline_tree.get(PE.location, [])}
    lines = {item['id']: item for item in pipeline_tree.get(PE.line, [])}
    for act_id in exclude_task_nodes_id:
        if act_id not in pipeline_tree[PE.activities]:
            error = 'task node[id=%s] is not in template pipeline tree' % act_id
            raise Exception(error)
        act = pipeline_tree[PE.activities].pop(act_id)
        if not act['optional']:
            error = 'task node[id=%s] is not optional' % act_id
            raise Exception(error)
        TaskFlowInstanceManager._ignore_act(act=act,
                                            locations=locations,
                                            lines=lines,
                                            pipeline_tree=pipeline_tree)
    TaskFlowInstanceManager._remove_useless_parallel(pipeline_tree, lines, locations)
    # materialise as lists: on Python 3 dict.values() is a lazy view, which is
    # not JSON-serialisable and reflects later mutations; on Python 2 this is
    # identical to the previous behaviour
    pipeline_tree[PE.line] = list(lines.values())
    pipeline_tree[PE.location] = list(locations.values())
    TaskFlowInstanceManager._remove_useless_constants(exclude_task_nodes_id=exclude_task_nodes_id,
                                                      pipeline_tree=pipeline_tree)
    return True
def extend_classified_count(self, group_by, filters=None, page=None, limit=None):
"""
@summary: 兼容按照任务状态分类的扩展
@param group_by:
@param filters:
@param page:
@param limit:
@return:
"""
# 获得所有类型的dict列表
category_dict = dict(TASK_CATEGORY)
if filters is None:
filters = {}
prefix_filters = {}
for cond, value in filters.items():
# 如果conditions内容为空或为空字符,不可加入查询条件中
if value in ['None', ''] or cond in ['component_code', 'order_by', 'type']:
continue
if PIPELINE_REGEX.match(cond):
filter_cond = 'pipeline_instance__%s' % cond
# 时间需要大于小于
if cond == 'create_time':
filter_cond = '%s__gte' % filter_cond
prefix_filters.update({filter_cond: timestamp_to_datetime(value)})
continue
# 结束时间由创建时间来决定
if cond == 'finish_time':
filter_cond = 'pipeline_instance__create_time__lt'
prefix_filters.update(
{filter_cond: timestamp_to_datetime(value) + datetime.timedelta(days=1)})
continue
else:
filter_cond = cond
prefix_filters.update({filter_cond: value})
try:
taskflow = self.filter(**prefix_filters)
except Exception as e:
message = u"query_task_list params conditions[%s] have invalid key or value: %s" % (filters, e)
return False, message
if group_by == AE.state:
total = taskflow.count()
groups = [
{
'code': 'CREATED',
'name': _(u"未执行"),
'value': taskflow.filter(pipeline_instance__is_started=False).count()
},
{
'code': 'EXECUTING',
'name': _(u"执行中"),
'value': taskflow.filter(pipeline_instance__is_started=True,
pipeline_instance__is_finished=False).count()
},
{
'code': 'FINISHED',
'name': _(u"已完成"),
'value': taskflow.filter(pipeline_instance__is_finished=True).count()
}
]
elif group_by == AE.business__cc_id:
# 获取所有数据
total = taskflow.count()
taskflow_list = taskflow.values(AE.business__cc_id, AE.business__cc_name).annotate(
value=Count(group_by)).order_by()
groups = []
for data in taskflow_list:
groups.append({
'code': data.get(AE.business__cc_id),
'name': data.get(AE.business__cc_name),
'value': data.get('value', 0)
})
elif group_by == AE.appmaker_instance:
taskflow_values = taskflow.values("create_info")
order_by = filters.get("order_by", "-templateId")
business_id = filters.get("business__cc_id", '')
category = filters.get("category", '')
started_time = timestamp_to_datetime(filters["create_time"])
end_time = timestamp_to_datetime(filters["finish_time"]) + datetime.timedelta(days=1)
appmaker_data = AppMaker.objects.filter(is_deleted=False,
create_time__gte=started_time,
create_time__lte=end_time)
if business_id != '':
appmaker_data = appmaker_data.filter(business__cc_id=business_id)
if category != '':
appmaker_data = appmaker_data.filter(task_template__category=category)
# 获取所有轻应用数据数量
total = appmaker_data.count()
# 获得每一个轻应用的实例数量并变为 dict 字典数据进行查询
total_dict = {
appmaker['create_info']: appmaker['instance_total']
for appmaker in taskflow_values.annotate(instance_total=Count("create_info")).order_by()
}
id_list = appmaker_data.values_list("id")[:]
id_list = sorted(id_list,
key=lambda tuples_id: -total_dict.get(str(tuples_id[0]), 0))
id_list = id_list[(page - 1) * limit: | |
from __future__ import unicode_literals
import re
import os
import six
import json
import time
import shlex
import atexit
import psutil
import requests
import tornado.gen
from orderedattrdict import AttrDict
from threading import Thread, Lock
from subprocess import Popen, PIPE, STDOUT # nosec
from six.moves.urllib.parse import urlencode, urljoin
from tornado.web import HTTPError
from tornado.httpclient import AsyncHTTPClient
from gramex.config import app_log, variables, recursive_encode
from gramex.http import OK, BAD_REQUEST, GATEWAY_TIMEOUT, BAD_GATEWAY, CLIENT_TIMEOUT
from .basehandler import BaseHandler
# MIME type reported for PowerPoint (.pptx) screenshot downloads
_PPTX_MIME = 'application/vnd.openxmlformats-officedocument.presentationml.presentation'
# HTTP headers not to forward to chromecapture.js.
# Keep this sync-ed with the same list in chromecapture.js
_IGNORE_HEADERS = {
    'host',             # The URL will determine the host
    'connection',       # Let Tornado manage the connection
    'upgrade',          # .. and the upgrades
    'content-length',   # The new request will have a different content - length
    'content-md5',      # ... and different content - md5
}
class Capture(object):
    default_port = 9900         # Default port to run CaptureJS at
    check_interval = 0.05       # Frequency (seconds) to check if self.started
    # Set engine configurations for PhantomJS and Puppeteer
    engines = AttrDict(
        phantomjs=AttrDict(
            cmd='phantomjs --ssl-protocol=any',
            script='capture.js',
            first_line=b'PhantomJS.*capture\\.js',
            name='Capture',
            version='1.0'
        ),
        chrome=AttrDict(
            cmd='node',
            script='chromecapture.js',
            first_line=b'node\\.js.*chromecapture\\.js',
            name='ChromeCapture',
            version='1.1'
        ),
    )
    # NOTE(review): the string below is a plain expression statement, not the
    # class docstring — only the FIRST statement in a class body becomes
    # __doc__.  Moving it above ``default_port`` would expose it via help().
    '''
    Create a proxy for capture.js. Typical usage::
        capture = Capture()
        with open('screenshot.png', 'wb') as handle:
            handle.write(capture.png('https://gramener.com/'))
        with open('screenshot.pdf', 'wb') as handle:
            handle.write(capture.pdf('https://gramener.com/'))
    The constructor accepts these optional parameters:
    :arg int port: port where capture.js is running. Default: 9900
    :arg string url: URL:port where PhantomJS is running with capture.js.
        Default: ``http://localhost:<port>/``
    :arg string cmd: Command to run PhantomJS with capture.js at the specified
        port. Default: ``phantomjs $GRAMEXPATH/apps/capture/capture.js --port=<port>``
    :arg int timeout: Seconds to wait for PhantomJS to timeout. Default: 10
    The constructor runs :meth:`Capture.start` in a new thread, which checks if
    capture.js is running at ``url``. If not, it runs ``cmd`` and checks again.
    Until capture.js is detected, all capture methods will fail.
    '''

    def __init__(self, port=None, url=None, engine=None, cmd=None, timeout=10):
        # Set default values for port, url and cmd
        self.engine = self.engines['phantomjs' if engine is None else engine]
        port = self.default_port if port is None else port
        if url is None:
            url = 'http://localhost:%d/' % port
        if cmd is None:
            script = os.path.join(variables.GRAMEXPATH, 'apps', 'capture', self.engine.script)
            cmd = '%s "%s" --port=%d' % (self.engine.cmd, script, port)
        self.url = url
        # pattern the spawned process's first log line must match to be valid
        self.first_line_re = re.compile(self.engine.first_line)
        self.cmd = cmd
        self.timeout = timeout
        self.browser = AsyncHTTPClient()
        self.lock = Lock()
        self.started = False
        self.start()

    def start(self):
        '''
        Starts a thread and check if capture is already running at ``url``. If
        not, start ``cmd`` and check again. Print logs from ``cmd``.
        This method is thread-safe. It may be called as often as required.
        :class:`CaptureHandler` calls this method if ``?start`` is passed.
        '''
        with self.lock:
            # daemon thread so a hung startup never blocks interpreter exit
            thread = Thread(target=self._start)
            thread.daemon = True
            thread.start()

    def _start(self):
        '''
        Check if capture is already running at ``url``. If not, start ``cmd``
        and check again. Print logs from ``cmd``.
        '''
        self.started = False
        script = self.engine.script
        try:
            # Check if capture.js is at the url specified
            app_log.info('Pinging %s at %s', script, self.url)
            r = requests.get(self.url, timeout=self.timeout)
            self._validate_server(r)
            self.started = True
        except requests.ReadTimeout:
            # If capture.js doesn't respond immediately, we haven't started
            app_log.error('url: %s timed out', self.url)
        except requests.ConnectionError:
            # Try starting the process again
            app_log.info('Starting %s via %s', script, self.cmd)
            self.close()
            # self.cmd is taken from the YAML configuration. Safe to run
            self.proc = Popen(shlex.split(self.cmd), stdout=PIPE, stderr=STDOUT)    # nosec
            self.proc.poll()
            atexit.register(self.close)
            # TODO: what if readline() does not return quickly?
            line = self.proc.stdout.readline().strip()
            if not self.first_line_re.search(line):
                return app_log.error('cmd: %s invalid. Returned "%s"', self.cmd, line)
            app_log.info('Pinging %s at %s', script, self.url)
            try:
                r = requests.get(self.url, timeout=self.timeout)
                self._validate_server(r)
                pid = self.proc.pid
                app_log.info(line.decode('utf-8') + ' live (pid=%s)', pid)
                self.started = True
                # Keep logging capture.js output until proc is killed by another thread
                while hasattr(self, 'proc'):
                    line = self.proc.stdout.readline().strip()
                    if len(line) == 0:
                        app_log.info('%s terminated: pid=%d', script, pid)
                        self.started = False
                        break
                    # Capture won't print anything, unless there's a problem, or if debug is on.
                    # So log it at warning level not info.
                    app_log.warning(line.decode('utf-8'))
            except Exception:
                app_log.exception('Ran %s. But %s not at %s', self.cmd, script, self.url)
        except Exception:
            # any other failure while pinging: log with traceback
            app_log.exception('Cannot start Capture')

    def close(self):
        '''Stop capture.js if it has been started by this object'''
        if hasattr(self, 'proc'):
            try:
                # kill the whole process tree, children first
                process = psutil.Process(self.proc.pid)
                for proc in process.children(recursive=True):
                    proc.kill()
                process.kill()
            except psutil.NoSuchProcess:
                app_log.info('%s PID %d already killed', self.engine.script, self.proc.pid)
                pass
            # removing the attribute also stops the log-reader loop in _start()
            delattr(self, 'proc')

    def _validate_server(self, response):
        # Make sure that the response we got is from the right version of capture.js
        # (the Server header is '<name>/<version>')
        server = response.headers.get('Server', '')
        parts = server.split('/', 2)
        script = self.engine.script
        if not len(parts) == 2 or parts[0] != self.engine.name or parts[1] < self.engine.version:
            raise RuntimeError('Server: %s at %s is not %s' % (server, self.url, script))

    @tornado.gen.coroutine
    def capture_async(self, headers=None, **kwargs):
        '''
        Returns a screenshot of the URL. Runs asynchronously in Gramex. Arguments
        are same as :py:func:`capture`
        '''
        # If ?start is provided, start server and wait until timeout
        if 'start' in kwargs:
            self.start()
            end_time = time.time() + self.timeout
            while not self.started and time.time() < end_time:
                yield tornado.gen.sleep(self.check_interval)
        if not self.started:
            raise RuntimeError('%s not started. See logs' % self.engine.script)
        if six.PY2:
            recursive_encode(kwargs)
        r = yield self.browser.fetch(
            self.url, method='POST', body=urlencode(kwargs, doseq=True), raise_error=False,
            connect_timeout=self.timeout, request_timeout=self.timeout, headers=headers)
        if r.code == OK:
            self._validate_server(r)
        raise tornado.gen.Return(r)

    def capture(self, url, **kwargs):
        '''
        Return a screenshot of the URL.
        :arg str url: URL to take a screenshot of
        :arg str ext: format of output. Can be pdf, png, gif or jpg
        :arg str selector: Restrict screenshot to (optional) CSS selector in URL
        :arg int delay: milliseconds (or expression) to wait for before taking a screenshot
        :arg str format: A3, A4, A5, Legal, Letter or Tabloid. Defaults to A4. For PDF
        :arg str layout: A3, A4, A5, Legal, 16x9, 16x10, 4x3. Defaults to 4x3. For PPTX
        :arg str orientation: portrait or landscape. Defaults to portrait. For PDF
        :arg str header: header for the page. For PDF
        :arg str footer: footer for the page. For PDF
        :arg int width: screen width. Default: 1200. For PNG/GIF/JPG
        :arg int height: screen height. Default: 768. For PNG/GIF/JPG
        :arg float scale: zooms the screen by a factor. For PNG/GIF/JPG
        :arg int dpi: dots (pixels) per inch. For PPTX
        :arg str title: slide title. For PPTX
        :arg int debug: sets log level for HTTP requests (2) and responses (1)
        :return: a bytestring with the binary contents of the screenshot
        :rtype: bytes
        :raises RuntimeError: if capture.js is not running or fails
        '''
        # Ensure that we're connecting to the right version of capture.js
        if not self.started:
            end_time = time.time() + self.timeout
            # synchronous variant of the wait loop in capture_async
            while not self.started and time.time() < end_time:
                time.sleep(self.check_interval)
            if not self.started:
                raise RuntimeError('%s not started. See logs' % self.engine.script)
        kwargs['url'] = url
        r = requests.post(self.url, data=kwargs, timeout=self.timeout)
        if r.status_code == OK:
            self._validate_server(r)
            return r.content
        else:
            raise RuntimeError('%s error: %s' % (self.engine.script, r.content))

    def pdf(self, url, **kwargs):
        '''An alias for :meth:`Capture.capture` with ``ext='pdf'``.'''
        kwargs['ext'] = 'pdf'
        return self.capture(url, **kwargs)

    def png(self, url, **kwargs):
        '''An alias for :meth:`Capture.capture` with ``ext='png'``.'''
        kwargs['ext'] = 'png'
        return self.capture(url, **kwargs)

    def pptx(self, url, **kwargs):
        '''An alias for :meth:`Capture.capture` with ``ext='pptx'``.'''
        kwargs['ext'] = 'pptx'
        return self.capture(url, **kwargs)

    def jpg(self, url, **kwargs):
        '''An alias for :meth:`Capture.capture` with ``ext='jpg'``.'''
        kwargs['ext'] = 'jpg'
        return self.capture(url, **kwargs)

    def gif(self, url, **kwargs):
        '''An alias for :meth:`Capture.capture` with ``ext='gif'``.'''
        kwargs['ext'] = 'gif'
        return self.capture(url, **kwargs)
class CaptureHandler(BaseHandler):
'''
Renders a web page as a PDF or as an image. It accepts the same arguments as
:class:`Capture`.
The page is called with the same args as :meth:`Capture.capture`. It also
accepts a ``?start`` parameter that restarts capture.js if required.
'''
# Each config maps to a Capture() object. cls.captures[config] = Capture()
captures = {}
@classmethod
def setup(cls, port=None, url=None, engine=None, cmd=None, **kwargs):
super(CaptureHandler, cls).setup(**kwargs)
capture_kwargs = {}
for kwarg in ('timeout', ):
if kwarg in kwargs:
capture_kwargs[kwarg] = kwargs.pop(kwarg)
# Create a new Capture only if the config has changed
config = dict(engine=engine, port=port, url=url, cmd=cmd, **capture_kwargs)
config_str = json.dumps(config, separators=[',', ':'], sort_keys=True)
if config_str not in cls.captures:
cls.captures[config_str] = cls.capture = Capture(**config)
else:
cls.capture = cls.captures[config_str]
# TODO: if the old config is no longer |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.